diff --git a/.gitignore b/.gitignore index aad210b0..35ef4dc7 100644 --- a/.gitignore +++ b/.gitignore @@ -58,3 +58,6 @@ localtweak__*.tf # tests folder log file *.log + +# Ignore RSA files +*id_rsa \ No newline at end of file diff --git a/datasource.tf b/datasource.tf index 4f893a4a..5944e88f 100644 --- a/datasource.tf +++ b/datasource.tf @@ -49,4 +49,22 @@ data "ibm_is_subnet" "existing_client_subnets" { data "ibm_is_subnet" "existing_bastion_subnets" { count = var.vpc != null && var.bastion_subnets != null ? 1 : 0 name = var.bastion_subnets[count.index] +} + +# New Code +data "ibm_is_instance_profile" "dynmaic_worker_profile" { + name = var.dynamic_compute_instances[0].profile +} + +data "ibm_is_image" "dynamic_compute" { + name = var.dynamic_compute_instances[0].image +} + +data "ibm_is_ssh_key" "compute_ssh_keys" { + for_each = toset(local.compute_ssh_keys) + name = each.key +} + +data "ibm_is_subnet" "compute_subnet_crn" { + identifier = local.compute_subnet_id } \ No newline at end of file diff --git a/locals.tf b/locals.tf index 4311679b..4654ae40 100644 --- a/locals.tf +++ b/locals.tf @@ -237,6 +237,7 @@ locals { storage_private_key_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/storage_id_rsa" : "${path.root}/modules/ansible-roles/storage_id_rsa" #checkov:skip=CKV_SECRET_6 compute_playbook_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/compute_ssh.yaml" : "${path.root}/modules/ansible-roles/compute_ssh.yaml" storage_playbook_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/storage_ssh.yaml" : "${path.root}/modules/ansible-roles/storage_ssh.yaml" + playbooks_root_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles" : "${path.root}/modules/ansible-roles" } # file Share OutPut @@ -257,6 +258,17 @@ locals { nfs_install_dir = "none" Enable_Monitoring = false lsf_deployer_hostname = var.deployer_hostname #data.external.get_hostname.result.name #var.enable_bastion ? 
"" : flatten(module.deployer.deployer_vsi_data[*].list)[0].name + vcpus = var.enable_deployer ? 0 : tonumber(data.ibm_is_instance_profile.dynmaic_worker_profile.vcpu_count[0].value) + ncores = var.enable_deployer ? 0 : tonumber(local.vcpus / 2) + ncpus = var.enable_deployer ? 0 : tonumber(var.enable_hyperthreading ? local.vcpus : local.ncores) + memInMB = var.enable_deployer ? 0 : tonumber(data.ibm_is_instance_profile.dynmaic_worker_profile.memory[0].value) * 1024 + rc_maxNum = var.enable_deployer ? 0 : tonumber(var.dynamic_compute_instances[0].count) + rc_profile = var.enable_deployer ? "" : var.dynamic_compute_instances[0].profile + imageID = var.enable_deployer ? "" : data.ibm_is_image.dynamic_compute.id + compute_subnets_cidr = var.compute_subnets_cidr + dynamic_compute_instances = var.dynamic_compute_instances + compute_subnet_crn = data.ibm_is_subnet.compute_subnet_crn.crn + compute_ssh_keys_ids = [for name in local.compute_ssh_keys : data.ibm_is_ssh_key.compute_ssh_keys[name].id] } locals { @@ -266,7 +278,7 @@ locals { remote_terraform_path = format("%s/terraform-ibm-hpc", local.deployer_path) remote_ansible_path = format("%s/terraform-ibm-hpc", local.deployer_path) da_hpc_repo_url = "https://github.com/terraform-ibm-modules/terraform-ibm-hpc.git" - da_hpc_repo_tag = "develop" ###### change it to main in future + da_hpc_repo_tag = "latest_code_anand" ###### change it to main in future zones = jsonencode(var.zones) list_compute_ssh_keys = jsonencode(local.compute_ssh_keys) list_storage_ssh_keys = jsonencode(local.storage_ssh_keys) diff --git a/main.tf b/main.tf index e1b8c012..9feffdfb 100644 --- a/main.tf +++ b/main.tf @@ -130,7 +130,22 @@ resource "local_sensitive_file" "prepare_tf_input" { "compute_private_key_content": ${local.compute_private_key_content}, "bastion_security_group_id": "${local.bastion_security_group_id}", "deployer_hostname": "${local.deployer_hostname}", - "deployer_ip": "${local.deployer_ip}" + "deployer_ip": "${local.deployer_ip}", + 
"enable_hyperthreading": ${var.enable_hyperthreading}, + "vcpus": ${local.vcpus}, + "ncores": ${local.ncores}, + "ncpus": ${local.ncpus}, + "memInMB": ${local.memInMB}, + "rc_maxNum": ${local.rc_maxNum}, + "rc_profile": "${local.rc_profile}", + "imageID": "${local.imageID}", + "compute_subnet_id": "${local.compute_subnet_id}", + "region": "${local.region}", + "resource_group_id": "${local.resource_group_ids["service_rg"]}", + "compute_subnets_cidr": ${jsonencode(local.compute_subnets_cidr)}, + "dynamic_compute_instances": ${jsonencode(local.dynamic_compute_instances)}, + "compute_ssh_keys_ids": ${jsonencode(local.compute_ssh_keys_ids)}, + "compute_subnet_crn": ${jsonencode(local.compute_subnet_crn)} } EOT filename = local.schematics_inputs_path @@ -274,6 +289,30 @@ module "write_compute_cluster_inventory" { nfs_install_dir = local.nfs_install_dir Enable_Monitoring = local.Enable_Monitoring lsf_deployer_hostname = local.lsf_deployer_hostname + # New Input + dns_domain_names = var.dns_domain_names["compute"] + compute_public_key_content = var.compute_public_key_content + compute_private_key_content = var.compute_private_key_content + # Other Code + enable_hyperthreading = var.enable_hyperthreading + ibmcloud_api_key = var.ibmcloud_api_key + vpc_id = local.vpc_id + vcpus = local.vcpus + ncores = local.ncores + ncpus = local.ncpus + memInMB = local.memInMB + rc_maxNum = local.rc_maxNum + rc_profile = local.rc_profile + imageID = local.imageID + compute_subnet_id = local.compute_subnet_id + region = local.region + resource_group_id = local.resource_group_ids["service_rg"] + zones = var.zones + compute_subnets_cidr = local.compute_subnets_cidr + dynamic_compute_instances = local.dynamic_compute_instances + compute_security_group_id = local.compute_security_group_id + compute_ssh_keys_ids = local.compute_ssh_keys_ids + compute_subnet_crn = local.compute_subnet_crn depends_on = [ time_sleep.wait_60_seconds ] } @@ -313,14 +352,15 @@ module "storage_inventory" { } module 
"compute_playbook" { - count = var.enable_deployer == false ? 1 : 0 - source = "./modules/playbook" - bastion_fip = local.bastion_fip - private_key_path = local.compute_private_key_path - inventory_path = local.compute_inventory_path - playbook_path = local.compute_playbook_path - enable_bastion = var.enable_bastion - depends_on = [ module.compute_inventory ] + count = var.enable_deployer == false ? 1 : 0 + source = "./modules/playbook" + bastion_fip = local.bastion_fip + private_key_path = local.compute_private_key_path + inventory_path = local.compute_inventory_path + playbook_path = local.compute_playbook_path + playbooks_root_path = local.playbooks_root_path + enable_bastion = var.enable_bastion + depends_on = [ module.compute_inventory ] } # module "storage_playbook" { diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/handlers/main.yml b/modules/ansible-roles/roles/lsf_mgmt_config/handlers/main.yml new file mode 100644 index 00000000..cd548eff --- /dev/null +++ b/modules/ansible-roles/roles/lsf_mgmt_config/handlers/main.yml @@ -0,0 +1,10 @@ +--- +- name: Restart lsfd service + service: + name: lsfd + state: restarted + +- name: Restart NetworkManager + ansible.builtin.systemd: + name: NetworkManager + state: restarted \ No newline at end of file diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/tasks/configure_dynamic_nodes_templates.yml b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/configure_dynamic_nodes_templates.yml new file mode 100644 index 00000000..4a42465b --- /dev/null +++ b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/configure_dynamic_nodes_templates.yml @@ -0,0 +1,38 @@ +--- +- name: Management Config Templates| Copy credentials + template: + src: "templates/credentials.j2" + dest: "{{ LSF_RC_IC_CONF }}/credentials" + mode: '0644' + run_once: true + +- name: Management Config Templates | Copy ibmcloudgen2_config.json + template: + src: "templates/ibmcloudgen2_config.json.j2" + dest: "{{ LSF_RC_IC_CONF 
}}/ibmcloudgen2_config.json" + mode: '0644' + run_once: true + +- name: Management Config Templates | Copy ibmcloudgen2_templates.json + template: + src: "templates/ibmcloudgen2_templates.json.j2" + dest: "{{ LSF_RC_IC_CONF }}/ibmcloudgen2_templates.json" + mode: '0644' + run_once: true + +- name: Management Config Templates | Copy hostProviders.json + template: + src: "templates/hostProviders.json.j2" + dest: "{{ LSF_CONF_FILE_PATH }}/resource_connector/hostProviders.json" + mode: '0644' + run_once: true + +- name: Management Config Templates | Copy user_data.sh + template: + src: "templates/user_data.sh" + dest: "{{ LSF_RC_IC_CONF }}/user_data.sh" + mode: '0644' + run_once: true + notify: + - Restart lsfd service + - Restart NetworkManager \ No newline at end of file diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/tasks/configure_management_nodes.yml b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/configure_management_nodes.yml new file mode 100644 index 00000000..b308b42f --- /dev/null +++ b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/configure_management_nodes.yml @@ -0,0 +1,106 @@ +--- +- name: Management Config | Append LSF configuration settings + lineinfile: + path: "{{ LSF_CONF_FILE }}" + line: "{{ item }}" + create: yes + loop: + - "LSB_RC_EXTERNAL_HOST_IDLE_TIME=10" + - "LSF_DYNAMIC_HOST_WAIT_TIME=60" + - "LSF_DYNAMIC_HOST_TIMEOUT=\"EXPIRY[10m] THRESHOLD[250] INTERVAL[60m]\"" + - "LSB_RC_EXTERNAL_HOST_FLAG=\"icgen2host\"" + - "LSB_RC_UPDATE_INTERVAL=15" + - "LSB_RC_MAX_NEWDEMAND=50" + - "LSF_UDP_TO_TCP_THRESHOLD=9000" + - "LSF_CALL_LIM_WITH_TCP=Y" + - "LSF_ANNOUNCE_MASTER_TCP_WAITTIME=600" + - "LSF_RSH=\"ssh -o 'PasswordAuthentication no' -o 'StrictHostKeyChecking no'\"" + run_once: true + +- name: Management Config | Check if queue configuration already exists + shell: "grep -q '# ANSIBLE MANAGED: QUEUE_NAME added' '{{ LSF_LSBATCH_CONF }}/lsb.queues'" + register: queue_check + changed_when: false + failed_when: false + +- name: 
Management Config | Append LSF queue configuration to lsb.queues + blockinfile: + path: "{{ LSF_LSBATCH_CONF }}/lsb.queues" + insertafter: EOF + block: | + # ANSIBLE MANAGED: QUEUE_NAME added + Begin Queue + QUEUE_NAME=das_q + DATA_TRANSFER=Y + RC_HOSTS=all + HOSTS=all + RES_REQ=type==any + End Queue + marker: "" + when: queue_check.rc != 0 + run_once: true + +- name: Management Config | Update LSF configuration files + block: + - name: Management Config | Uncomment "icgen2host" in lsf.shared + replace: + path: "{{ LSF_CONF_FILE_PATH }}/lsf.shared" + regexp: '^#\s*(icgen2host)' + replace: ' \1' + + - name: Management Config | Uncomment "schmod_demand" in lsb.modules + replace: + path: "{{ LSF_LSBATCH_CONF }}/lsb.modules" + regexp: '^#\s*(schmod_demand)' + replace: '\1' + + - name: Check if RC_HOSTS modification was already done + stat: + path: "/tmp/rc_hosts_added" + register: rc_hosts_marker + + - name: Management Config | Add "RC_HOSTS = all" after QUEUE_NAME in lsb.queues using sed + shell: | + sed -i '/^Begin Queue$/,/^End Queue$/{/QUEUE_NAME/{N;s/\(QUEUE_NAME\s*=[^\n]*\)\n/\1\nRC_HOSTS = all\n/}}' "{{ LSF_LSBATCH_CONF }}/lsb.queues" + touch /tmp/rc_hosts_added + when: not rc_hosts_marker.stat.exists + run_once: true + +- name: Management Config | Append management hostnames to lsb.hosts + vars: + management_hostnames: "{{ lsf_masters_list.split() }}" + lineinfile: + path: "{{ LSF_LSBATCH_CONF }}/lsb.hosts" + insertafter: "^default !.*" + line: "{{ item }} 0 () () () () () (Y)" + state: present + loop: "{{ lsf_masters }}" + run_once: true + +- name: Management Config | Check if LSF_HOST_ADDR_RANGE is already set + shell: "grep -q '# ANSIBLE MANAGED: LSF_HOST_ADDR_RANGE added' '{{ LSF_CONF_FILE_PATH }}/lsf.cluster.{{ my_cluster_name }}'" + register: lsf_host_addr_range_marker_check + changed_when: false + failed_when: false + +- name: Management Config | Append LSF_HOST_ADDR_RANGE to lsf.cluster + blockinfile: + path: "{{ LSF_CONF_FILE_PATH }}/lsf.cluster.{{ 
my_cluster_name }}" + block: | + # ANSIBLE MANAGED: LSF_HOST_ADDR_RANGE added + Begin Parameters + LSF_HOST_ADDR_RANGE=10.*.*.* + End Parameters + marker: "" + when: lsf_host_addr_range_marker_check.rc != 0 + run_once: true + +- name: Set permissions for Python directories + file: + path: "{{ item }}" + mode: "0755" + recurse: yes + loop: + - /usr/local/lib/python3.11 + - /usr/local/lib64/python3.11 + ignore_errors: yes \ No newline at end of file diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/tasks/hosts_file_update.yml b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/hosts_file_update.yml new file mode 100644 index 00000000..b8162c69 --- /dev/null +++ b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/hosts_file_update.yml @@ -0,0 +1,56 @@ +--- + +- name: Management Config | Check if IP-to-host mapping already exists + shell: "grep -q '# ANSIBLE MANAGED: IP mapping added' '{{ LSF_HOSTS_FILE }}'" + register: ip_mapping_check + changed_when: false + failed_when: false + run_once: true + +- name: Management Config | Generate and append IP-to-host mapping to LSF hosts file + shell: | + echo "# ANSIBLE MANAGED: IP mapping added" >> '{{ LSF_HOSTS_FILE }}' + python3 -c "import ipaddress; \ + print('\\n'.join([str(ip) + ' {{ my_cluster_name }}-' + str(ip).replace('.', '-') \ + for ip in ipaddress.IPv4Network('{{ compute_subnets_cidr | first }}')]))" >> '{{ LSF_HOSTS_FILE }}' + args: + executable: /bin/bash + run_once: true + when: ip_mapping_check.rc != 0 + +- name: Get IP addresses using nslookup + shell: "nslookup {{ inventory_hostname }} | awk '/Address: / { print $2 }' | tail -n 1" + register: dns_ip + changed_when: false + +- name: Store IPs for each host + set_fact: + host_ip: "{{ dns_ip.stdout }}" + +- name: Aggregate all IPs from all hosts + set_fact: + all_ips: "{{ groups['all'] | map('extract', hostvars, 'host_ip') | list }}" + run_once: true + +- name: Check if each IP exists in LSF hosts file + shell: "grep -w '{{ item }}' {{ LSF_HOSTS_FILE }} 
|| true" + register: ip_check + loop: "{{ all_ips }}" + changed_when: false + run_once: true + +- name: Remove matched IPs from LSF hosts file if they exist + lineinfile: + path: "{{ LSF_HOSTS_FILE }}" + state: absent + regexp: "^{{ item.item }}\\s" + loop: "{{ ip_check.results }}" + when: item.stdout | length > 0 + run_once: true + +- name: Copy the Hosts file to /etc/hosts + copy: + src: "{{ LSF_HOSTS_FILE }}" + dest: /etc/hosts + remote_src: yes + ignore_errors: yes diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/tasks/hyperthreading.yml b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/hyperthreading.yml new file mode 100644 index 00000000..837f7964 --- /dev/null +++ b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/hyperthreading.yml @@ -0,0 +1,41 @@ +--- +- name: Hyperthreading | Define ego_define_ncpus based on hyperthreading + set_fact: + ego_define_ncpus: "{{ 'threads' if enable_hyperthreading else 'cores' }}" + run_once: true + +- name: Hyperthreading | Print the value of ego_define_ncpus + debug: + msg: "EGO_DEFINE_NCPUS is set to {{ ego_define_ncpus }}" + run_once: true + +- name: Hyperthreading | Create LSF hyperthreading script for disabling threads if hyperthreading is false + copy: + dest: "{{ hyperthreading_file }}" + content: | + #!/bin/sh + for vcpu in $(cat /sys/devices/system/cpu/cpu*/topology/thread_siblings_list | cut -s -d- -f2 | cut -d- -f2 | uniq); do + echo "0" > "/sys/devices/system/cpu/cpu"$vcpu"/online" + done + mode: '0755' + when: not enable_hyperthreading + +- name: Hyperthreading | Run the hyperthreading script and add to cron if hyperthreading is false + shell: "{{ hyperthreading_file }}" + when: not enable_hyperthreading + +- name: Hyperthreading | Add script to cron for reboot if hyperthreading is false + cron: + name: "Disable Hyperthreading" + special_time: reboot + job: "{{ hyperthreading_file }}" + when: not enable_hyperthreading + +- name: Hyperthreading | Set the EGO_DEFINE_NCPUS in LSF config file + 
lineinfile: + path: "{{ LSF_CONF_FILE }}" + line: "{{ item }}" + create: yes + loop: + - "EGO_DEFINE_NCPUS={{ ego_define_ncpus }}" + run_once: true \ No newline at end of file diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/tasks/main.yml b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/main.yml new file mode 100644 index 00000000..f34af6c5 --- /dev/null +++ b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/main.yml @@ -0,0 +1,9 @@ +--- + +- import_tasks: hyperthreading.yml + +- import_tasks: configure_management_nodes.yml + +- import_tasks: hosts_file_update.yml + +- import_tasks: configure_dynamic_nodes_templates.yml \ No newline at end of file diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/templates/credentials.j2 b/modules/ansible-roles/roles/lsf_mgmt_config/templates/credentials.j2 new file mode 100644 index 00000000..86ab7a4c --- /dev/null +++ b/modules/ansible-roles/roles/lsf_mgmt_config/templates/credentials.j2 @@ -0,0 +1,8 @@ +# BEGIN ANSIBLE MANAGED BLOCK +VPC_URL=https://vpc.cloud.ibm.com/v1 +VPC_AUTH_TYPE=iam +VPC_APIKEY={{ ibmcloud_api_key }} +RESOURCE_RECORDS_URL=https://api.dns-svcs.cloud.ibm.com/v1 +RESOURCE_RECORDS_AUTH_TYPE=iam +RESOURCE_RECORDS_APIKEY={{ ibmcloud_api_key }} +# END ANSIBLE MANAGED BLOCK \ No newline at end of file diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/templates/hostProviders.json.j2 b/modules/ansible-roles/roles/lsf_mgmt_config/templates/hostProviders.json.j2 new file mode 100644 index 00000000..bdd3ed85 --- /dev/null +++ b/modules/ansible-roles/roles/lsf_mgmt_config/templates/hostProviders.json.j2 @@ -0,0 +1,10 @@ +{ + "providers":[ + { + "name": "ibmcloudgen2", + "type": "ibmcloudgen2Prov", + "confPath": "resource_connector/ibmcloudgen2", + "scriptPath": "resource_connector/ibmcloudgen2" + } + ] +} \ No newline at end of file diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/templates/ibmcloudgen2_config.json.j2
b/modules/ansible-roles/roles/lsf_mgmt_config/templates/ibmcloudgen2_config.json.j2 new file mode 100644 index 00000000..1dfda9ef --- /dev/null +++ b/modules/ansible-roles/roles/lsf_mgmt_config/templates/ibmcloudgen2_config.json.j2 @@ -0,0 +1,17 @@ +{ + "IBMCLOUDGEN2_KEY_FILE": "{{ LSF_RC_IC_CONF }}/credentials", + "IBMCLOUDGEN2_PROVISION_FILE": "{{ LSF_RC_IC_CONF }}/user_data.sh", + "IBMCLOUDGEN2_MACHINE_PREFIX": "{{ my_cluster_name }}", + "LogLevel": "INFO", + "ApiEndPoints": { + "eu-gb": "https://eu-gb.iaas.cloud.ibm.com/v1", + "au-syd": "https://au-syd.iaas.cloud.ibm.com/v1", + "ca-tor": "https://ca-tor.iaas.cloud.ibm.com/v1", + "jp-osa": "https://jp-osa.iaas.cloud.ibm.com/v1", + "jp-tok": "https://jp-tok.iaas.cloud.ibm.com/v1", + "br-sao": "https://br-sao.iaas.cloud.ibm.com/v1", + "us-south": "https://us-south.iaas.cloud.ibm.com/v1", + "eu-de": "https://eu-de.iaas.cloud.ibm.com/v1", + "us-east": "https://us-east.iaas.cloud.ibm.com/v1" + } +} \ No newline at end of file diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/templates/ibmcloudgen2_templates.json.j2 b/modules/ansible-roles/roles/lsf_mgmt_config/templates/ibmcloudgen2_templates.json.j2 new file mode 100644 index 00000000..62d65f3e --- /dev/null +++ b/modules/ansible-roles/roles/lsf_mgmt_config/templates/ibmcloudgen2_templates.json.j2 @@ -0,0 +1,24 @@ +{ + "templates": [ + { + "templateId": "Template-1", + "maxNumber": "{{ rc_maxNum }}", + "attributes": { + "type": ["String", "X86_64"], + "ncores": ["Numeric", "{{ rc_ncores }}"], + "ncpus": ["Numeric", "{{ rc_ncpus }}"], + "mem": ["Numeric", "{{ rc_memInMB }}"], + "icgen2host": ["Boolean", "1"] + }, + "imageId": "{{ imageID }}", + "subnetId": "{{ compute_subnet_crn }}", + "vpcId": "{{ vpc_id }}", + "vmType": "{{ rc_profile }}", + "securityGroupIds": ["{{ compute_security_group_id | first }}"], + "resourceGroupId": "{{ resource_group_id }}", + "sshkey_id": "{{ compute_ssh_keys_ids | first }}", + "region": "{{ regionName }}", + "zone": "{{ zoneName 
| first }}" + } + ] +} \ No newline at end of file diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/templates/user_data.sh b/modules/ansible-roles/roles/lsf_mgmt_config/templates/user_data.sh new file mode 100644 index 00000000..3944469e --- /dev/null +++ b/modules/ansible-roles/roles/lsf_mgmt_config/templates/user_data.sh @@ -0,0 +1,191 @@ +#!/bin/bash + +logfile="/tmp/user_data.log" +echo "START $(date '+%Y-%m-%d %H:%M:%S')" >> $logfile + +# Initialize variables +cluster_prefix="{{ my_cluster_name }}" +nfs_server_with_mount_path="{{ name_mount_path_map.lsf }}" +custom_file_shares="{% for key, value in name_mount_path_map.items() if key != 'lsf' %}{{ value }}{% if not loop.last %} {% endif %}{% endfor %}" +custom_mount_paths="{% for key in name_mount_path_map.keys() if key != 'lsf' %}{{ key }}{% if not loop.last %} {% endif %}{% endfor %}" +hyperthreading="{{ enable_hyperthreading }}" +ManagementHostNames="{{ lsf_masters | join(' ') }}" +rc_cidr_block="{{ compute_subnets_cidr | first }}" +dns_domain="{{ dns_domain_names }}" +network_interface="eth0" + +# Setup Hostname +HostIP=$(hostname -I | awk '{print $1}') +hostname=${cluster_prefix}-${HostIP//./-} +hostnamectl set-hostname $hostname +systemctl stop firewalld +systemctl disable firewalld + +# Setup vpcuser to login +if grep -E -q "CentOS|Red Hat" /etc/os-release +then + USER=vpcuser +elif grep -q "Ubuntu" /etc/os-release +then + USER=ubuntu +fi +sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please login as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 5; exit 142\" /" /root/.ssh/authorized_keys + +# Make lsfadmin and vpcuser set to newer expire +chage -I -1 -m 0 -M 99999 -E -1 -W 14 $USER +chage -I -1 -m 0 -M 99999 -E -1 -W 14 lsfadmin + +# Setup Network configuration +if grep -q "NAME=\"Red Hat Enterprise Linux" /etc/os-release; then + echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${network_interface}" + echo 
"DOMAIN=${dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${network_interface}" + gateway_ip=$(ip route | grep default | awk '{print $3}' | head -n 1) + cidr_range=$(ip route show | grep "kernel" | awk '{print $1}' | head -n 1) + echo "$cidr_range via $gateway_ip dev ${network_interface} metric 0 mtu 9000" >> /etc/sysconfig/network-scripts/route-${network_interface} + systemctl restart NetworkManager +fi + +# Setup VPC FileShare | NFS Mount +LSF_TOP="/opt/ibm/lsf" +echo "Initiating LSF share mount" >> $logfile + +# Function to attempt NFS mount with retries +mount_nfs_with_retries() { + local server_path=$1 + local client_path=$2 + local retries=5 + local success=false + + rm -rf "${client_path}" + mkdir -p "${client_path}" + + for (( j=0; j<retries; j++ )); do + mount "${server_path}" "${client_path}" >> $logfile 2>&1 + if mount | grep -q "${client_path}"; then + echo "Mount successful for ${server_path} on ${client_path}" >> $logfile + success=true + break + else + echo "Attempt $((j+1)) of $retries failed for ${server_path} on ${client_path}" >> $logfile + sleep 2 + fi + done + + if [ "$success" = true ]; then + chmod 777 "${client_path}" + echo "${server_path} ${client_path} nfs rw,sec=sys,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,_netdev 0 0" >> /etc/fstab + return 0 + else + echo "Mount not found for ${server_path} on ${client_path} after $retries attempts." >> $logfile + rm -rf "${client_path}" + return 1 + fi +} + +# Setup LSF share +if [ -n "${nfs_server_with_mount_path}" ]; then + echo "File share ${nfs_server_with_mount_path} found" >> $logfile + nfs_client_mount_path="/mnt/lsf" + if mount_nfs_with_retries "${nfs_server_with_mount_path}" "${nfs_client_mount_path}"; then + for dir in conf work; do + rm -rf "${LSF_TOP}/$dir" + ln -fs "${nfs_client_mount_path}/shared/lsf/$dir" "${LSF_TOP}/$dir" + done + chown -R lsfadmin:root "${LSF_TOP}" + else + echo "Mount not found for ${nfs_server_with_mount_path}, Exiting !!" >> $logfile + exit 1 + fi +fi +echo "Setting LSF share is completed."
>> $logfile + +# Setup SSH +SSH_DIR="/home/lsfadmin/.ssh" +mkdir -p "$SSH_DIR" +cp /home/vpcuser/.ssh/authorized_keys "$SSH_DIR/authorized_keys" +cat /mnt/lsf/shared/ssh/id_rsa.pub >> "$SSH_DIR/authorized_keys" +cp /mnt/lsf/shared/ssh/id_rsa "$SSH_DIR/id_rsa" +echo "StrictHostKeyChecking no" >> "$SSH_DIR/config" +chmod 600 "$SSH_DIR/authorized_keys" +chmod 400 "$SSH_DIR/id_rsa" +chmod 700 "$SSH_DIR" +chown -R lsfadmin:lsfadmin "$SSH_DIR" + +# Setup LSF environment variables +LSF_TOP="/opt/ibm/lsf_worker" +LSF_TOP_VERSION=10.1 +LSF_CONF=$LSF_TOP/conf +LSF_CONF_FILE=$LSF_CONF/lsf.conf +LSF_HOSTS_FILE=${LSF_CONF}/hosts +. $LSF_CONF/profile.lsf >> $logfile +echo "Logging env variables" >> $logfile +env >> $logfile +echo "source ${LSF_CONF}/profile.lsf" >> ~/.bashrc +source ~/.bashrc + +# DNS Setup +echo "search ${dns_domain}" >> /etc/resolv.conf + +# Defining ncpus based on hyper-threading +if [ "$hyperthreading" == "True" ]; then + ego_define_ncpus="threads" +else + ego_define_ncpus="cores" + cat << 'EOT' > /root/lsf_hyperthreading +#!/bin/sh +for vcpu in $(cat /sys/devices/system/cpu/cpu*/topology/thread_siblings_list | cut -s -d- -f2 | cut -d- -f2 | uniq); do + echo "0" > "/sys/devices/system/cpu/cpu"$vcpu"/online" +done +EOT + chmod 755 /root/lsf_hyperthreading + command="/root/lsf_hyperthreading" + sh $command && (crontab -l 2>/dev/null; echo "@reboot $command") | crontab - +fi +echo "EGO_DEFINE_NCPUS=${ego_define_ncpus}" >> $LSF_CONF_FILE + +cat /opt/ibm/lsf/conf/hosts >> /etc/hosts + +# Apply system tuning parameters +LSF_TUNABLES="/etc/sysctl.conf" +echo 'vm.overcommit_memory=1' >> $LSF_TUNABLES +echo 'net.core.rmem_max=26214400' >> $LSF_TUNABLES +echo 'net.core.rmem_default=26214400' >> $LSF_TUNABLES +echo 'net.core.wmem_max=26214400' >> $LSF_TUNABLES +echo 'net.core.wmem_default=26214400' >> $LSF_TUNABLES +echo 'net.ipv4.tcp_fin_timeout = 5' >> $LSF_TUNABLES +echo 'net.core.somaxconn = 8000' >> $LSF_TUNABLES +sudo sysctl -p $LSF_TUNABLES + +# Update lsf 
configuration +echo 'LSB_MC_DISABLE_HOST_LOOKUP=Y' >> $LSF_CONF_FILE +echo "LSF_RSH=\"ssh -o 'PasswordAuthentication no' -o 'StrictHostKeyChecking no'\"" >> $LSF_CONF_FILE +sed -i "s/LSF_SERVER_HOSTS=.*/LSF_SERVER_HOSTS=\"$ManagementHostNames\"/g" $LSF_CONF_FILE +sed -i "s/LSF_ENABLE_EGO=N/LSF_ENABLE_EGO=Y/g" $LSF_CONF_FILE + +# TODO: Understand usage +# Support rc_account resource to enable RC_ACCOUNT policy +if [ -n "${rc_account}" ]; then +sed -i "s/\(LSF_LOCAL_RESOURCES=.*\)\"/\1 [resourcemap ${rc_account}*rc_account]\"/" $LSF_CONF_FILE +echo "Update LSF_LOCAL_RESOURCES lsf.conf successfully, add [resourcemap ${rc_account}*rc_account]" +fi + +# Add additional local resources if needed +instance_id=$(dmidecode | grep Family | cut -d ' ' -f 2 |head -1) +if [ -n "$instance_id" ]; then + sed -i "s/\(LSF_LOCAL_RESOURCES=.*\)\"/\1 [resourcemap $instance_id*instanceID]\"/" $LSF_CONF_FILE + echo "Update LSF_LOCAL_RESOURCES in $LSF_CONF_FILE successfully, add [resourcemap ${instance_id}*instanceID]" +else + echo "Can not get instance ID" >> $logfile +fi + +echo 'LSF_STARTUP_USERS="lsfadmin"' | sudo tee -a /etc/lsf.sudoers +echo "LSF_STARTUP_PATH=$LSF_TOP_VERSION/linux3.10-glibc2.17-x86_64/etc/" | sudo tee -a /etc/lsf.sudoers +chmod 600 /etc/lsf.sudoers +ls -l /etc/lsf.sudoers + +cd /opt/ibm/lsf_worker/10.1/linux3.10-glibc2.17-x86_64/etc/ +sed -i "s|/opt/ibm/lsf/|/opt/ibm/lsf_worker/|g" lsf_daemons +cd - + +sudo /opt/ibm/lsf_worker/10.1/install/hostsetup --top="/opt/ibm/lsf_worker" --setuid >> $logfile +/opt/ibm/lsf_worker/10.1/install/hostsetup --top="/opt/ibm/lsf_worker" --boot="y" --start="y" --dynamic >> $logfile 2>&1 + +echo "END $(date '+%Y-%m-%d %H:%M:%S')" >> $logfile \ No newline at end of file diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/vars/main.yml b/modules/ansible-roles/roles/lsf_mgmt_config/vars/main.yml new file mode 100644 index 00000000..69e5546e --- /dev/null +++ b/modules/ansible-roles/roles/lsf_mgmt_config/vars/main.yml @@ -0,0 +1,6 @@
+LSF_CONF_FILE_PATH: "/opt/ibm/lsfsuite/lsf/conf" +LSF_CONF_FILE: "{{ LSF_CONF_FILE_PATH }}/lsf.conf" +LSF_RC_IC_CONF: "{{ LSF_CONF_FILE_PATH }}/resource_connector/ibmcloudgen2/conf" +LSF_LSBATCH_CONF: "{{ LSF_CONF_FILE_PATH }}/lsbatch/{{ my_cluster_name }}/configdir" +LSF_HOSTS_FILE: "{{ LSF_CONF_FILE_PATH }}/hosts" +LSF_EGO_CONF_FILE: "{{ LSF_CONF_FILE_PATH }}/ego/{{ my_cluster_name }}/kernel/ego.conf" \ No newline at end of file diff --git a/modules/ansible-roles/roles/lsf_server_config/tasks/lsf_tunables.yml b/modules/ansible-roles/roles/lsf_server_config/tasks/lsf_tunables.yml new file mode 100644 index 00000000..b172552f --- /dev/null +++ b/modules/ansible-roles/roles/lsf_server_config/tasks/lsf_tunables.yml @@ -0,0 +1,20 @@ +# Update the Network Tunables +--- +- name: LSF_Tunables | Ensure sysctl parameters are set + ansible.builtin.lineinfile: + path: "{{ sysctl_conf }}" + line: "{{ item }}" + create: yes + loop: + - "vm.overcommit_memory=1" + - "net.core.rmem_max=26214400" + - "net.core.rmem_default=26214400" + - "net.core.wmem_max=26214400" + - "net.core.wmem_default=26214400" + - "net.ipv4.tcp_fin_timeout=5" + - "net.core.somaxconn=8000" + +- name: LSF_Tunables | Apply sysctl settings + ansible.builtin.command: + cmd: sysctl -p "{{ sysctl_conf }}" + changed_when: false \ No newline at end of file diff --git a/modules/ansible-roles/roles/lsf_server_config/tasks/lsfadmin_password_less_auth.yml b/modules/ansible-roles/roles/lsf_server_config/tasks/lsfadmin_password_less_auth.yml new file mode 100644 index 00000000..384792b7 --- /dev/null +++ b/modules/ansible-roles/roles/lsf_server_config/tasks/lsfadmin_password_less_auth.yml @@ -0,0 +1,86 @@ +--- +- name: Passwordless SSH | Create necessary directories + file: + path: "{{ item }}" + state: directory + mode: '0755' + loop: + - "{{ HA_shared_dir }}/ssh" + run_once: true + +- name: Passwordless SSH | Validate compute public and private key content + fail: + msg: "compute_public_key_content or
compute_private_key_content is empty. Exiting." + when: (compute_public_key_content | length == 0) or (compute_private_key_content | length == 0) + +- name: Passwordless SSH | Decode and copy compute public and private key contents + shell: echo "{{ item.content }}" | base64 --decode > "{{ item.dest }}" + loop: + - { content: "{{ compute_public_key_content }}", dest: "{{ HA_shared_dir }}/ssh/id_rsa.pub" } + - { content: "{{ compute_private_key_content }}", dest: "{{ HA_shared_dir }}/ssh/id_rsa" } + no_log: true + run_once: true + +- name: Passwordless SSH | Create necessary directories + file: + path: "{{ item }}" + state: directory + mode: '0755' + loop: + - /home/lsfadmin + - "{{ lsfadmin_ssh_path }}" + +- name: Passwordless SSH | Set permissions for shared SSH directory + file: + path: "{{ HA_shared_dir }}/ssh" + state: directory + mode: '0777' + recurse: yes + +- name: Passwordless SSH | Copy authorized_keys to lsfadmin's .ssh + copy: + src: /home/vpcuser/.ssh/authorized_keys + dest: "{{ lsfadmin_ssh_path }}/authorized_keys" + remote_src: yes + owner: lsfadmin + group: lsfadmin + mode: '0600' + +- name: Passwordless SSH | Copy public key content to authorized_keys + command: "cat {{ HA_shared_dir }}/ssh/id_rsa.pub" + register: pub_key_content + changed_when: false + +- name: Passwordless SSH | Append public key to authorized_keys + lineinfile: + path: "{{ lsfadmin_ssh_path }}/authorized_keys" + line: "{{ pub_key_content.stdout }}" + owner: lsfadmin + group: lsfadmin + mode: '0600' + +- name: Passwordless SSH | Copy private key to lsfadmin's .ssh + copy: + src: "{{ HA_shared_dir }}/ssh/id_rsa" + dest: "{{ lsfadmin_ssh_path }}/id_rsa" + remote_src: yes + owner: lsfadmin + group: lsfadmin + mode: '0400' + +- name: Passwordless SSH | Disable StrictHostKeyChecking + lineinfile: + path: "{{ lsfadmin_ssh_path }}/config" + line: "StrictHostKeyChecking no" + create: yes + owner: lsfadmin + group: lsfadmin + mode: '0644' + +- name: Passwordless SSH | Ensure proper 
permissions on .ssh directory + file: + path: "{{ lsfadmin_ssh_path }}" + state: directory + owner: lsfadmin + group: lsfadmin + mode: '0700' \ No newline at end of file diff --git a/modules/ansible-roles/roles/lsf_server_config/tasks/main.yml b/modules/ansible-roles/roles/lsf_server_config/tasks/main.yml new file mode 100644 index 00000000..4d1d2132 --- /dev/null +++ b/modules/ansible-roles/roles/lsf_server_config/tasks/main.yml @@ -0,0 +1,10 @@ +--- + +# MTU Configuration +- import_tasks: mtu_configuration.yml + +# lsf_tunables configuration +- import_tasks: lsf_tunables.yml + +# Setup Password less authentication for lsfadmin user +- import_tasks: lsfadmin_password_less_auth.yml \ No newline at end of file diff --git a/modules/ansible-roles/roles/lsf_server_config/tasks/mtu_configuration.yml b/modules/ansible-roles/roles/lsf_server_config/tasks/mtu_configuration.yml new file mode 100644 index 00000000..88e8bb6a --- /dev/null +++ b/modules/ansible-roles/roles/lsf_server_config/tasks/mtu_configuration.yml @@ -0,0 +1,32 @@ +--- +- name: MTU | Check if MTU and Domain are already configured + ansible.builtin.shell: | + grep -q '^MTU=9000' {{ network_script_path }}/ifcfg-{{ network_interface }} && \ + grep -q '^DOMAIN={{ dns_domain_names }}' {{ network_script_path }}/ifcfg-{{ network_interface }} + register: mtu_check + failed_when: false + changed_when: false + ignore_errors: true + +- name: MTU | Set MTU and Domain in network script + ansible.builtin.blockinfile: + path: "{{ network_script_path }}/ifcfg-{{ network_interface }}" + block: | + MTU=9000 + DOMAIN={{ dns_domain_names }} + marker: "# {mark} ANSIBLE MANAGED BLOCK" + when: mtu_check.rc != 0 + +- name: MTU | Get default gateway and CIDR range + ansible.builtin.shell: | + gateway_ip=$(ip route | grep default | awk '{print $3}' | head -n 1) + cidr_range=$(ip route show | grep "kernel" | awk '{print $1}' | head -n 1) + echo "$cidr_range via $gateway_ip dev {{ network_interface }} metric 0 mtu 9000" + register: 
route + changed_when: false + +- name: MTU | Set MTU at the router level + ansible.builtin.lineinfile: + path: "{{ network_script_path }}/route-{{ network_interface }}" + line: "{{ route.stdout }}" + create: yes \ No newline at end of file diff --git a/modules/ansible-roles/roles/lsf_server_config/vars/main.yml b/modules/ansible-roles/roles/lsf_server_config/vars/main.yml new file mode 100644 index 00000000..a2bba70c --- /dev/null +++ b/modules/ansible-roles/roles/lsf_server_config/vars/main.yml @@ -0,0 +1,4 @@ +network_interface: "eth0" +network_script_path: "/etc/sysconfig/network-scripts" +sysctl_conf: "/etc/sysctl.conf" +lsfadmin_ssh_path: "/home/lsfadmin/.ssh" \ No newline at end of file diff --git a/modules/playbook/main.tf b/modules/playbook/main.tf index c3aeab5c..3a425cd5 100644 --- a/modules/playbook/main.tf +++ b/modules/playbook/main.tf @@ -1,5 +1,6 @@ locals { proxyjump = var.enable_bastion ? "-o ProxyJump=ubuntu@${var.bastion_fip}" : "" + mgmt_playbook_filename = format("%s/lsf_mgmt_config.yml", var.playbooks_root_path) } resource "local_file" "create_playbook" { @@ -47,6 +48,7 @@ resource "local_file" "create_playbook" { roles: - vpc_fileshare_configure - lsf + - lsf_server_config EOT filename = var.playbook_path } @@ -81,4 +83,65 @@ resource "null_resource" "run_lsf_playbooks" { } depends_on = [null_resource.run_playbook] -} \ No newline at end of file +} + +resource "local_file" "create_playbook_for_mgmt_config" { + count = var.inventory_path != null ? 
1 : 0 + content = < + ${local.proxyjump} + -o ControlMaster=auto + -o ControlPersist=30m + -o UserKnownHostsFile=/dev/null + -o StrictHostKeyChecking=no + ansible_user: root + ansible_ssh_private_key_file: ${var.private_key_path} + tasks: + - name: Check passwordless SSH on all scale inventory hosts + shell: echo PASSWDLESS_SSH_ENABLED + register: result + until: result.stdout.find("PASSWDLESS_SSH_ENABLED") != -1 + retries: 60 + delay: 10 + +- name: Prerequisite Configuration + hosts: [all_nodes] + any_errors_fatal: true + gather_facts: false + vars: + ansible_ssh_common_args: > + ${local.proxyjump} + -o ControlMaster=auto + -o ControlPersist=30m + -o UserKnownHostsFile=/dev/null + -o StrictHostKeyChecking=no + ansible_user: root + ansible_ssh_private_key_file: ${var.private_key_path} + pre_tasks: + - name: Load cluster-specific variables + include_vars: all.json + roles: + - lsf_mgmt_config +EOT + filename = local.mgmt_playbook_filename +} + + +resource "null_resource" "run_playbook_for_mgmt_config" { + count = var.inventory_path != null ? 
1 : 0 + provisioner "local-exec" { + interpreter = ["/bin/bash", "-c"] + command = "ansible-playbook -i ${var.inventory_path} '${local.mgmt_playbook_filename}'" + } + triggers = { + build = timestamp() + } + depends_on = [local_file.create_playbook_for_mgmt_config, null_resource.run_lsf_playbooks] +} diff --git a/modules/playbook/variables.tf b/modules/playbook/variables.tf index 248b03dd..366e2660 100644 --- a/modules/playbook/variables.tf +++ b/modules/playbook/variables.tf @@ -22,6 +22,12 @@ variable "playbook_path" { default = "ssh.yaml" } +variable "playbooks_root_path" { + description = "Playbook root path" + type = string + default = "" +} + variable "enable_bastion" { type = bool default = true diff --git a/modules/write_inventory/main.tf b/modules/write_inventory/main.tf index 55875618..75b11a65 100644 --- a/modules/write_inventory/main.tf +++ b/modules/write_inventory/main.tf @@ -14,7 +14,29 @@ resource "local_sensitive_file" "itself" { "HA_shared_dir": ${jsonencode(var.ha_shared_dir)}, "NFS_install_dir": ${jsonencode(var.nfs_install_dir)}, "Enable_Monitoring": ${jsonencode(var.Enable_Monitoring)}, - "lsf_deployer_hostname": ${jsonencode(var.lsf_deployer_hostname)} + "lsf_deployer_hostname": ${jsonencode(var.lsf_deployer_hostname)}, + "dns_domain_names": ${jsonencode(var.dns_domain_names)}, + "compute_public_key_content": ${jsonencode(var.compute_public_key_content)}, + "compute_private_key_content": ${jsonencode(var.compute_private_key_content)}, + "enable_hyperthreading": ${jsonencode(var.enable_hyperthreading)}, + "ibmcloud_api_key": ${jsonencode(var.ibmcloud_api_key)}, + "vpc_id": ${jsonencode(var.vpc_id)}, + "vcpus": ${jsonencode(var.vcpus)}, + "rc_ncores": ${jsonencode(var.ncores)}, + "rc_ncpus": ${jsonencode(var.ncpus)}, + "rc_memInMB": ${jsonencode(var.memInMB)}, + "rc_maxNum": ${jsonencode(var.rc_maxNum)}, + "rc_profile": ${jsonencode(var.rc_profile)}, + "imageID": ${jsonencode(var.imageID)}, + "compute_subnet_id": 
${jsonencode(var.compute_subnet_id)}, + "regionName": ${jsonencode(var.region)}, + "resource_group_id": ${jsonencode(var.resource_group_id)}, + "zoneName": ${jsonencode(var.zones)}, + "compute_ssh_keys_ids": ${jsonencode(var.compute_ssh_keys_ids)}, + "dynamic_compute_instances": ${jsonencode(var.dynamic_compute_instances)}, + "compute_subnets_cidr": ${jsonencode(var.compute_subnets_cidr)}, + "compute_security_group_id": ${jsonencode(var.compute_security_group_id)}, + "compute_subnet_crn": ${jsonencode(var.compute_subnet_crn)} } EOT filename = var.json_inventory_path diff --git a/modules/write_inventory/variables.tf b/modules/write_inventory/variables.tf index 6cec960c..fe5d0625 100644 --- a/modules/write_inventory/variables.tf +++ b/modules/write_inventory/variables.tf @@ -62,4 +62,139 @@ variable "lsf_deployer_hostname" { type = string default = null description = "Deployer host name" +} + +# New Variables +variable "dns_domain_names" { + type = string + default = null + description = "IBM Cloud HPC DNS domain names." +} + +variable "compute_public_key_content" { + type = string + sensitive = true + default = null + description = "Compute security key content." +} + +variable "compute_private_key_content" { + type = string + sensitive = true + default = null + description = "Compute security key content." 
+} + +variable "enable_hyperthreading" { + description = "Enable or disable hyperthreading" + type = bool + default = null +} + +variable "vcpus" { + description = "Number of vCPUs" + type = number + default = null +} + +variable "ncores" { + description = "Number of cores" + type = number + default = null +} + +variable "ncpus" { + description = "Number of CPUs" + type = number + default = null +} + +variable "memInMB" { + description = "Memory in MB" + type = number + default = null +} + +variable "rc_maxNum" { + description = "Maximum number of resource instances" + type = number + default = null +} + +variable "rc_profile" { + description = "Resource profile" + type = string + default = null +} + +variable "imageID" { + description = "Image ID for the compute instance" + type = string + default = null +} + +variable "compute_subnet_id" { + description = "Compute subnet ID" + type = string + default = null +} + +variable "region" { + description = "Deployment region" + type = string + default = null +} + +variable "resource_group_id" { + description = "Resource group ID" + type = string + default = null +} + +variable "compute_subnets_cidr" { + description = "List of compute subnets CIDR" + type = list(string) + default = null +} + +variable "dynamic_compute_instances" { + description = "Dynamic compute instances configuration" + type = list(map(any)) + default = null +} + +variable "compute_ssh_keys_ids" { + description = "List of compute SSH key IDs" + type = list(string) + default = null +} + +variable "ibmcloud_api_key" { + type = string + sensitive = true + default = null +} + +variable "zones" { + description = "List of availability zones" + type = list(string) + default = null +} + +variable "compute_security_group_id" { + type = list(string) + description = "List of Security group IDs to allow File share access" + default = null +} + +variable "vpc_id" { + type = string + default = null + description = "ID of an existing VPC in which the cluster 
resources will be deployed." +} + +variable "compute_subnet_crn" { + type = string + default = null + description = "ID of an existing VPC in which the cluster resources will be deployed." } \ No newline at end of file