I have a GPU scenario where I want to create a single large GPU worker (e.g. an A100) in a single VPC zone to limit cost.
Any recommendation on how to adapt the code below, which by default provisions one worker per zone? (I have sketched one untested idea as a comment inside the GPU pool below.)
locals {
  # list of subnets in all zones
  subnets = [
    for subnet in ibm_is_subnet.subnets :
    {
      id         = subnet.id
      zone       = subnet.zone
      cidr_block = subnet.ipv4_cidr_block
    }
  ]

  # mapping of cluster worker pool names to subnets
  cluster_vpc_subnets = {
    zone-1 = local.subnets,
    zone-2 = local.subnets,
    zone-3 = local.subnets
  }

  boot_volume_encryption_kms_config = {
    crk             = module.kp_all_inclusive.keys["${local.key_ring}.${local.boot_volume_key}"].key_id
    kms_instance_id = module.kp_all_inclusive.kms_guid
  }

  worker_pools = [
    {
      subnet_prefix                     = "zone-1"
      pool_name                         = "default" # ibm_container_vpc_cluster automatically names default pool "default" (see https://github.com/IBM-Cloud/terraform-provider-ibm/issues/2849)
      machine_type                      = "bx2.16x64" # ODF flavors
      workers_per_zone                  = 1
      operating_system                  = "RHCOS"
      enableAutoscaling                 = true
      minSize                           = 1
      maxSize                           = 6
      boot_volume_encryption_kms_config = local.boot_volume_encryption_kms_config
    },
    {
      subnet_prefix                     = "zone-2"
      pool_name                         = "gpu-pool"
      machine_type                      = "gx3.16x80.l4"
      workers_per_zone                  = 1
      secondary_storage                 = "600gb.10iops-tier"
      operating_system                  = "RHCOS"
      boot_volume_encryption_kms_config = local.boot_volume_encryption_kms_config
    },