#####
# Configuration for the seed node (HA) or single node
#####

# The ID of an existing network to create the node on
# infra_network_id: "2692933e-244f-4a3b-aa18-4daaf3383838"
# OR
# The CIDR of the subnet that should be created
infra_network_cidr: "192.168.0.0/24"
# The ID of the external network to connect to via a router
infra_external_network_id: "cd0f63f2-f3f8-4c14-a688-4c5b9f4bed76" # external-internet
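# The network IDs above can be looked up with the standard OpenStack CLI if
# they are not already known, e.g.:
#   openstack network list            # all networks visible to the project
#   openstack network list --external # external networks only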

# The fixed floating IP to associate with the machine
# This IP must be pre-allocated to the project
# For a single node deployment, this IP should have the wildcard ingress domain assigned to it
infra_fixed_floatingip: "136.156.140.50"
# OR
# The name of the floating IP pool to allocate a floating IP from
# infra_floatingip_pool: "<floating ip pool>"
# OR
# The ID of a provisioning network that will be used to access the seed node
# infra_provisioning_network_id:
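# A fixed floating IP like the one above can be pre-allocated to the project
# from the external network using the standard OpenStack CLI, e.g.:
#   openstack floating ip create <external network name or ID>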

# The image ID of an Ubuntu 20.04 image to use for the node
# N.B. This is populated automatically using community images by default
# infra_image_id: "<image id>"
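# If overriding the image, candidate image IDs can be listed with, e.g.:
#   openstack image list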

# The ID of the flavor to use for the node
# For the seed node of an HA cluster, 8GB RAM is fine (maybe even 4GB)
# For a single node deployment, >= 16GB RAM is recommended
infra_flavor_id: "c01884ee-7bf3-4dfb-a947-ea3c68a0a66e" # 8cpu-8gbmem-30gbdisk
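# Available flavors and their IDs can be listed with, e.g.:
#   openstack flavor list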

#####
# Configuration for the HA cluster
#####

# The fixed floating IP to associate with the load balancer for the ingress controller
# This IP must be pre-allocated to the project and should have the wildcard ingress domain assigned to it
capi_cluster_addons_ingress_load_balancer_ip: "136.156.138.225"

# Storage settings for the management cluster
capi_cluster_root_volume_type: "d6a82d3a-0ece-42ea-9aee-ffe768e5de22" # Ceph-HDD

# Storage settings for tenant clusters
azimuth_capi_operator_capi_helm_root_volume_type: "d6a82d3a-0ece-42ea-9aee-ffe768e5de22" # Ceph-HDD
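# The volume types available to the project, and their IDs, can be listed
# with, e.g.:
#   openstack volume type list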

# Separate etcd volume config

# Management cluster
# Default volume type for the etcd block device if 'Volume' type is used in management clusters
capi_cluster_etcd_blockdevice_volume_type: "87bc5d37-08b0-46c3-9e94-f93fe2616b21" # Ceph-SSD

# Tenant clusters
# Default volume type for the etcd block device if 'Volume' type is used
azimuth_capi_operator_capi_helm_etcd_blockdevice_volume_type: "87bc5d37-08b0-46c3-9e94-f93fe2616b21" # Ceph-SSD
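# A sketch of the related settings that select the 'Volume' block device
# type and its size - these variable names are assumptions to verify against
# the azimuth-config docs for the release in use:
# capi_cluster_etcd_blockdevice_size: 10
# capi_cluster_etcd_blockdevice_type: Volume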

#####
# Ingress configuration
#####

# The base domain to use for ingress resources
ingress_base_domain: "azimuth.compute.cci2.ecmwf.int"
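# Services are exposed as subdomains of this base domain - e.g. the Azimuth
# portal would typically be served at portal.azimuth.compute.cci2.ecmwf.int
# (assuming the default subdomain) - which is why a wildcard DNS record
# pointing at the ingress IP is needed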

# The fixed IP for the load balancer of the Zenith SSHD service, which
# Zenith clients connect to in order to establish their tunnels
zenith_sshd_service_load_balancer_ip: "136.156.138.51"

#####
# Azimuth configuration
#####

# Use the Amphora provider for the Octavia load balancers created for the
# management cluster addons and for tenant clusters
azimuth_capi_operator_capi_helm_openstack_loadbalancer_provider: amphora
capi_cluster_addons_openstack_loadbalancer_provider: amphora

#####
# Terraform state
#####

# The endpoint of the object store
terraform_s3_endpoint: https://object-store.os-api.cci2.ecmwf.int/

# The bucket to put Terraform states in
# NOTE: This bucket must already exist - it will not be created by Terraform
terraform_s3_bucket: az-terraform-state
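# Credentials for the S3 backend are not stored in this file - Terraform's
# S3 backend can pick them up from the standard AWS_ACCESS_KEY_ID and
# AWS_SECRET_ACCESS_KEY environment variables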

# Velero backup & recovery
velero_s3_url: https://object-store.os-api.cci2.ecmwf.int/
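# A sketch of the other settings Velero is expected to need alongside the
# URL - these variable names are assumptions to verify against the
# azimuth-config docs, and the bucket (like the Terraform state bucket)
# must already exist:
# velero_enabled: true
# velero_bucket_name: "<bucket for backups>"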