#####
-# Configuration for the seed node (HA) or single node
+# This file, combined with secrets.yml, shows an example configuration for a
+# minimal, but still best-practice, Azimuth deployment on a "well-behaved" cloud
+#
+# https://stackhpc.github.io/azimuth-config/best-practice/
+#
+# It is recommended to read the "Configuration" section of the Azimuth Operator
+# Documentation in full to understand all the available options
+#
+# https://stackhpc.github.io/azimuth-config/configuration/
#####

-# The ID of an existing network to create the node on
-infra_network_id: "<internal network id>"
-# OR
-# The CIDR of the subnet that should be created
-infra_network_cidr: 192.168.100.0/24
-# The ID of the external network to connect to via a router
-infra_external_network_id: "<external network id>"
+## Configuration for OpenTofu state
+## https://stackhpc.github.io/azimuth-config/repository/opentofu/

-# The fixed floating IP to associate with the machine
-# This IP must be pre-allocated to the project
-# For a single node deployment, this IP should have the wildcard ingress domain assigned to it
-infra_fixed_floatingip: "<pre-allocated floating ip>"
-# OR
-# The name of the floating IP pool to allocate a floating IP from
-infra_floatingip_pool: "<floating ip pool>"
-# OR
-# The ID of a provisioning network that will be used to access the seed node
-infra_provisioning_network_id: "<provisioning network id>"
-
-# The image id of an Ubuntu 20.04 image to use for the node
-# N.B. This is populated automatically using community images by default
-# infra_image_id: "<image id>"
+# The Terraform backend type to use (HTTP and S3 supported)
+terraform_backend_type: "<http or s3>"
+
+# The backend configuration (depends on the selected backend type)
+terraform_backend_config: {}
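+#
+# For illustration only: a minimal sketch of an S3 backend configuration for a
+# generic S3-compatible object store (the bucket, key and endpoint below are
+# hypothetical values, not defaults):
+#
+#   terraform_backend_type: s3
+#   terraform_backend_config:
+#     bucket: azimuth-tfstate
+#     key: azimuth.tfstate
+#     endpoint: https://object-store.example.com
+#     skip_credentials_validation: true
+#
+# Credentials are typically supplied out-of-band, e.g. via the standard
+# AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY environment variables.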
+
+
+## Configuration for the seed node (HA) or single node deployment
+## https://stackhpc.github.io/azimuth-config/configuration/02-deployment-method/
+
+# The ID of the external network to use
+# This network must provide _egress_ to the internet
+# https://stackhpc.github.io/azimuth-config/configuration/01-prerequisites/#networking
+infra_external_network_id: "<network id>"
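+#
+# If in doubt, the external networks visible to the project can be listed with
+# the standard OpenStack CLI, e.g.:
+#
+#   openstack network list --external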

# The id of the flavor to use for the node
# For a seed node for an HA cluster, 8GB RAM is fine (maybe even 4GB)
# For a single node deployment, >= 16GB RAM is recommended
infra_flavor_id: "<flavor id>"
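+#
+# As with the network above, candidate flavors and their CPU/RAM sizes can be
+# inspected with, e.g.:
+#
+#   openstack flavor list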

-# The size in GB for the data volume
-# This will hold all cluster data, including Kubernetes resources, and also PVC data
+# The size of the volume to use for K3S cluster data
infra_data_volume_size: 100

-#####
-# Configuration for the HA cluster
-#####
+# SINGLE NODE DEPLOYMENT ONLY
+# The fixed floating IP to associate with the machine
+# Must be pre-allocated to the project and have the wildcard ingress domain assigned to it
+# infra_fixed_floatingip: "<pre-allocated floating ip>"
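+#
+# For illustration, a floating IP can be pre-allocated to the project with,
+# e.g. (the network name is a placeholder):
+#
+#   openstack floating ip create <external network>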
+
+
+## Configuration for the HA cluster
+## https://stackhpc.github.io/azimuth-config/configuration/02-deployment-method/
+## https://stackhpc.github.io/azimuth-config/configuration/03-kubernetes-config/

-# The Kubernetes version that will be used for the HA cluster
-# N.B. This is populated automatically using community images by default
-# capi_cluster_kubernetes_version: 1.23.8
-# The ID of the image that will be used for the nodes of the HA cluster
-# N.B. This is populated automatically using community images by default
-# capi_cluster_machine_image_id: "<image id>"
# The name of the flavor to use for control plane nodes
+# A flavor with at least 2 CPUs, 8GB RAM and 100GB root disk is recommended
capi_cluster_control_plane_flavor: "<flavor name>"
+
# The name of the flavor to use for worker nodes
+# A flavor with at least 4 CPUs, 16GB RAM and 100GB root disk is recommended
capi_cluster_worker_flavor: "<flavor name>"
+
# The number of worker nodes
capi_cluster_worker_count: 3
-# The fixed floating IP to associate with the load balancer for the ingress controller
-# This IP must be pre-allocated to the project and should have the wildcard ingress domain assigned to it
-capi_cluster_addons_ingress_load_balancer_ip: "<pre-allocated floating ip>"

-#####
-# Ingress configuration
-#####
-# The base domain to use for ingress resources
-ingress_base_domain: "<base domain>"
-
-# Indicates if cert-manager should be enabled
-# Currently, TLS is enabled for ingress iff cert-manager is enabled
-certmanager_enabled: yes
+# The floating IP to which the wildcard DNS entry has been assigned
+capi_cluster_addons_ingress_load_balancer_ip: "<pre-allocated floating ip>"
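+#
+# For example, with a hypothetical ingress domain and address, the wildcard
+# DNS entry pointing at this IP would look something like:
+#
+#   *.azimuth.example.org.  300  IN  A  192.0.2.10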

-# Indicates if Harbor should be enabled to provide pull-through caches
-harbor_enabled: no

-#####
-# Azimuth configuration
-#####
-# Indicates if the Zenith app proxy should be enabled
-azimuth_apps_enabled: yes
-# Indicates if Kubernetes support should be enabled
-azimuth_kubernetes_enabled: yes
-# Indicates if Cluster-as-a-Service (CaaS) should be enabled
-azimuth_clusters_enabled: yes
+## Target cloud configuration
+## https://stackhpc.github.io/azimuth-config/configuration/04-target-cloud/

# The name of the current cloud
azimuth_current_cloud_name: example
+
# The label for the current cloud
azimuth_current_cloud_label: Example
-# The auth URL for the target OpenStack cloud
-azimuth_openstack_auth_url: https://cloud.example.com:5000/v3

-#####
-# Configuration of authenticators / authentication methods
-#####
-# Whether the password authenticator should be enabled (enabled by default)
-azimuth_authenticator_password_enabled: true
-# The label for the password authenticator
-azimuth_authenticator_password_label: "Username + Password"
-
-# Whether the appcred authenticator should be enabled (not enabled by default)
-azimuth_authenticator_appcred_enabled: false
-# The label for the appcred authenticator
-azimuth_authenticator_appcred_label: "Application Credential"
-
-# Whether the federated authenticator should be enabled (not enabled by default)
-azimuth_authenticator_federated_enabled: false
-# The label for the federated authenticator
-azimuth_authenticator_federated_label: "Federated"
-# The provider for the federated authenticator
-# This should correspond to the Keystone federation URL, e.g. <auth url>/auth/OS-FEDERATION/websso/<provider>
-azimuth_authenticator_federated_provider: oidc

-#####
-# Configuration for CaaS appliances
-#####
-# If CaaS is enabled and the StackHPC Slurm appliance is enabled (the default), this
-# is the id of a Rocky 8 image that will be used for Slurm clusters
-# N.B. This is populated automatically using community images by default
-# azimuth_caas_stackhpc_slurm_appliance_image: "<image id>"
-
-# The ID of the desktop or webconsole image to use for the workstation appliance
-# See https://object.arcus.openstack.hpc.cam.ac.uk/swift/v1/AUTH_f0dc9cb312144d0aa44037c9149d2513/azimuth-images-prerelease/
-# N.B. This is populated automatically using community images by default
-# azimuth_caas_stackhpc_workstation_image: "<image id>"
-
-# The ID of the repo2docker image to use for the repo2docker appliance
-# See https://object.arcus.openstack.hpc.cam.ac.uk/swift/v1/AUTH_f0dc9cb312144d0aa44037c9149d2513/azimuth-images-prerelease/
-# N.B. This is populated automatically using community images by default
-# azimuth_caas_stackhpc_repo2docker_image: "<image id>"
+## Ingress configuration
+## https://stackhpc.github.io/azimuth-config/configuration/06-ingress/
+
+# The base domain to use for ingress resources
+ingress_base_domain: "<base domain>"
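+#
+# Azimuth services are exposed as subdomains of this domain; for example, with
+# a hypothetical value of
+#
+#   ingress_base_domain: azimuth.example.org
+#
+# services live under *.azimuth.example.org, which is why the wildcard DNS
+# entry above is required.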
+
+
+## Persistence and retention for monitoring (HA only)
+## https://stackhpc.github.io/azimuth-config/configuration/14-monitoring/#persistence-and-retention
+
+# Prometheus retention and volume size
+capi_cluster_addons_monitoring_prometheus_retention: 90d
+capi_cluster_addons_monitoring_prometheus_volume_size: 50Gi
+
+# Loki retention and volume size (744h = 31 days)
+capi_cluster_addons_monitoring_loki_retention: 744h
+capi_cluster_addons_monitoring_loki_volume_size: 50Gi
+
+
+## Disaster recovery
+## https://stackhpc.github.io/azimuth-config/configuration/15-disaster-recovery/
+
+# Enable Velero for backup
+velero_enabled: true
+
+# The URL of the S3 endpoint to use for backups
+velero_s3_url: "<endpoint URL>"
+
+# The name of the S3 bucket to use for backups (must already exist)
+velero_bucket_name: "<bucket name>"
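+#
+# A filled-in sketch with hypothetical values (the bucket must be created
+# before the first backup runs):
+#
+#   velero_s3_url: https://object-store.example.com
+#   velero_bucket_name: azimuth-backups
+#
+# The S3 credentials for Velero belong in secrets.yml alongside the other
+# secrets, not in this file.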