diff --git a/modules/bootstrap/examples/basic/main.tf b/modules/bootstrap/examples/basic/main.tf index e8a3fdd..39ed969 100644 --- a/modules/bootstrap/examples/basic/main.tf +++ b/modules/bootstrap/examples/basic/main.tf @@ -35,7 +35,7 @@ provider "harvester" { provider "tls" {} module "bootstrap" { - source = "github.com/wso2-enterprise/open-cloud-datacenter//modules/bootstrap?ref=v0.1.0" + source = "../.." vm_name = "rancher-bootstrap" node_count = 1 @@ -47,7 +47,7 @@ module "bootstrap" { # Credentials – supply via tfvars or environment variables; never hardcode vm_password = var.vm_password - rancher_admin_password = var.rancher_admin_password + bootstrap_password = var.bootstrap_password rancher_hostname = "rancher.example.internal" @@ -63,7 +63,7 @@ variable "vm_password" { sensitive = true } -variable "rancher_admin_password" { +variable "bootstrap_password" { type = string sensitive = true } diff --git a/modules/bootstrap/main.tf b/modules/bootstrap/main.tf index fa7ca98..c1efaf5 100644 --- a/modules/bootstrap/main.tf +++ b/modules/bootstrap/main.tf @@ -1,14 +1,10 @@ terraform { - required_version = ">= 1.3" + required_version = ">= 1.5" required_providers { harvester = { source = "harvester/harvester" version = "~> 1.7" } - rancher2 = { - source = "rancher/rancher2" - version = "~> 13.1" - } tls = { source = "hashicorp/tls" version = "~> 4.0" @@ -16,92 +12,146 @@ terraform { } } +# ── SSH key pair (greenfield only) ──────────────────────────────────────────── +# Set create_ssh_key = false to attach existing ssh_key_ids instead. resource "tls_private_key" "bootstrap_key" { + count = var.create_ssh_key ? 1 : 0 algorithm = "RSA" rsa_bits = 4096 } resource "harvester_ssh_key" "bootstrap_key" { + count = var.create_ssh_key ? 
1 : 0 name = "${var.vm_name}-ssh-key" namespace = var.harvester_namespace - public_key = tls_private_key.bootstrap_key.public_key_openssh + public_key = tls_private_key.bootstrap_key[0].public_key_openssh } -# Removed dynamic VLAN creation as DHCP is failing in the cluster - -# 1. Create the Cloud-Init Secret for the VM (Bypasses 2KB limit) +# ── Cloud-init secret (greenfield only) ────────────────────────────────────── +# Set create_cloudinit_secret = false and provide existing_cloudinit_secret_name instead. resource "harvester_cloudinit_secret" "cloudinit" { - count = var.node_count + count = var.create_cloudinit_secret ? var.node_count : 0 name = var.node_count > 1 ? "${var.vm_name}-cloudinit-${count.index}" : "${var.vm_name}-cloudinit" namespace = var.harvester_namespace user_data = templatefile("${path.module}/templates/cloud-init.yaml.tpl", { - password = var.vm_password, - cluster_dns = var.rancher_hostname, - rancher_password = var.bootstrap_password, - ssh_public_key = tls_private_key.bootstrap_key.public_key_openssh, - node_index = count.index, - node_count = var.node_count, - lb_ip = var.ippool_start # Using the LB IP for join logic + password = var.vm_password + cluster_dns = var.rancher_hostname + rancher_password = var.bootstrap_password + ssh_public_key = tls_private_key.bootstrap_key[0].public_key_openssh + node_index = count.index + node_count = var.node_count + lb_ip = var.ippool_start }) } -# 2. Create the Harvester VM +locals { + # Resolve the SSH key IDs: either freshly generated or caller-supplied + ssh_key_ids = var.create_ssh_key ? [harvester_ssh_key.bootstrap_key[0].id] : var.ssh_key_ids +} + +# ── Input validation ────────────────────────────────────────────────────────── + +# The cloud-init template embeds the generated SSH public key. If create_ssh_key +# is false the tls_private_key resource is empty, causing an invalid-index error. 
+check "ssh_key_required_for_cloudinit" { + assert { + condition = !var.create_cloudinit_secret || var.create_ssh_key + error_message = "create_ssh_key must be true when create_cloudinit_secret is true (the cloud-init template embeds the generated SSH public key)." + } +} + +# When reusing an existing cloud-init secret, the name must be provided. +check "existing_cloudinit_secret_name_required" { + assert { + condition = var.create_cloudinit_secret || var.existing_cloudinit_secret_name != "" + error_message = "existing_cloudinit_secret_name is required when create_cloudinit_secret = false." + } +} + +# ── Rancher server VM ───────────────────────────────────────────────────────── resource "harvester_virtualmachine" "rancher_server" { count = var.node_count name = var.node_count > 1 ? "${var.vm_name}-${count.index}" : var.vm_name namespace = var.harvester_namespace restart_after_update = true - cpu = 4 - memory = "8Gi" + cpu = var.vm_cpu + memory = var.vm_memory run_strategy = "RerunOnFailure" machine_type = "q35" - ssh_keys = [harvester_ssh_key.bootstrap_key.id] + ssh_keys = local.ssh_key_ids - network_interface { - name = "nic-1" - type = "masquerade" + # Masquerade (NAT): default for greenfield; no external network required + dynamic "network_interface" { + for_each = var.network_type == "masquerade" ? [1] : [] + content { + name = var.network_interface_name + type = "masquerade" + } + } + + # Bridge: for VMs that need direct VLAN access (e.g. existing production VMs) + dynamic "network_interface" { + for_each = var.network_type == "bridge" ? [1] : [] + content { + name = var.network_interface_name + type = "bridge" + network_name = var.network_name + mac_address = var.network_mac_address != "" ? 
var.network_mac_address : null + } } disk { - name = "rootdisk" + name = var.vm_disk_name type = "disk" - size = "40Gi" + size = var.vm_disk_size bus = "virtio" boot_order = 1 image = var.ubuntu_image_id - auto_delete = true + auto_delete = var.vm_disk_auto_delete + } + + # USB tablet input device — some VMs require this for correct cursor behaviour + # in the Harvester console; set enable_usb_tablet = true to include it. + dynamic "input" { + for_each = var.enable_usb_tablet ? [1] : [] + content { + name = "tablet" + type = "tablet" + bus = "usb" + } } cloudinit { - user_data_secret_name = harvester_cloudinit_secret.cloudinit[count.index].name - network_data = "" + user_data_secret_name = var.create_cloudinit_secret ? harvester_cloudinit_secret.cloudinit[count.index].name : var.existing_cloudinit_secret_name + network_data_secret_name = var.create_cloudinit_secret ? null : var.existing_cloudinit_secret_name } - # Rancher is installed entirely by cloud-init inside the VM (RKE2 + cert-manager + Helm). - # The VM uses a masquerade network so Terraform cannot SSH into it directly. provisioner "local-exec" { - command = "echo 'Please wait for cloud-init to finish installing RKE2/K3s and Rancher internally!'" + command = var.create_cloudinit_secret ? "echo 'VM created — cloud-init will install RKE2/Rancher internally.'" : "echo 'VM imported — cloud-init ran at initial provision time.'" } } -# 3. Expose the Rancher VM via a Load Balancer +# ── Load Balancer + IP Pool (greenfield only) ───────────────────────────────── +# Set create_lb = false when the Rancher VM is reachable directly via its +# bridge IP (no dedicated LB/IP-pool needed). resource "harvester_loadbalancer" "rancher_lb" { + count = var.create_lb ? 
1 : 0 name = "${var.vm_name}-lb" namespace = var.harvester_namespace depends_on = [ harvester_virtualmachine.rancher_server, - harvester_ippool.rancher_ips + harvester_ippool.rancher_ips, ] workload_type = "vm" ipam = "pool" - ippool = harvester_ippool.rancher_ips.name + ippool = harvester_ippool.rancher_ips[0].name listener { name = "https" @@ -131,9 +181,9 @@ resource "harvester_loadbalancer" "rancher_lb" { } } -# 4. Create an IP Pool for the Load Balancer resource "harvester_ippool" "rancher_ips" { - name = "${var.vm_name}-ips" + count = var.create_lb ? 1 : 0 + name = "${var.vm_name}-ips" range { start = var.ippool_start @@ -142,11 +192,3 @@ resource "harvester_ippool" "rancher_ips" { gateway = var.ippool_gateway } } - -# Rancher is installed inside the VM by cloud-init (cert-manager + Helm). -# rancher2_bootstrap waits for Rancher to be reachable and sets the permanent admin -# password. Re-run `terraform apply` if Rancher is still starting up on first attempt. -resource "rancher2_bootstrap" "admin" { - initial_password = var.bootstrap_password - password = var.rancher_admin_password -} diff --git a/modules/bootstrap/outputs.tf b/modules/bootstrap/outputs.tf index 3ebb455..967d34c 100644 --- a/modules/bootstrap/outputs.tf +++ b/modules/bootstrap/outputs.tf @@ -1,15 +1,18 @@ +locals { + rancher_lb_ip = var.create_lb ? 
harvester_loadbalancer.rancher_lb[0].ip_address : var.static_rancher_ip +} + output "rancher_hostname" { value = var.rancher_hostname - description = "The FQDN of the bootstrapped Rancher server" + description = "FQDN of the Rancher server" } output "rancher_lb_ip" { - value = harvester_loadbalancer.rancher_lb.ip_address - description = "The IP address of the LoadBalancer exposing Rancher" + value = local.rancher_lb_ip + description = "IP used to reach Rancher: LoadBalancer IP (greenfield) or bridge VM IP (brownfield)" } -output "admin_token" { - value = rancher2_bootstrap.admin.token - description = "Rancher admin API token for use by downstream phases" - sensitive = true +output "vm_id" { + value = harvester_virtualmachine.rancher_server[0].id + description = "Harvester resource ID of the Rancher server VM (namespace/name)" } diff --git a/modules/bootstrap/variables.tf b/modules/bootstrap/variables.tf index 2400eb4..ece2a9c 100644 --- a/modules/bootstrap/variables.tf +++ b/modules/bootstrap/variables.tf @@ -1,27 +1,99 @@ -variable "vm_memory" { - type = string - description = "Memory size for the Rancher VM (e.g. 
'8Gi')" - default = "8Gi" -} - +# ── VM identity ─────────────────────────────────────────────────────────────── variable "vm_name" { type = string description = "Name of the Rancher server VM" default = "rancher-bootstrap" } +variable "harvester_namespace" { + type = string + description = "Harvester namespace to deploy into" + default = "default" +} + variable "node_count" { type = number - description = "Number of nodes in the bootstrap cluster" + description = "Number of VM instances (1 for single-node, 3 for HA)" default = 1 } -variable "harvester_namespace" { +# ── VM hardware ─────────────────────────────────────────────────────────────── +variable "vm_cpu" { + type = number + description = "vCPU count for the Rancher VM" + default = 4 +} + +variable "vm_memory" { type = string - description = "Harvester namespace to deploy into" - default = "default" + description = "Memory size for the Rancher VM (e.g. '8Gi', '16Gi')" + default = "8Gi" +} + +variable "vm_disk_name" { + type = string + description = "Name of the root disk in the VM spec" + default = "disk-0" +} + +variable "vm_disk_size" { + type = string + description = "Root disk size for the Rancher VM" + default = "40Gi" +} + +variable "vm_disk_auto_delete" { + type = bool + description = "Delete the root disk when the VM is destroyed. Set false for production VMs." + default = true +} + +variable "ubuntu_image_id" { + type = string + description = "Harvester resource ID of the Ubuntu cloud image (e.g. 
'default/image-cwl4b')" +} + +variable "enable_usb_tablet" { + type = bool + description = "Attach a USB tablet input device (required by some VMs for correct console cursor behaviour)" + default = false } +# ── Network ─────────────────────────────────────────────────────────────────── +variable "network_type" { + type = string + description = "'masquerade' (NAT, default for greenfield) or 'bridge' (direct VLAN, for brownfield/production)" + default = "masquerade" + + validation { + condition = contains(["masquerade", "bridge"], var.network_type) + error_message = "network_type must be 'masquerade' or 'bridge'." + } +} + +variable "network_interface_name" { + type = string + description = "Name of the network interface inside the VM spec (e.g. 'nic-1' or 'default')" + default = "nic-1" +} + +variable "network_name" { + type = string + description = "NetworkAttachmentDefinition name for bridge networks (e.g. 'iaas-net/vm-subnet-001'). Only used when network_type = 'bridge'." + default = "" + validation { + condition = var.network_type != "bridge" || var.network_name != "" + error_message = "network_name is required when network_type = 'bridge'." + } +} + +variable "network_mac_address" { + type = string + description = "MAC address to assign to the bridge NIC. Leave empty to auto-assign. Only used when network_type = 'bridge'." + default = "" +} + +# Kept for backwards-compatibility (used in VLAN creation — currently disabled in the module). variable "cluster_network_name" { type = string description = "Name of the base cluster network in Harvester (e.g. 
'mgmt')" @@ -30,60 +102,110 @@ variable "cluster_network_name" { variable "cluster_vlan_id" { type = number - description = "The VLAN tag ID for the bootstrap node network" + description = "VLAN tag ID for the bootstrap node network" default = 100 } variable "cluster_vlan_gateway" { type = string - description = "The gateway IP for the new VLAN (Optional)" + description = "Gateway IP for the new VLAN (optional)" default = "" } -variable "ubuntu_image_id" { +# ── SSH key ─────────────────────────────────────────────────────────────────── +variable "create_ssh_key" { + type = bool + description = "If true, generate a new RSA key-pair and register it as a Harvester SSH key. Set false to use existing ssh_key_ids." + default = true +} + +variable "ssh_key_ids" { + type = list(string) + description = "List of existing Harvester SSH key IDs to attach when create_ssh_key = false (e.g. ['default/madawa'])." + default = [] +} + +# ── Cloud-init secret ───────────────────────────────────────────────────────── +variable "create_cloudinit_secret" { + type = bool + description = "If true, render and create a cloud-init secret from the built-in template. Set false to reference an existing secret." + default = true +} + +variable "existing_cloudinit_secret_name" { type = string - description = "Harvester ID of the Ubuntu Cloud Image" + description = "Name of an existing cloud-init secret to attach when create_cloudinit_secret = false." + default = "" } variable "vm_password" { type = string - description = "Default password for the ubuntu user" + description = "Default password for the ubuntu user (used in cloud-init template). Required when create_cloudinit_secret = true." sensitive = true + default = "" } variable "rancher_hostname" { type = string - description = "FQDN for the Rancher UI" + description = "FQDN for the Rancher UI (e.g. 
'rancher-lk-prod.wso2.com')" } variable "bootstrap_password" { type = string - description = "Temporary password set by the Rancher Helm chart during cloud-init install" + description = "Temporary Rancher admin password set by the Helm chart during cloud-init install. Required when create_cloudinit_secret = true." sensitive = true + default = "" } -variable "rancher_admin_password" { +# ── Load Balancer / IP Pool ─────────────────────────────────────────────────── +variable "create_lb" { + type = bool + description = "If true, create a Harvester LoadBalancer and IP pool to expose Rancher. Set false when the VM is directly reachable via a bridge IP." + default = true +} + +variable "static_rancher_ip" { type = string - description = "Permanent admin password to configure on the Rancher instance" - sensitive = true + description = "IP of the Rancher VM on the internal bridge network when create_lb = false. Passed through as rancher_lb_ip output for CoreDNS." + default = "" } variable "ippool_subnet" { type = string - description = "Subnet for the IP pool (e.g. 192.168.10.1/24)" + description = "Subnet CIDR for the IP pool (e.g. '192.168.10.0/24'). Required when create_lb = true." + default = "" + validation { + condition = !var.create_lb || var.ippool_subnet != "" + error_message = "ippool_subnet is required when create_lb = true." + } } variable "ippool_gateway" { type = string - description = "Gateway for the IP pool" + description = "Gateway for the IP pool. Required when create_lb = true." + default = "" + validation { + condition = !var.create_lb || var.ippool_gateway != "" + error_message = "ippool_gateway is required when create_lb = true." + } } variable "ippool_start" { type = string - description = "Start of the IP range for the pool" + description = "Start of the IP range for the pool. Required when create_lb = true." 
+ default = "" + validation { + condition = !var.create_lb || var.ippool_start != "" + error_message = "ippool_start is required when create_lb = true." + } } variable "ippool_end" { type = string - description = "End of the IP range for the pool" + description = "End of the IP range for the pool. Required when create_lb = true." + default = "" + validation { + condition = !var.create_lb || var.ippool_end != "" + error_message = "ippool_end is required when create_lb = true." + } } diff --git a/modules/identity/providers/asgardeo/main.tf b/modules/identity/providers/asgardeo/main.tf index 6698133..5026223 100644 --- a/modules/identity/providers/asgardeo/main.tf +++ b/modules/identity/providers/asgardeo/main.tf @@ -38,4 +38,14 @@ resource "asgardeo_application" "this" { skip_login_consent = var.skip_consent skip_logout_consent = var.skip_consent } + + claim_configuration { + dynamic "requested_claims" { + for_each = var.requested_claims + content { + uri = requested_claims.value.uri + mandatory = requested_claims.value.mandatory + } + } + } } diff --git a/modules/identity/providers/asgardeo/variables.tf b/modules/identity/providers/asgardeo/variables.tf index 86840e9..2ef4ea9 100644 --- a/modules/identity/providers/asgardeo/variables.tf +++ b/modules/identity/providers/asgardeo/variables.tf @@ -39,3 +39,29 @@ variable "skip_consent" { description = "Skip login and logout consent screens for seamless SSO." default = true } + +variable "requested_claims" { + type = list(object({ + uri = string + mandatory = optional(bool, false) + })) + description = <<-EOT + Local claim URIs to include in the OIDC token. Controls which user attributes + the relying party (e.g. Rancher) can read from the ID token. 
+ + Common Asgardeo claim URIs: + - "http://wso2.org/claims/emailaddress" → email + - "http://wso2.org/claims/username" → username (shown as display name) + - "http://wso2.org/claims/groups" → group memberships (required for Rancher RBAC) + - "http://wso2.org/claims/givenname" → first name + - "http://wso2.org/claims/lastname" → last name + + Defaults to email + username + groups, which is the minimum needed for readable + display names and group-based RBAC in Rancher. + EOT + default = [ + { uri = "http://wso2.org/claims/emailaddress", mandatory = false }, + { uri = "http://wso2.org/claims/username", mandatory = false }, + { uri = "http://wso2.org/claims/groups", mandatory = true }, + ] +} diff --git a/modules/identity/rancher-oidc/main.tf b/modules/identity/rancher-oidc/main.tf index 31d4a41..ba949fc 100644 --- a/modules/identity/rancher-oidc/main.tf +++ b/modules/identity/rancher-oidc/main.tf @@ -8,6 +8,7 @@ resource "rancher2_auth_config_generic_oidc" "this" { jwks_url = var.jwks_url scopes = var.scopes group_search_enabled = var.group_search_enabled + groups_field = var.groups_field access_mode = var.access_mode enabled = var.enabled } diff --git a/modules/identity/rancher-oidc/variables.tf b/modules/identity/rancher-oidc/variables.tf index c8f4794..f52134d 100644 --- a/modules/identity/rancher-oidc/variables.tf +++ b/modules/identity/rancher-oidc/variables.tf @@ -50,6 +50,16 @@ variable "group_search_enabled" { default = true } +variable "groups_field" { + type = string + description = "JWT claim name that contains the user's group memberships (e.g. \"groups\")." + default = "groups" + validation { + condition = length(trimspace(var.groups_field)) > 0 && length(regexall("\\s", var.groups_field)) == 0 + error_message = "groups_field must be a non-empty JWT claim name without whitespace." + } +} + variable "access_mode" { type = string description = "Rancher access mode: 'unrestricted', 'restricted', or 'required'." 
diff --git a/modules/management/cluster-roles/README.md b/modules/management/cluster-roles/README.md index d552f78..5c2ab96 100644 --- a/modules/management/cluster-roles/README.md +++ b/modules/management/cluster-roles/README.md @@ -16,7 +16,25 @@ module is not required. | Role Name | Context | Purpose | |-----------|---------|---------| -| `vm-metrics-observer` | project | Read-only access to VM status and metrics for the Harvester dashboard. No mutating verbs. | +| `vm-manager` | project | Full lifecycle management of VMs: create, configure, start/stop/restart, console, and delete. | +| `network-manager` | cluster | Manage Harvester VLAN infrastructure and NetworkAttachmentDefinitions. Bind only via `rancher2_cluster_role_template_binding`. | +| `vm-metrics-observer` | project | Read-only access to VM status and metrics. No mutating verbs. | + +### `vm-manager` Permissions + +| API Group | Resources | Verbs | +|-----------|-----------|-------| +| `kubevirt.io` | `virtualmachines`, `virtualmachineinstances`, `virtualmachineinstancepresets`, `virtualmachineinstancereplicasets` | `get`, `list`, `watch`, `create`, `update`, `patch`, `delete` | +| `subresources.kubevirt.io` | `virtualmachines/start`, `virtualmachines/stop`, `virtualmachines/restart`, `virtualmachines/migrate`, `virtualmachineinstances/vnc`, `virtualmachineinstances/console`, `virtualmachineinstances/portforward`, `virtualmachineinstances/pause`, `virtualmachineinstances/unpause` | `get`, `update` | +| `subresources.kubevirt.io` | `virtualmachineinstances/metrics` | `get` | +| `cdi.kubevirt.io` | `datavolumes` | `get`, `list`, `watch`, `create`, `update`, `patch`, `delete` | +| `harvesterhci.io` | `virtualmachineimages` | `get`, `list`, `watch` | +| `harvesterhci.io` | `keypairs` | `get`, `list`, `watch`, `create`, `delete` | +| `""` (core) | `secrets`, `configmaps` | `get`, `list`, `watch`, `create`, `update`, `patch`, `delete` | +| `""` (core) | `services/proxy` | `get` | + +This role is intended for 
product team members who own their VMs end-to-end within the +quota and namespace boundaries imposed by the `tenant-space` module. ### `vm-metrics-observer` Permissions @@ -27,7 +45,8 @@ module is not required. | `""` (core) | `services/proxy` | `get` | This role intentionally **excludes** `update`, `patch`, `delete`, and subresources that -control VM power state (`start`, `stop`, `restart`, `migrate`). +control VM power state (`start`, `stop`, `restart`, `migrate`). Use it for users who +need Harvester dashboard visibility only (e.g. on-call monitoring access). ## Requirements @@ -43,13 +62,22 @@ module "cluster_roles" { source = "github.com/wso2-enterprise/open-cloud-datacenter//modules/management/cluster-roles?ref=v0.1.x" } -# Pass the output to tenant-space modules -module "tenant_space_example" { +module "tenant_space_iam" { source = "github.com/wso2-enterprise/open-cloud-datacenter//modules/management/tenant-space?ref=v0.1.x" ... group_role_bindings = [ { - group_principal_id = var.team_group_id + group_principal_id = var.iam_team_group_id + role_template_id = "project-member" + }, + { + group_principal_id = var.iam_team_group_id + role_template_id = module.cluster_roles.vm_manager_role_id + }, + # Add the observer group to the same tenant space rather than creating a + # second module block — a second call would collide on project creation. + { + group_principal_id = var.sre_group_id role_template_id = module.cluster_roles.vm_metrics_observer_role_id }, ] @@ -60,6 +88,8 @@ module "tenant_space_example" { | Name | Description | |------|-------------| +| `vm_manager_role_id` | Role template ID for `vm-manager`. Pass to `tenant-space` `group_role_bindings`. | +| `network_manager_role_id` | Role template ID for `network-manager` (cluster-scoped). Pass to `rancher2_cluster_role_template_binding`. | | `vm_metrics_observer_role_id` | Role template ID for `vm-metrics-observer`. Pass to `tenant-space` `group_role_bindings`. 
| ## Notes diff --git a/modules/management/cluster-roles/main.tf b/modules/management/cluster-roles/main.tf index 7ed23df..db52b7d 100644 --- a/modules/management/cluster-roles/main.tf +++ b/modules/management/cluster-roles/main.tf @@ -1,6 +1,107 @@ # Cluster-level custom role templates. # Apply once per Rancher instance; referenced by tenant-space role bindings. +# Full lifecycle management of VMs within a tenant project. +# Covers create/update/delete of VMs and data volumes, power operations +# (start/stop/restart/migrate), and console/VNC access. Does NOT grant +# access to cluster-level resources or other tenants' namespaces. +resource "rancher2_role_template" "vm_manager" { + name = "vm-manager" + description = "Full lifecycle management of VMs: create, configure, start/stop/restart, console access, and delete. Scoped to the tenant project." + context = "project" + + # Full CRUD on VM objects + rules { + api_groups = ["kubevirt.io"] + resources = ["virtualmachines", "virtualmachineinstances", "virtualmachineinstancepresets", "virtualmachineinstancereplicasets"] + verbs = ["get", "list", "watch", "create", "update", "patch", "delete"] + } + + # Power operations and console/VNC access + rules { + api_groups = ["subresources.kubevirt.io"] + resources = ["virtualmachines/start", "virtualmachines/stop", "virtualmachines/restart", "virtualmachines/migrate", "virtualmachineinstances/vnc", "virtualmachineinstances/console", "virtualmachineinstances/portforward", "virtualmachineinstances/pause", "virtualmachineinstances/unpause"] + verbs = ["get", "update"] + } + + # VM metrics (for Harvester dashboard graphs) + rules { + api_groups = ["subresources.kubevirt.io"] + resources = ["virtualmachineinstances/metrics"] + verbs = ["get"] + } + + # Data volumes (VM disks backed by PVCs) + rules { + api_groups = ["cdi.kubevirt.io"] + resources = ["datavolumes"] + verbs = ["get", "list", "watch", "create", "update", "patch", "delete"] + } + + # Read access to VM images available 
in the namespace + rules { + api_groups = ["harvesterhci.io"] + resources = ["virtualmachineimages"] + verbs = ["get", "list", "watch"] + } + + # SSH keypairs — full CRUD so tenants can inject and remove keys via workloads/vm + rules { + api_groups = ["harvesterhci.io"] + resources = ["keypairs"] + verbs = ["get", "list", "watch", "create", "delete"] + } + + # Cloud-init secrets and SSH key secrets + rules { + api_groups = [""] + resources = ["secrets", "configmaps"] + verbs = ["get", "list", "watch", "create", "update", "patch", "delete"] + } + + # Service proxy for UI routing + rules { + api_groups = [""] + resources = ["services/proxy"] + verbs = ["get"] + } +} + +# Cluster-scoped role: exclusive control over Harvester VLAN infrastructure. +# context = "cluster" means rules apply at cluster (not project) scope, so this +# role can never be granted through a project role binding — it must be assigned +# via rancher2_cluster_role_template_binding, which requires operator-level access. +# +# Why consumers cannot create VLANs even as project-member: +# a) VlanConfig and ClusterNetwork are cluster-scoped CRDs (not namespaced). +# b) NetworkAttachmentDefinitions in harvester-public are outside their project namespace. +# c) The built-in project-member role grants no cluster-level RBAC whatsoever. +# Consumers reference pre-created networks by name only (network_name in VM spec). +resource "rancher2_role_template" "network_manager" { + name = "network-manager" + description = "Create, modify, and delete Harvester VLAN infrastructure (VlanConfig, ClusterNetwork, NodeNetwork) and NetworkAttachmentDefinitions. Restricted to DC operations group via cluster-level binding." + context = "cluster" + + # Harvester VLAN infrastructure — all cluster-scoped CRDs. + # VlanConfig: maps a VLAN ID to a ClusterNetwork interface on each node. + # ClusterNetwork: represents a physical NIC/bond available for VLAN tagging. + # NodeNetwork: per-node network status and NIC inventory. 
+ # LinkMonitor: monitors NIC link state across the cluster. + rules { + api_groups = ["network.harvesterhci.io"] + resources = ["vlanconfigs", "clusternetworks", "nodenetworks", "linkmonitors"] + verbs = ["get", "list", "watch", "create", "update", "patch", "delete"] + } + + # NetworkAttachmentDefinition: the namespace-scoped resource VMs reference by name. + # DC ops creates these in harvester-public; consumers can list/get but not create. + rules { + api_groups = ["k8s.cni.cncf.io"] + resources = ["network-attachment-definitions"] + verbs = ["get", "list", "watch", "create", "update", "patch", "delete"] + } +} + # Grants read-only visibility into VM status and metrics for the Harvester # dashboard. Intentionally excludes all mutating verbs (update, patch, delete) # and subresources that control VM power state (start, stop, restart, migrate). diff --git a/modules/management/cluster-roles/outputs.tf b/modules/management/cluster-roles/outputs.tf index a7dc8ca..65ba707 100644 --- a/modules/management/cluster-roles/outputs.tf +++ b/modules/management/cluster-roles/outputs.tf @@ -1,4 +1,14 @@ +output "vm_manager_role_id" { + value = rancher2_role_template.vm_manager.id + description = "Role template ID for the vm-manager role. Pass to tenant-space module's group_role_bindings." +} + output "vm_metrics_observer_role_id" { value = rancher2_role_template.vm_metrics_observer.id - description = "Role template ID for the vm-metrics-observer role. Pass to tenant-space module's member_role_ids." + description = "Role template ID for the vm-metrics-observer role. Pass to tenant-space module's group_role_bindings." +} + +output "network_manager_role_id" { + value = rancher2_role_template.network_manager.id + description = "Role template ID for the network-manager cluster role. Pass to rancher2_cluster_role_template_binding for the DC ops OIDC group." 
} diff --git a/modules/management/harvester-integration/examples/basic/main.tf b/modules/management/harvester-integration/examples/basic/main.tf index 7ab8e88..ba96c2b 100644 --- a/modules/management/harvester-integration/examples/basic/main.tf +++ b/modules/management/harvester-integration/examples/basic/main.tf @@ -2,8 +2,7 @@ # # This example integrates a Harvester cluster into an existing Rancher server # by enabling the Harvester feature flag, installing the UI extension, creating -# a cloud credential, patching CoreDNS so Harvester can resolve the internal -# Rancher FQDN, and applying the registration manifest. +# a cloud credential, and applying the registration manifest. # # Prerequisites: # - Rancher deployed and accessible (e.g. via the bootstrap module) @@ -11,7 +10,6 @@ # - kubectl available in PATH (used by local-exec provisioners) # - The rancher2 provider configured with your Rancher URL and access key # - The harvester provider configured with your Harvester kubeconfig -# - The kubernetes provider configured against the Harvester cluster terraform { required_version = ">= 1.3" @@ -25,10 +23,6 @@ terraform { source = "harvester/harvester" version = "~> 1.7" } - kubernetes = { - source = "hashicorp/kubernetes" - version = "~> 3.0" - } } } @@ -39,18 +33,14 @@ provider "rancher2" { } provider "harvester" { - kubeconfig = file("~/.kube/harvester-config") -} - -provider "kubernetes" { - config_path = "~/.kube/harvester-config" + kubeconfig = file(pathexpand("~/.kube/harvester-config")) } module "harvester_integration" { - source = "github.com/wso2-enterprise/open-cloud-datacenter//modules/management/harvester-integration?ref=v0.1.0" + source = "github.com/wso2/open-cloud-datacenter//modules/management/harvester-integration?ref=v0.2.0" - harvester_kubeconfig = file("~/.kube/harvester-config") + harvester_kubeconfig = file(pathexpand("~/.kube/harvester-config")) harvester_cluster_name = "harvester-hci" - rancher_hostname = "rancher.example.internal" - 
rancher_lb_ip = "192.168.10.10" + cloud_credential_name = "harvester-local-creds" + cluster_labels = {} } diff --git a/modules/management/harvester-integration/main.tf b/modules/management/harvester-integration/main.tf index 6141ad0..e09c58e 100644 --- a/modules/management/harvester-integration/main.tf +++ b/modules/management/harvester-integration/main.tf @@ -9,29 +9,30 @@ terraform { source = "harvester/harvester" version = "~> 1.7" } - kubernetes = { - source = "hashicorp/kubernetes" - version = "~> 3.0" - } } } # 1. Enable Harvester Feature Flag -resource "rancher2_setting" "harvester_enabled" { +# These are Rancher feature flags (at /v3/features), not settings (/v3/settings). +# rancher2_feature does not support import — set manage_feature_flags = false for brownfield. +resource "rancher2_feature" "harvester" { + count = var.manage_feature_flags ? 1 : 0 name = "harvester" - value = "true" + value = true } # 2. (Optional) Enable Harvester Baremetal Container Workload (Experimental) -resource "rancher2_setting" "harvester_baremetal" { +resource "rancher2_feature" "harvester_baremetal" { + count = var.manage_feature_flags ? 1 : 0 name = "harvester-baremetal-container-workload" - value = "true" + value = true } # 3. Add Harvester UI Extension Catalog +# Rancher names this repo "harvester" by default when installed via Harvester integration. resource "rancher2_catalog_v2" "harvester_extensions" { cluster_id = "local" - name = "harvester-extensions" + name = "harvester" git_repo = "https://github.com/harvester/harvester-ui-extension" git_branch = "gh-pages" } @@ -43,26 +44,31 @@ data "rancher2_project" "local_system" { } # 6. Install Harvester UI Extension App +# Set manage_app = false for brownfield (app already installed; rancher2_app_v2 import +# does not populate name/namespace, which forces a destroy+recreate). resource "rancher2_app_v2" "harvester" { + count = var.manage_app ? 
1 : 0 cluster_id = "local" - name = "harvester" # User confirmed this name + name = "harvester" namespace = "cattle-ui-plugin-system" repo_name = rancher2_catalog_v2.harvester_extensions.name chart_name = "harvester" - chart_version = "1.7.1" # User confirmed this version + chart_version = var.harvester_chart_version project_id = data.rancher2_project.local_system.id wait = true - # Ensure feature flag and catalogs are ready depends_on = [ - rancher2_setting.harvester_enabled, - rancher2_catalog_v2.harvester_extensions + rancher2_feature.harvester, + rancher2_catalog_v2.harvester_extensions, ] } # 7. Create Cloud Credential for Harvester Import +# Set create_cloud_credential = false for brownfield (rancher2_cloud_credential does not +# support import for the harvester driver; the credential already exists in production). resource "rancher2_cloud_credential" "harvester" { - name = "harvester-local-creds" + count = var.create_cloud_credential ? 1 : 0 + name = var.cloud_credential_name harvester_credential_config { cluster_id = "local" cluster_type = "imported" @@ -77,16 +83,18 @@ resource "rancher2_cluster" "harvester_hci" { name = var.harvester_cluster_name description = "Harvester HCI" - labels = { - "provider.cattle.io" = "harvester" - } + labels = merge( + { "provider.cattle.io" = "harvester" }, + var.cluster_labels, + ) depends_on = [rancher2_app_v2.harvester] } # 9. Apply Registration Command to Harvester -# We use a temporary file for the kubeconfig to apply the registration manifest +# Set apply_registration = false for brownfield (cluster already registered and active). resource "null_resource" "apply_harvester_registration" { + count = var.apply_registration ? 
1 : 0 triggers = { registration_command = rancher2_cluster.harvester_hci.cluster_registration_token[0].command kubeconfig = var.harvester_kubeconfig @@ -121,70 +129,14 @@ resource "null_resource" "apply_harvester_registration" { } } - depends_on = [ - rancher2_cluster.harvester_hci, - kubernetes_config_map_v1_data.harvester_coredns_patch, - ] -} - -# 11. Patch Harvester CoreDNS (Direct ConfigMap Fix) -# This allows Harvester nodes and pods to resolve the internal Rancher URL by -# prepending a specific hosts block to the Corefile. -# Must run BEFORE registration so Harvester can reach rancher.lk.internal. -resource "kubernetes_config_map_v1_data" "harvester_coredns_patch" { - metadata { - name = "rke2-coredns-rke2-coredns" - namespace = "kube-system" - } - - data = { - Corefile = <<-EOT - .:53 { - errors - health { - lameduck 10s - } - ready - hosts { - ${var.rancher_lb_ip} ${var.rancher_hostname} - fallthrough - } - kubernetes cluster.local cluster.local in-addr.arpa ip6.arpa { - pods insecure - fallthrough in-addr.arpa ip6.arpa - ttl 30 - } - prometheus 0.0.0.0:9153 - forward . /etc/resolv.conf - cache 30 - loop - reload - loadbalance - } - EOT - } - - force = true # Ensures we overwrite the manual/helm-generated Corefile - depends_on = [rancher2_cluster.harvester_hci] } # 10. Configure Harvester Registration URL -# This is a critical step for Harvester to reach back to Rancher. -# Depends on the CoreDNS patch so rancher.lk.internal resolves before Harvester connects. +# Ensures Harvester has the correct manifest URL to connect back to Rancher. 
resource "harvester_setting" "registration_url" { name = "cluster-registration-url" value = rancher2_cluster.harvester_hci.cluster_registration_token[0].manifest_url - depends_on = [kubernetes_config_map_v1_data.harvester_coredns_patch] -} - -resource "harvester_setting" "rancher_cluster" { - name = "rancher-cluster" - value = jsonencode({ - clusterId = rancher2_cluster.harvester_hci.id - clusterName = rancher2_cluster.harvester_hci.name - }) - - depends_on = [kubernetes_config_map_v1_data.harvester_coredns_patch] + depends_on = [rancher2_cluster.harvester_hci] } diff --git a/modules/management/harvester-integration/variables.tf b/modules/management/harvester-integration/variables.tf index 2f65051..c9ebdc2 100644 --- a/modules/management/harvester-integration/variables.tf +++ b/modules/management/harvester-integration/variables.tf @@ -9,12 +9,46 @@ variable "harvester_cluster_name" { description = "Name for the Harvester cluster in Rancher" default = "harvester-hci" } -variable "rancher_hostname" { + +variable "cloud_credential_name" { type = string - description = "The FQDN of the Rancher server" + description = "Display name for the Harvester cloud credential in Rancher" + default = "harvester-local-creds" +} + +variable "cluster_labels" { + type = map(string) + description = "Additional labels to set on the Harvester cluster object in Rancher. Merged with the required provider.cattle.io=harvester label." + default = {} +} + +# ── Brownfield skip flags ───────────────────────────────────────────────────── +variable "manage_feature_flags" { + type = bool + description = "Create/manage rancher2_feature resources for harvester and harvester-baremetal-container-workload. Set false when the flags are already enabled (brownfield)." + default = true +} + +variable "create_cloud_credential" { + type = bool + description = "Create a Harvester cloud credential in Rancher. Set false when one already exists (brownfield import)." 
+ default = true +} + +variable "apply_registration" { + type = bool + description = "Run the null_resource that applies the cattle-cluster-agent manifest. Set false when the cluster is already registered (brownfield)." + default = true +} + +variable "manage_app" { + type = bool + description = "Create/manage the rancher2_app_v2 Harvester UI extension. Set false when the app is already installed (brownfield import)." + default = true } -variable "rancher_lb_ip" { +variable "harvester_chart_version" { type = string - description = "The IP address of the Rancher LoadBalancer" + description = "Version of the Harvester UI extension Helm chart. Check https://github.com/harvester/harvester-ui-extension/releases for latest." + default = "1.7.1" } diff --git a/modules/management/networking/examples/basic/main.tf b/modules/management/networking/examples/basic/main.tf index 2a5eafa..9900817 100644 --- a/modules/management/networking/examples/basic/main.tf +++ b/modules/management/networking/examples/basic/main.tf @@ -13,7 +13,7 @@ terraform { required_providers { harvester = { source = "harvester/harvester" - version = "~> 0.6.0" + version = "~> 1.7" } } } diff --git a/modules/management/storage/examples/basic/main.tf b/modules/management/storage/examples/basic/main.tf index 88d2075..7f4f304 100644 --- a/modules/management/storage/examples/basic/main.tf +++ b/modules/management/storage/examples/basic/main.tf @@ -15,7 +15,7 @@ terraform { required_providers { harvester = { source = "harvester/harvester" - version = "~> 0.6.0" + version = "~> 1.7" } } } diff --git a/modules/workloads/vm/README.md b/modules/workloads/vm/README.md new file mode 100644 index 0000000..998a897 --- /dev/null +++ b/modules/workloads/vm/README.md @@ -0,0 +1,143 @@ +# Module: workloads/vm + +Creates a single Harvester virtual machine within a tenant namespace. Intended for use +by product teams who have been granted a `tenant-space` with the `vm-manager` role. 
+ +This module creates: +- A `harvester_virtualmachine` with configurable CPU, memory, disk, and networking +- Optionally, a `harvester_ssh_key` (when `create_ssh_key = true`; `ssh_public_key` supplies the key content) +- Optionally, cloud-init user-data/network-data attached through the VM resource (when `user_data` is set) + +## When to Use + +Use this module in a workloads-phase root module (e.g. `04-workloads`) after the +management phase has: +1. Provisioned the tenant namespace via `management/tenant-space` +2. Granted the team the `vm-manager` role via `management/cluster-roles` +3. Downloaded OS images via `management/storage` (use `image_ids` output) + +## Requirements + +| Name | Version | +|------|---------| +| terraform | >= 1.3 | +| harvester/harvester | ~> 1.7 | + +## Prerequisites + +- Harvester namespace already exists (created by `tenant-space`) +- A Harvester network attachment exists for the target VLAN (created by `management/networking`) +- OS image is available in Harvester (downloaded by `management/storage`) + +## Usage + +### Minimal (image name only, no SSH key, no cloud-init) + +```hcl +module "app_vm" { + source = "github.com/wso2-enterprise/open-cloud-datacenter//modules/workloads/vm?ref=v0.1.x" + + name = "app-server-1" + namespace = "iam-team-ns" + image_name = data.terraform_remote_state.management.outputs.image_ids["ubuntu-22-04"] + network_name = "iam-team-vlan" +} +``` + +### With SSH key + +```hcl +module "app_vm" { + source = "github.com/wso2-enterprise/open-cloud-datacenter//modules/workloads/vm?ref=v0.1.x" + + name = "app-server-1" + namespace = "iam-team-ns" + cpu = 4 + memory = "8Gi" + disk_size = "80Gi" + image_name = data.terraform_remote_state.management.outputs.image_ids["ubuntu-22-04"] + network_name = "iam-team-vlan" + ssh_public_key = file(pathexpand("~/.ssh/id_rsa.pub")) + create_ssh_key = true +} +``` + +### With cloud-init + +```hcl +module "app_vm" { + source = "github.com/wso2-enterprise/open-cloud-datacenter//modules/workloads/vm?ref=v0.1.x" + + name = "app-server-1" + 
namespace = "iam-team-ns" + image_name = data.terraform_remote_state.management.outputs.image_ids["ubuntu-22-04"] + network_name = "iam-team-vlan" + ssh_public_key = file(pathexpand("~/.ssh/id_rsa.pub")) + create_ssh_key = true + + user_data = <<-EOT + #cloud-config + package_update: true + packages: + - nginx + runcmd: + - systemctl enable --now nginx + EOT +} +``` + +### Referencing images from the management phase + +```hcl +data "terraform_remote_state" "management" { + backend = "local" + config = { + path = "../02-management/terraform.tfstate" + } +} + +# Use the storage module's image_ids output +locals { + ubuntu_image = data.terraform_remote_state.management.outputs.image_ids["ubuntu-22-04"] +} +``` + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|----------| +| `name` | VM name | `string` | — | yes | +| `namespace` | Harvester namespace (tenant project namespace) | `string` | — | yes | +| `cpu` | Number of vCPUs | `number` | `2` | no | +| `memory` | RAM (e.g. `"4Gi"`) | `string` | `"4Gi"` | no | +| `disk_size` | Root disk size (e.g. `"40Gi"`) | `string` | `"40Gi"` | no | +| `image_name` | Harvester image in `namespace/name` format | `string` | — | yes | +| `network_name` | Harvester network attachment name | `string` | — | yes | +| `run_strategy` | `RerunOnFailure`, `Always`, `Halted`, or `Manual` | `string` | `"RerunOnFailure"` | no | +| `ssh_public_key` | SSH public key content. Used when `create_ssh_key = true`. | `string` | `null` | no | +| `create_ssh_key` | When true, create a `harvester_ssh_key` from `ssh_public_key`. | `bool` | `false` | no | +| `wait_for_lease` | Wait for IP lease on primary NIC. Set false for static cloud-init IPs. | `bool` | `true` | no | +| `user_data` | Cloud-init user-data YAML. Creates a cloud-init secret when set. | `string` | `null` | no | +| `network_data` | Cloud-init network-data config (requires `user_data` to be set). 
| `string` | `""` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| `vm_name` | Name of the created VM. | +| `vm_id` | Harvester resource ID (`namespace/name`). | +| `network_interfaces` | Network interface objects, including the leased IP once the VM is running. | +| `ssh_key_id` | Harvester SSH key ID attached to the VM, or `null` if not provided. | + +## Notes + +- `image_name` accepts the Harvester image ID format `namespace/name` — use the + `image_ids` output from the `management/storage` module to keep this consistent. +- `network_name` must match a network attachment definition in the same Harvester cluster + (created by `management/networking`). +- The VM's IP address is available in `network_interfaces[0].ip_address` after the + lease is obtained (requires `wait_for_lease = true`, which is the default). + Set `wait_for_lease = false` when using static IPs via cloud-init `network_data` + without qemu-guest-agent. +- The `vm-manager` custom role from `management/cluster-roles` must be bound to the + team's group in their `tenant-space` before they can create VMs in the namespace. +- Removing this module or running `terraform destroy` **deletes the VM and its disk**. + Ensure data is backed up before destroying. diff --git a/modules/workloads/vm/main.tf b/modules/workloads/vm/main.tf new file mode 100644 index 0000000..42c9759 --- /dev/null +++ b/modules/workloads/vm/main.tf @@ -0,0 +1,46 @@ +# Optional SSH key — created only when ssh_public_key is provided. +resource "harvester_ssh_key" "this" { + count = var.create_ssh_key ? 1 : 0 + + name = "${var.name}-key" + namespace = var.namespace + public_key = var.ssh_public_key +} + +resource "harvester_virtualmachine" "this" { + name = var.name + namespace = var.namespace + restart_after_update = true + + cpu = var.cpu + memory = var.memory + + run_strategy = var.run_strategy + machine_type = "q35" + + ssh_keys = var.create_ssh_key ? 
[harvester_ssh_key.this[0].id] : [] + + network_interface { + name = "nic-1" + wait_for_lease = var.wait_for_lease + network_name = var.network_name + } + + disk { + name = "rootdisk" + type = "disk" + size = var.disk_size + bus = "virtio" + boot_order = 1 + image = var.image_name + auto_delete = true + } + + dynamic "cloudinit" { + for_each = var.user_data != null ? [1] : [] + content { + user_data = var.user_data + network_data = var.network_data + } + } +} diff --git a/modules/workloads/vm/outputs.tf b/modules/workloads/vm/outputs.tf new file mode 100644 index 0000000..a5ae21c --- /dev/null +++ b/modules/workloads/vm/outputs.tf @@ -0,0 +1,19 @@ +output "vm_name" { + value = harvester_virtualmachine.this.name + description = "Name of the created virtual machine." +} + +output "vm_id" { + value = harvester_virtualmachine.this.id + description = "Harvester resource ID of the virtual machine (namespace/name)." +} + +output "network_interfaces" { + value = harvester_virtualmachine.this.network_interface + description = "Network interface objects, including the leased IP address once the VM is running." +} + +output "ssh_key_id" { + value = var.create_ssh_key ? harvester_ssh_key.this[0].id : null + description = "Harvester SSH key ID attached to the VM, or null when create_ssh_key is false." +} diff --git a/modules/workloads/vm/variables.tf b/modules/workloads/vm/variables.tf new file mode 100644 index 0000000..3466575 --- /dev/null +++ b/modules/workloads/vm/variables.tf @@ -0,0 +1,78 @@ +variable "name" { + type = string + description = "Name for the virtual machine." +} + +variable "namespace" { + type = string + description = "Harvester namespace (tenant project namespace) to create the VM in." +} + +variable "cpu" { + type = number + description = "Number of vCPUs." + default = 2 +} + +variable "memory" { + type = string + description = "RAM in Gi (e.g. \"4Gi\")." 
+ default = "4Gi" +} + +variable "disk_size" { + type = string + description = "Root disk size (e.g. \"40Gi\")." + default = "40Gi" +} + +variable "image_name" { + type = string + description = "Harvester image reference in namespace/name format (e.g. \"default/ubuntu-22-04\"). Use the image_ids output from the management/storage module." +} + +variable "network_name" { + type = string + description = "Harvester network attachment name (e.g. \"iam-team-vlan\"). Must exist in the same namespace or cluster." +} + +variable "run_strategy" { + type = string + description = "VM run strategy: RerunOnFailure, Always, Halted, or Manual." + default = "RerunOnFailure" +} + +# --- SSH access --- + +variable "ssh_public_key" { + type = string + description = "SSH public key content to inject into the VM. When set, a harvester_ssh_key resource is created and attached. Leave null to omit." + default = null + sensitive = true +} + +# --- Cloud-init --- + +variable "user_data" { + type = string + description = "Cloud-init user-data script (plain YAML, not base64). When set, a cloud-init secret is created and attached. Leave null for no cloud-init." + default = null +} + +variable "network_data" { + type = string + description = "Cloud-init network-data config. Ignored if user_data is null." + default = "" +} + +variable "create_ssh_key" { + type = bool + description = "When true, create a harvester_ssh_key from ssh_public_key and attach it to the VM. Must be true for ssh_public_key to have any effect." + default = false +} + +variable "wait_for_lease" { + type = bool + description = "Whether Terraform should wait for an IP lease on the primary NIC. Set to false when using static IPs via cloud-init network_data without qemu-guest-agent." 
+ default = true +} diff --git a/modules/workloads/vm/versions.tf b/modules/workloads/vm/versions.tf new file mode 100644 index 0000000..fc63a15 --- /dev/null +++ b/modules/workloads/vm/versions.tf @@ -0,0 +1,9 @@ +terraform { + required_version = ">= 1.3" + required_providers { + harvester = { + source = "harvester/harvester" + version = "~> 1.7" + } + } +}