diff --git a/capi-lab/Makefile b/capi-lab/Makefile index 47f6fba..272a0db 100644 --- a/capi-lab/Makefile +++ b/capi-lab/Makefile @@ -20,6 +20,8 @@ CONTROL_PLANE_MACHINE_IMAGE ?= ubuntu-24.04 CONTROL_PLANE_MACHINE_SIZE ?= v1-small-x86 WORKER_MACHINE_IMAGE ?= ubuntu-24.04 WORKER_MACHINE_SIZE ?= v1-small-x86 +FIREWALL_MACHINE_IMAGE ?= firewall-ubuntu-3.0 +FIREWALL_MACHINE_SIZE ?= v1-small-x86 IMG ?= ghcr.io/metal-stack/cluster-api-metal-stack-controller:latest diff --git a/capi-lab/mini-lab b/capi-lab/mini-lab index 72acbfb..007c82a 160000 --- a/capi-lab/mini-lab +++ b/capi-lab/mini-lab @@ -1 +1 @@ -Subproject commit 72acbfbffce1866ea6ac8352f38506c5e2f95291 +Subproject commit 007c82a38a5cfabd1deb581b3da1905824624dde diff --git a/config/clusterctl-templates/cluster-template.yaml b/config/clusterctl-templates/cluster-template.yaml index 065ce82..06b1316 100644 --- a/config/clusterctl-templates/cluster-template.yaml +++ b/config/clusterctl-templates/cluster-template.yaml @@ -137,3 +137,50 @@ spec: nodeRegistration: kubeletExtraArgs: cloud-provider: external +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-firewall + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + nodepool: firewall +spec: + clusterName: ${CLUSTER_NAME} + replicas: 1 + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + nodepool: firewall + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + nodepool: firewall + spec: + nodeDrainTimeout: 120s + clusterName: ${CLUSTER_NAME} + version: "${KUBERNETES_VERSION}" + bootstrap: + dataSecretName: ${CLUSTER_NAME}-firewall-data + infrastructureRef: + name: ${CLUSTER_NAME}-firewall + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: MetalStackMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: MetalStackMachineTemplate +metadata: + name: ${CLUSTER_NAME}-firewall +spec: + template: + spec: + size: 
${FIREWALL_MACHINE_SIZE} + image: ${FIREWALL_MACHINE_IMAGE} +--- +apiVersion: v1 +kind: Secret +metadata: + name: ${CLUSTER_NAME}-firewall-data +stringData: + value: "{}" diff --git a/internal/controller/metalstackmachine_controller.go b/internal/controller/metalstackmachine_controller.go index abed6cb..57bebc0 100644 --- a/internal/controller/metalstackmachine_controller.go +++ b/internal/controller/metalstackmachine_controller.go @@ -42,6 +42,7 @@ import ( "github.com/go-logr/logr" "github.com/metal-stack/cluster-api-provider-metal-stack/api/v1alpha1" metalgo "github.com/metal-stack/metal-go" + metalfirewall "github.com/metal-stack/metal-go/api/client/firewall" ipmodels "github.com/metal-stack/metal-go/api/client/ip" metalmachine "github.com/metal-stack/metal-go/api/client/machine" "github.com/metal-stack/metal-go/api/models" @@ -314,26 +315,81 @@ func (r *machineReconciler) create() (*models.V1MachineResponse, error) { }) } - resp, err := r.metalClient.Machine().AllocateMachine(metalmachine.NewAllocateMachineParamsWithContext(r.ctx).WithBody(&models.V1MachineAllocateRequest{ - Partitionid: &r.infraCluster.Spec.Partition, - Projectid: &r.infraCluster.Spec.ProjectID, - PlacementTags: []string{tag.New(tag.ClusterID, r.infraCluster.GetClusterID())}, - Tags: append(r.machineTags(), r.additionalMachineTags()...), - Name: r.infraMachine.Name, - Hostname: r.infraMachine.Name, - Sizeid: &r.infraMachine.Spec.Size, - Imageid: &r.infraMachine.Spec.Image, - Description: fmt.Sprintf("%s/%s for cluster %s/%s", r.infraMachine.Namespace, r.infraMachine.Name, r.infraCluster.Namespace, r.infraCluster.Name), - Networks: nws, - Ips: ips, - UserData: string(bootstrapSecret.Data["value"]), - // TODO: SSHPubKeys, ... 
- }), nil) - if err != nil { - return nil, fmt.Errorf("failed to allocate machine: %w", err) + var m *models.V1MachineResponse + + if strings.Contains(r.infraMachine.Name, "firewall") { + fireResp, err := r.metalClient.Firewall().AllocateFirewall(metalfirewall.NewAllocateFirewallParamsWithContext(r.ctx).WithBody(&models.V1FirewallCreateRequest{ + FirewallRules: &models.V1FirewallRules{ + Egress: []*models.V1FirewallEgressRule{ + { + Comment: "allow all", + Ports: []int32{53, 80, 443, 8080}, + Protocol: "TCP", + To: []string{"0.0.0.0/0"}, + }, + { + Comment: "allow all", + Ports: []int32{53, 123}, + Protocol: "UDP", + To: []string{"0.0.0.0/0"}, + }, + }, + Ingress: []*models.V1FirewallIngressRule{ + { + Comment: "allow all", + Ports: []int32{80, 443, 8080}, + Protocol: "TCP", + From: []string{"0.0.0.0/0"}, + }, + }, + }, + Partitionid: &r.infraCluster.Spec.Partition, + Projectid: &r.infraCluster.Spec.ProjectID, + PlacementTags: []string{tag.New(tag.ClusterID, r.infraCluster.GetClusterID())}, + Tags: append(r.machineTags(), r.additionalMachineTags()...), + Name: r.infraMachine.Name, + Hostname: r.infraMachine.Name, + Sizeid: &r.infraMachine.Spec.Size, + Imageid: &r.infraMachine.Spec.Image, + Description: fmt.Sprintf("firewall %s/%s for cluster %s/%s", r.infraMachine.Namespace, r.infraMachine.Name, r.infraCluster.Namespace, r.infraCluster.Name), + Networks: append(nws, &models.V1MachineAllocationNetwork{ + Autoacquire: ptr.To(true), + Networkid: ptr.To("internet-mini-lab"), + }), + Ips: ips, + UserData: string(bootstrapSecret.Data["value"]), + }), nil) + if err != nil { + return nil, fmt.Errorf("failed to allocate firewall: %w", err) + } + resp, err := r.metalClient.Machine().FindMachine(metalmachine.NewFindMachineParamsWithContext(r.ctx).WithID(*fireResp.Payload.ID), nil) + if err != nil { + return nil, fmt.Errorf("failed to find allocated firewall: %w", err) + } + m = resp.Payload + } else { + resp, err := 
r.metalClient.Machine().AllocateMachine(metalmachine.NewAllocateMachineParamsWithContext(r.ctx).WithBody(&models.V1MachineAllocateRequest{ + Partitionid: &r.infraCluster.Spec.Partition, + Projectid: &r.infraCluster.Spec.ProjectID, + PlacementTags: []string{tag.New(tag.ClusterID, r.infraCluster.GetClusterID())}, + Tags: append(r.machineTags(), r.additionalMachineTags()...), + Name: r.infraMachine.Name, + Hostname: r.infraMachine.Name, + Sizeid: &r.infraMachine.Spec.Size, + Imageid: &r.infraMachine.Spec.Image, + Description: fmt.Sprintf("%s/%s for cluster %s/%s", r.infraMachine.Namespace, r.infraMachine.Name, r.infraCluster.Namespace, r.infraCluster.Name), + Networks: nws, + Ips: ips, + UserData: string(bootstrapSecret.Data["value"]), + // TODO: SSHPubKeys, ... + }), nil) + if err != nil { + return nil, fmt.Errorf("failed to allocate machine: %w", err) + } + m = resp.Payload } - return resp.Payload, nil + return m, nil } func (r *machineReconciler) getMachineStatus(mr *models.V1MachineResponse) (bool, error) {