diff --git a/api/v1beta1/types.go b/api/v1beta1/types.go index 0b975e5476f..93bed4fc707 100644 --- a/api/v1beta1/types.go +++ b/api/v1beta1/types.go @@ -111,9 +111,23 @@ type NetworkSpec struct { // +optional ControlPlaneOutboundLB *LoadBalancerSpec `json:"controlPlaneOutboundLB,omitempty"` + // AdditionalAPIServerLBPorts specifies extra inbound ports for the APIServer load balancer. + // Each port specified (e.g., 9345) creates an inbound rule where the frontend port and the backend port are the same. + // +optional + AdditionalAPIServerLBPorts []LoadBalancerPort `json:"additionalAPIServerLBPorts,omitempty"` + NetworkClassSpec `json:",inline"` } +// LoadBalancerPort specifies additional port for the API server load balancer. +type LoadBalancerPort struct { + // Name for the additional port within LB definition + Name string `json:"name"` + + // Port for the LB definition + Port int32 `json:"port"` +} + // VnetSpec configures an Azure virtual network. type VnetSpec struct { // ResourceGroup is the name of the resource group of the existing virtual network @@ -893,6 +907,17 @@ func (s SubnetSpec) IsIPv6Enabled() bool { return false } +// GetSecurityRuleByDestination returns security group rule, which matches provided destination ports. +func (s SubnetSpec) GetSecurityRuleByDestination(port string) *SecurityRule { + for _, rule := range s.SecurityGroup.SecurityRules { + if rule.DestinationPorts != nil && *rule.DestinationPorts == port { + return &rule + } + } + + return nil +} + // SecurityProfile specifies the Security profile settings for a // virtual machine or virtual machine scale set. 
type SecurityProfile struct { diff --git a/api/v1beta1/types_template.go b/api/v1beta1/types_template.go index 8b272ae254b..e0ed7ae0683 100644 --- a/api/v1beta1/types_template.go +++ b/api/v1beta1/types_template.go @@ -75,6 +75,11 @@ type NetworkTemplateSpec struct { // This is different from APIServerLB, and is used only in private clusters (optionally) for enabling outbound traffic. // +optional ControlPlaneOutboundLB *LoadBalancerClassSpec `json:"controlPlaneOutboundLB,omitempty"` + + // AdditionalAPIServerLBPorts is the configuration for the additional inbound control-plane load balancer ports + // Each port specified (e.g., 9345) creates an inbound rule where the frontend port and the backend port are the same. + // +optional + AdditionalAPIServerLBPorts []LoadBalancerPort `json:"additionalAPIServerLBPorts,omitempty"` } // GetSubnetTemplate returns the subnet template based on the subnet role. diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index b523662a8d0..c569dedd0f5 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -2722,6 +2722,21 @@ func (in *LoadBalancerClassSpec) DeepCopy() *LoadBalancerClassSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerPort) DeepCopyInto(out *LoadBalancerPort) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerPort. +func (in *LoadBalancerPort) DeepCopy() *LoadBalancerPort { + if in == nil { + return nil + } + out := new(LoadBalancerPort) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LoadBalancerProfile) DeepCopyInto(out *LoadBalancerProfile) { *out = *in @@ -3116,6 +3131,11 @@ func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { *out = new(LoadBalancerSpec) (*in).DeepCopyInto(*out) } + if in.AdditionalAPIServerLBPorts != nil { + in, out := &in.AdditionalAPIServerLBPorts, &out.AdditionalAPIServerLBPorts + *out = make([]LoadBalancerPort, len(*in)) + copy(*out, *in) + } out.NetworkClassSpec = in.NetworkClassSpec } @@ -3152,6 +3172,11 @@ func (in *NetworkTemplateSpec) DeepCopyInto(out *NetworkTemplateSpec) { *out = new(LoadBalancerClassSpec) (*in).DeepCopyInto(*out) } + if in.AdditionalAPIServerLBPorts != nil { + in, out := &in.AdditionalAPIServerLBPorts, &out.AdditionalAPIServerLBPorts + *out = make([]LoadBalancerPort, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkTemplateSpec. diff --git a/azure/scope/cluster.go b/azure/scope/cluster.go index bff9416387c..f7f035f2e7b 100644 --- a/azure/scope/cluster.go +++ b/azure/scope/cluster.go @@ -266,6 +266,7 @@ func (s *ClusterScope) LBSpecs() []azure.ResourceSpecGetter { BackendPoolName: s.APIServerLB().BackendPool.Name, IdleTimeoutInMinutes: s.APIServerLB().IdleTimeoutInMinutes, AdditionalTags: s.AdditionalTags(), + AdditionalPorts: s.AdditionalAPIServerLBPorts(), } if s.APIServerLB().FrontendIPs != nil { @@ -299,6 +300,7 @@ func (s *ClusterScope) LBSpecs() []azure.ResourceSpecGetter { BackendPoolName: s.APIServerLB().BackendPool.Name + "-internal", IdleTimeoutInMinutes: s.APIServerLB().IdleTimeoutInMinutes, AdditionalTags: s.AdditionalTags(), + AdditionalPorts: s.AdditionalAPIServerLBPorts(), } privateIPFound := false @@ -771,6 +773,11 @@ func (s *ClusterScope) ControlPlaneOutboundLB() *infrav1.LoadBalancerSpec { return s.AzureCluster.Spec.NetworkSpec.ControlPlaneOutboundLB } +// AdditionalAPIServerLBPorts returns the additional API server ports list. 
+func (s *ClusterScope) AdditionalAPIServerLBPorts() []infrav1.LoadBalancerPort { + return s.AzureCluster.Spec.NetworkSpec.AdditionalAPIServerLBPorts +} + // APIServerLBName returns the API Server LB name. func (s *ClusterScope) APIServerLBName() string { apiServerLB := s.APIServerLB() @@ -1020,9 +1027,12 @@ func (s *ClusterScope) SetControlPlaneSecurityRules() { if !s.ControlPlaneEnabled() { return } - if s.ControlPlaneSubnet().SecurityGroup.SecurityRules == nil { - subnet := s.ControlPlaneSubnet() - subnet.SecurityGroup.SecurityRules = infrav1.SecurityRules{ + + subnet := s.ControlPlaneSubnet() + + missingSSH := subnet.GetSecurityRuleByDestination("22") == nil + if missingSSH { + subnet.SecurityGroup.SecurityRules = append(subnet.SecurityGroup.SecurityRules, infrav1.SecurityRule{ Name: "allow_ssh", Description: "Allow SSH", @@ -1034,20 +1044,28 @@ func (s *ClusterScope) SetControlPlaneSecurityRules() { Destination: ptr.To("*"), DestinationPorts: ptr.To("22"), Action: infrav1.SecurityRuleActionAllow, - }, - infrav1.SecurityRule{ - Name: "allow_apiserver", - Description: "Allow K8s API Server", - Priority: 2201, - Protocol: infrav1.SecurityGroupProtocolTCP, - Direction: infrav1.SecurityRuleDirectionInbound, - Source: ptr.To("*"), - SourcePorts: ptr.To("*"), - Destination: ptr.To("*"), - DestinationPorts: ptr.To(strconv.Itoa(int(s.APIServerPort()))), - Action: infrav1.SecurityRuleActionAllow, - }, - } + }) + } + + port := strconv.Itoa(int(s.APIServerPort())) + + missingAPIPort := subnet.GetSecurityRuleByDestination(port) == nil + if missingAPIPort { + subnet.SecurityGroup.SecurityRules = append(subnet.SecurityGroup.SecurityRules, infrav1.SecurityRule{ + Name: "allow_apiserver", + Description: "Allow K8s API Server", + Priority: 2201, + Protocol: infrav1.SecurityGroupProtocolTCP, + Direction: infrav1.SecurityRuleDirectionInbound, + Source: ptr.To("*"), + SourcePorts: ptr.To("*"), + Destination: ptr.To("*"), + DestinationPorts: ptr.To(port), + Action: 
infrav1.SecurityRuleActionAllow, + }) + } + + if missingSSH || missingAPIPort { s.AzureCluster.Spec.NetworkSpec.UpdateControlPlaneSubnet(subnet) } } diff --git a/azure/scope/cluster_test.go b/azure/scope/cluster_test.go index 9bbfe28d6ab..50a822b7e11 100644 --- a/azure/scope/cluster_test.go +++ b/azure/scope/cluster_test.go @@ -242,50 +242,165 @@ func TestAPIServerHost(t *testing.T) { } func TestGettingSecurityRules(t *testing.T) { - g := NewWithT(t) - - cluster := &clusterv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-cluster", - Namespace: "default", + tests := []struct { + name string + cluster *clusterv1.Cluster + azureCluster *infrav1.AzureCluster + expectedRuleCount int + }{ + { + name: "default control plane subnet with no rules should have 2 security rules defaulted", + cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + }, + azureCluster: &infrav1.AzureCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-azure-cluster", + }, + Spec: infrav1.AzureClusterSpec{ + AzureClusterClassSpec: infrav1.AzureClusterClassSpec{ + SubscriptionID: "123", + IdentityRef: &corev1.ObjectReference{ + Kind: infrav1.AzureClusterIdentityKind, + }, + }, + ControlPlaneEnabled: true, + NetworkSpec: infrav1.NetworkSpec{ + Subnets: infrav1.Subnets{ + { + SubnetClassSpec: infrav1.SubnetClassSpec{ + Role: infrav1.SubnetNode, + Name: "node", + }, + }, + }, + }, + }, + }, + expectedRuleCount: 2, }, - } - - azureCluster := &infrav1.AzureCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-azure-cluster", + { + name: "additional rules are preserved", + cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + }, + azureCluster: &infrav1.AzureCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-azure-cluster", + }, + Spec: infrav1.AzureClusterSpec{ + AzureClusterClassSpec: infrav1.AzureClusterClassSpec{ + SubscriptionID: "123", + IdentityRef: 
&corev1.ObjectReference{ + Kind: infrav1.AzureClusterIdentityKind, + }, + }, + ControlPlaneEnabled: true, + NetworkSpec: infrav1.NetworkSpec{ + Subnets: infrav1.Subnets{ + { + SecurityGroup: infrav1.SecurityGroup{ + SecurityGroupClass: infrav1.SecurityGroupClass{ + SecurityRules: []infrav1.SecurityRule{{ + Name: "allow_9345", + Description: "Allow port 9345", + Priority: 2200, + Protocol: infrav1.SecurityGroupProtocolTCP, + Direction: infrav1.SecurityRuleDirectionInbound, + Source: ptr.To("*"), + SourcePorts: ptr.To("*"), + Destination: ptr.To("*"), + DestinationPorts: ptr.To("9345"), + Action: infrav1.SecurityRuleActionAllow, + }}, + }, + }, + SubnetClassSpec: infrav1.SubnetClassSpec{ + Role: infrav1.SubnetControlPlane, + Name: string(infrav1.SubnetControlPlane), + }, + }, + }, + }, + }, + }, + expectedRuleCount: 3, }, - Spec: infrav1.AzureClusterSpec{ - AzureClusterClassSpec: infrav1.AzureClusterClassSpec{ - SubscriptionID: "123", - IdentityRef: &corev1.ObjectReference{ - Kind: infrav1.AzureClusterIdentityKind, + { + name: "override rules are accepted", + cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", }, }, - ControlPlaneEnabled: true, - NetworkSpec: infrav1.NetworkSpec{ - Subnets: infrav1.Subnets{ - { - SubnetClassSpec: infrav1.SubnetClassSpec{ - Role: infrav1.SubnetNode, - Name: "node", + azureCluster: &infrav1.AzureCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-azure-cluster", + }, + Spec: infrav1.AzureClusterSpec{ + AzureClusterClassSpec: infrav1.AzureClusterClassSpec{ + SubscriptionID: "123", + IdentityRef: &corev1.ObjectReference{ + Kind: infrav1.AzureClusterIdentityKind, + }, + }, + ControlPlaneEnabled: true, + NetworkSpec: infrav1.NetworkSpec{ + Subnets: infrav1.Subnets{ + { + SecurityGroup: infrav1.SecurityGroup{ + SecurityGroupClass: infrav1.SecurityGroupClass{ + SecurityRules: []infrav1.SecurityRule{{ + Name: "deny_ssh", + Description: "Deny SSH", + Priority: 2200, + Protocol: 
infrav1.SecurityGroupProtocolTCP, + Direction: infrav1.SecurityRuleDirectionInbound, + Source: ptr.To("*"), + SourcePorts: ptr.To("*"), + Destination: ptr.To("*"), + DestinationPorts: ptr.To("22"), + Action: infrav1.SecurityRuleActionDeny, + }}, + }, + }, + SubnetClassSpec: infrav1.SubnetClassSpec{ + Role: infrav1.SubnetControlPlane, + Name: string(infrav1.SubnetControlPlane), + }, + }, }, }, }, }, + expectedRuleCount: 2, }, } - azureCluster.Default() - clusterScope := &ClusterScope{ - Cluster: cluster, - AzureCluster: azureCluster, - } - clusterScope.SetControlPlaneSecurityRules() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) - subnet, err := clusterScope.AzureCluster.Spec.NetworkSpec.GetControlPlaneSubnet() - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(subnet.SecurityGroup.SecurityRules).To(HaveLen(2)) + tt.azureCluster.Default() + + clusterScope := &ClusterScope{ + Cluster: tt.cluster, + AzureCluster: tt.azureCluster, + } + clusterScope.SetControlPlaneSecurityRules() + + subnet, err := clusterScope.AzureCluster.Spec.NetworkSpec.GetControlPlaneSubnet() + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(subnet.SecurityGroup.SecurityRules).To(HaveLen(tt.expectedRuleCount)) + }) + } } func TestPublicIPSpecs(t *testing.T) { diff --git a/azure/services/loadbalancers/loadbalancers_test.go b/azure/services/loadbalancers/loadbalancers_test.go index 238bde7ed3a..cfcd12fae94 100644 --- a/azure/services/loadbalancers/loadbalancers_test.go +++ b/azure/services/loadbalancers/loadbalancers_test.go @@ -61,6 +61,34 @@ var ( APIServerPort: 6443, } + fakePublicAPILBSpecWithAdditionalPorts = LBSpec{ + Name: "my-publiclb", + ResourceGroup: "my-rg", + SubscriptionID: "123", + ClusterName: "my-cluster", + Location: "my-location", + Role: infrav1.APIServerRole, + Type: infrav1.Public, + SKU: infrav1.SKUStandard, + SubnetName: "my-cp-subnet", + BackendPoolName: "my-publiclb-backendPool", + IdleTimeoutInMinutes: ptr.To[int32](4), + 
FrontendIPConfigs: []infrav1.FrontendIP{ + { + Name: "my-publiclb-frontEnd", + PublicIP: &infrav1.PublicIPSpec{ + Name: "my-publicip", + DNSName: "my-cluster.12345.mydomain.com", + }, + }, + }, + APIServerPort: 6443, + AdditionalPorts: []infrav1.LoadBalancerPort{{ + Name: "rke2-agent", + Port: 9345, + }}, + } + fakeInternalAPILBSpec = LBSpec{ Name: "my-private-lb", ResourceGroup: "my-rg", diff --git a/azure/services/loadbalancers/spec.go b/azure/services/loadbalancers/spec.go index e017b61f5a2..bae8c3ba14a 100644 --- a/azure/services/loadbalancers/spec.go +++ b/azure/services/loadbalancers/spec.go @@ -47,6 +47,7 @@ type LBSpec struct { APIServerPort int32 IdleTimeoutInMinutes *int32 AdditionalTags map[string]string + AdditionalPorts []infrav1.LoadBalancerPort } // ResourceName returns the name of the load balancer. @@ -221,14 +222,14 @@ func getLoadBalancingRules(lbSpec LBSpec, frontendIDs []*armnetwork.SubResource) if len(frontendIDs) != 0 { frontendIPConfig = frontendIDs[0] } - return []*armnetwork.LoadBalancingRule{ + rules := []*armnetwork.LoadBalancingRule{ { Name: ptr.To(lbRuleHTTPS), Properties: &armnetwork.LoadBalancingRulePropertiesFormat{ DisableOutboundSnat: ptr.To(true), Protocol: ptr.To(armnetwork.TransportProtocolTCP), - FrontendPort: ptr.To[int32](lbSpec.APIServerPort), - BackendPort: ptr.To[int32](lbSpec.APIServerPort), + FrontendPort: ptr.To(lbSpec.APIServerPort), + BackendPort: ptr.To(lbSpec.APIServerPort), IdleTimeoutInMinutes: lbSpec.IdleTimeoutInMinutes, EnableFloatingIP: ptr.To(false), LoadDistribution: ptr.To(armnetwork.LoadDistributionDefault), @@ -242,6 +243,30 @@ func getLoadBalancingRules(lbSpec LBSpec, frontendIDs []*armnetwork.SubResource) }, }, } + + for _, port := range lbSpec.AdditionalPorts { + rules = append(rules, &armnetwork.LoadBalancingRule{ + Name: ptr.To(port.Name), + Properties: &armnetwork.LoadBalancingRulePropertiesFormat{ + DisableOutboundSnat: ptr.To(true), + Protocol: ptr.To(armnetwork.TransportProtocolTCP), + 
FrontendPort: ptr.To(port.Port), + BackendPort: ptr.To(port.Port), + IdleTimeoutInMinutes: lbSpec.IdleTimeoutInMinutes, + EnableFloatingIP: ptr.To(false), + LoadDistribution: ptr.To(armnetwork.LoadDistributionDefault), + FrontendIPConfiguration: frontendIPConfig, + BackendAddressPool: &armnetwork.SubResource{ + ID: ptr.To(azure.AddressPoolID(lbSpec.SubscriptionID, lbSpec.ResourceGroup, lbSpec.Name, lbSpec.BackendPoolName)), + }, + Probe: &armnetwork.SubResource{ + ID: ptr.To(azure.ProbeID(lbSpec.SubscriptionID, lbSpec.ResourceGroup, lbSpec.Name, httpsProbe)), + }, + }, + }) + } + + return rules } return []*armnetwork.LoadBalancingRule{} } diff --git a/azure/services/loadbalancers/spec_test.go b/azure/services/loadbalancers/spec_test.go index 7b8fc339f13..72abeff6fe0 100644 --- a/azure/services/loadbalancers/spec_test.go +++ b/azure/services/loadbalancers/spec_test.go @@ -97,6 +97,38 @@ func TestParameters(t *testing.T) { }, expectedError: "", }, + { + name: "load balancer exists with missing additional API server ports", + spec: &fakePublicAPILBSpecWithAdditionalPorts, + existing: getExistingLBWithMissingFrontendIPConfigs(), + expect: func(g *WithT, result interface{}) { + g.Expect(result).To(BeAssignableToTypeOf(armnetwork.LoadBalancer{})) + g.Expect(result.(armnetwork.LoadBalancer)).To(Equal(newSamplePublicAPIServerLB(false, true, true, true, true, func(lb *armnetwork.LoadBalancer) { + lb.Properties.LoadBalancingRules = append(lb.Properties.LoadBalancingRules, &armnetwork.LoadBalancingRule{ + Name: ptr.To("rke2-agent"), + Properties: &armnetwork.LoadBalancingRulePropertiesFormat{ + DisableOutboundSnat: ptr.To(true), + Protocol: ptr.To(armnetwork.TransportProtocolTCP), + FrontendPort: ptr.To[int32](9345), + BackendPort: ptr.To[int32](9345), + IdleTimeoutInMinutes: ptr.To[int32](4), + EnableFloatingIP: ptr.To(false), + LoadDistribution: ptr.To(armnetwork.LoadDistributionDefault), + FrontendIPConfiguration: &armnetwork.SubResource{ + ID: 
ptr.To("/subscriptions/123/resourceGroups/my-rg/providers/Microsoft.Network/loadBalancers/my-publiclb/frontendIPConfigurations/my-publiclb-frontEnd"), + }, + BackendAddressPool: &armnetwork.SubResource{ + ID: ptr.To("/subscriptions/123/resourceGroups/my-rg/providers/Microsoft.Network/loadBalancers/my-publiclb/backendAddressPools/my-publiclb-backendPool"), + }, + Probe: &armnetwork.SubResource{ + ID: ptr.To("/subscriptions/123/resourceGroups/my-rg/providers/Microsoft.Network/loadBalancers/my-publiclb/probes/HTTPSProbe"), + }, + }, + }) + }))) + }, + expectedError: "", + }, { name: "load balancer exists with missing frontend IP configs", spec: &fakePublicAPILBSpec, @@ -208,7 +240,7 @@ func newDefaultNodeOutboundLB() armnetwork.LoadBalancer { } } -func newSamplePublicAPIServerLB(verifyFrontendIP bool, verifyBackendAddressPools bool, verifyLBRules bool, verifyProbes bool, verifyOutboundRules bool) armnetwork.LoadBalancer { +func newSamplePublicAPIServerLB(verifyFrontendIP bool, verifyBackendAddressPools bool, verifyLBRules bool, verifyProbes bool, verifyOutboundRules bool, modifications ...func(*armnetwork.LoadBalancer)) armnetwork.LoadBalancer { var subnet *armnetwork.Subnet var backendAddressPoolProps *armnetwork.BackendAddressPoolPropertiesFormat enableFloatingIP := ptr.To(false) @@ -235,7 +267,7 @@ func newSamplePublicAPIServerLB(verifyFrontendIP bool, verifyBackendAddressPools idleTimeout = ptr.To[int32](1000) } - return armnetwork.LoadBalancer{ + lb := armnetwork.LoadBalancer{ Tags: map[string]*string{ "sigs.k8s.io_cluster-api-provider-azure_cluster_my-cluster": ptr.To("owned"), "sigs.k8s.io_cluster-api-provider-azure_role": ptr.To(infrav1.APIServerRole), @@ -310,6 +342,12 @@ func newSamplePublicAPIServerLB(verifyFrontendIP bool, verifyBackendAddressPools }, }, } + + for _, modify := range modifications { + modify(&lb) + } + + return lb } func newDefaultInternalAPIServerLB() armnetwork.LoadBalancer { diff --git 
a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusters.yaml index 5692ad1ea9c..9c7cdabe38c 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusters.yaml @@ -658,6 +658,26 @@ spec: description: NetworkSpec encapsulates all things related to Azure network. properties: + additionalAPIServerLBPorts: + description: |- + AdditionalAPIServerLBPorts specifies extra inbound ports for the APIServer load balancer. + Each port specified (e.g., 9345) creates an inbound rule where the frontend port and the backend port are the same. + items: + description: LoadBalancerPort specifies additional port for + the API server load balancer. + properties: + name: + description: Name for the additional port within LB definition + type: string + port: + description: Port for the LB definition + format: int32 + type: integer + required: + - name + - port + type: object + type: array apiServerLB: description: APIServerLB is the configuration for the control-plane load balancer. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclustertemplates.yaml index b1cffc10c83..2b5acaaec48 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclustertemplates.yaml @@ -520,6 +520,27 @@ spec: description: NetworkSpec encapsulates all things related to Azure network. properties: + additionalAPIServerLBPorts: + description: |- + AdditionalAPIServerLBPorts is the configuration for the additional inbound control-plane load balancer ports + Each port specified (e.g., 9345) creates an inbound rule where the frontend port and the backend port are the same. 
+ items: + description: LoadBalancerPort specifies additional port + for the API server load balancer. + properties: + name: + description: Name for the additional port within + LB definition + type: string + port: + description: Port for the LB definition + format: int32 + type: integer + required: + - name + - port + type: object + type: array apiServerLB: description: APIServerLB is the configuration for the control-plane load balancer. diff --git a/docs/book/src/self-managed/custom-vnet.md b/docs/book/src/self-managed/custom-vnet.md index 95764a13814..63ebb302eb8 100644 --- a/docs/book/src/self-managed/custom-vnet.md +++ b/docs/book/src/self-managed/custom-vnet.md @@ -114,8 +114,13 @@ Security Rules were previously known as `ingressRule` in v1alpha3. Security rules can also be customized as part of the subnet specification in a custom network spec. -Note that ingress rules for the Kubernetes API Server port (default 6443) and SSH (22) are automatically added to the controlplane subnet only if security rules aren't specified. -It is the responsibility of the user to supply those rules themselves if using custom rules. + +Note that ingress rules for the Kubernetes API Server port (default 6443) and SSH (22) are automatically added to the controlplane subnet if these security rules aren't specified. +It is the responsibility of the user to override those rules themselves when the default configuration does not match expected ruleset. + +These rules are identified by `destinationPorts` value: +- `` for the API server access. Default port is `6443`. +- `22` for the SSH access. 
Here is an illustrative example of customizing rules that builds on the one above by adding an egress rule to the control plane nodes: @@ -177,6 +182,49 @@ spec: resourceGroup: cluster-example ``` +Alternatively, when default server `securityRules` apply, but the list needs to be extended, only required rules can be added, like so: + +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureCluster +metadata: + name: cluster-example + namespace: default +spec: + location: southcentralus + networkSpec: + vnet: + name: my-vnet + cidrBlocks: + - 10.0.0.0/16 + additionalAPIServerLBPorts: + - name: RKE2 + port: 9345 + subnets: + - name: my-subnet-cp + role: control-plane + cidrBlocks: + - 10.0.1.0/24 + securityGroup: + name: my-subnet-cp-nsg + securityRules: + - name: "allow_port_9345" + description: "RKE2 - allow node registration on port 9345" + direction: "Inbound" + priority: 2202 + protocol: "Tcp" + destination: "*" + destinationPorts: "9345" + source: "*" + sourcePorts: "*" + action: "Allow" + - name: my-subnet-node + role: node + cidrBlocks: + - 10.0.2.0/24 + resourceGroup: cluster-example +``` + ### Virtual Network service endpoints Sometimes it's desirable to use [Virtual Network service endpoints](https://learn.microsoft.com/azure/virtual-network/virtual-network-service-endpoints-overview) to establish secure and direct connectivity to Azure services from your subnet(s). Service Endpoints are configured on a per-subnet basis. Vnets managed by either `AzureCluster` or `AzureManagedControlPlane` can have `serviceEndpoints` optionally set on each subnet. 
diff --git a/templates/cluster-template-clusterclass-rke2.yaml b/templates/cluster-template-clusterclass-rke2.yaml new file mode 100644 index 00000000000..c2d6b587a46 --- /dev/null +++ b/templates/cluster-template-clusterclass-rke2.yaml @@ -0,0 +1,231 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: ${CLUSTER_CLASS_NAME} + namespace: default +spec: + controlPlane: + machineInfrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-control-plane + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: RKE2ControlPlaneTemplate + name: ${CLUSTER_NAME}-control-plane + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterTemplate + name: ${CLUSTER_NAME}-azure-cluster + patches: + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/subscriptionID + valueFrom: + variable: subscriptionID + - op: add + path: /spec/template/spec/location + valueFrom: + variable: location + - op: add + path: /spec/template/spec/resourceGroup + valueFrom: + variable: resourceGroup + - op: add + path: /spec/template/spec/identityRef/name + valueFrom: + variable: azureClusterIdentityName + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterTemplate + matchResources: + infrastructureCluster: true + name: azureClusterTemplate + - definitions: + - jsonPatches: + - op: replace + path: /spec/template/spec/files + valueFrom: + template: | + - contentFrom: + secret: + key: worker-node-azure.json + name: "{{ .builtin.machineDeployment.infrastructureRef.name }}-azure-json" + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: RKE2ConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_NAME}-worker + name: workerAzureJsonSecretName + - definitions: + - jsonPatches: + - op: add + path: 
/spec/template/spec/files/- + valueFrom: + template: | + contentFrom: + secret: + key: control-plane-azure.json + name: "{{ .builtin.controlPlane.machineTemplate.infrastructureRef.name }}-azure-json" + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: RKE2ControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/vmSize + valueFrom: + variable: vmSize + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + matchResources: + controlPlane: true + machineDeploymentClass: + names: + - ${CLUSTER_NAME}-worker + name: azureMachineTemplate + workers: + machineDeployments: + - class: ${CLUSTER_NAME}-worker + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: RKE2ConfigTemplate + name: ${CLUSTER_NAME}-worker + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-worker +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureClusterTemplate +metadata: + name: ${CLUSTER_NAME}-azure-cluster + namespace: default +spec: + template: + spec: + identityRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterIdentity + name: ${CLUSTER_IDENTITY_NAME} + networkSpec: + additionalAPIServerLBPorts: + - name: rke2 + port: 9345 + subnets: + - name: control-plane-subnet + role: control-plane + securityGroup: + securityRules: + - action: Allow + description: Allow port 9345 for RKE2 + destination: '*' + destinationPorts: "9345" + direction: Inbound + name: allow_port_9345 + priority: 2203 + protocol: Tcp + source: '*' + sourcePorts: '*' + - name: node-subnet + natGateway: + name: node-natgateway + role: node +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: 
default +spec: + template: + spec: + osDisk: + diskSizeGB: 128 + osType: Linux +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-worker + namespace: default +spec: + template: + spec: + osDisk: + diskSizeGB: 30 + osType: Linux +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: RKE2ControlPlaneTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + agentConfig: {} + files: + - owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + registrationMethod: control-plane-endpoint + rolloutStrategy: + rollingUpdate: + maxSurge: 1 + type: RollingUpdate + serverConfig: + cloudProviderName: external + cni: none + disableComponents: + kubernetesComponents: + - cloudController + kubeAPIServer: + extraArgs: + - --anonymous-auth=true +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: RKE2ConfigTemplate +metadata: + name: ${CLUSTER_NAME}-worker + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: replace_me + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureClusterIdentity +metadata: + labels: + clusterctl.cluster.x-k8s.io/move-hierarchy: "true" + name: ${CLUSTER_IDENTITY_NAME} + namespace: default +spec: + allowedNamespaces: {} + clientID: ${AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY} + tenantID: ${AZURE_TENANT_ID} + type: ${CLUSTER_IDENTITY_TYPE:=WorkloadIdentity} diff --git a/templates/flavors/clusterclass-rke2/azure-cluster-template.yaml b/templates/flavors/clusterclass-rke2/azure-cluster-template.yaml new file mode 100644 index 00000000000..a88dfc7e550 --- /dev/null +++ b/templates/flavors/clusterclass-rke2/azure-cluster-template.yaml @@ -0,0 +1,34 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureClusterTemplate +metadata: + name: 
${CLUSTER_NAME}-azure-cluster +spec: + template: + spec: + identityRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterIdentity + name: ${CLUSTER_IDENTITY_NAME} + networkSpec: + additionalAPIServerLBPorts: + - name: rke2 + port: 9345 + subnets: + - name: control-plane-subnet + role: control-plane + securityGroup: + securityRules: + - name: "allow_port_9345" + description: "Allow port 9345 for RKE2" + direction: "Inbound" + priority: 2203 + protocol: "Tcp" + destination: "*" + destinationPorts: "9345" + source: "*" + sourcePorts: "*" + action: "Allow" + - name: node-subnet + natGateway: + name: node-natgateway + role: node diff --git a/templates/flavors/clusterclass-rke2/azure-machine-template-controlplane.yaml b/templates/flavors/clusterclass-rke2/azure-machine-template-controlplane.yaml new file mode 100644 index 00000000000..b5621d493ed --- /dev/null +++ b/templates/flavors/clusterclass-rke2/azure-machine-template-controlplane.yaml @@ -0,0 +1,10 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane +spec: + template: + spec: + osDisk: + diskSizeGB: 128 + osType: Linux diff --git a/templates/flavors/clusterclass-rke2/azure-machine-template-worker.yaml b/templates/flavors/clusterclass-rke2/azure-machine-template-worker.yaml new file mode 100644 index 00000000000..c900af3f6d0 --- /dev/null +++ b/templates/flavors/clusterclass-rke2/azure-machine-template-worker.yaml @@ -0,0 +1,10 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-worker +spec: + template: + spec: + osDisk: + diskSizeGB: 30 + osType: Linux diff --git a/templates/flavors/clusterclass-rke2/clusterclass.yaml b/templates/flavors/clusterclass-rke2/clusterclass.yaml new file mode 100644 index 00000000000..51dc29cb251 --- /dev/null +++ b/templates/flavors/clusterclass-rke2/clusterclass.yaml @@ -0,0 +1,112 @@ +apiVersion: 
cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: ${CLUSTER_CLASS_NAME} +spec: + controlPlane: + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: RKE2ControlPlaneTemplate + name: ${CLUSTER_NAME}-control-plane + machineInfrastructure: + ref: + kind: AzureMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + name: ${CLUSTER_NAME}-control-plane + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterTemplate + name: ${CLUSTER_NAME}-azure-cluster + workers: + machineDeployments: + - class: ${CLUSTER_NAME}-worker + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: RKE2ConfigTemplate + name: ${CLUSTER_NAME}-worker + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-worker + patches: + - name: azureClusterTemplate + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterTemplate + matchResources: + infrastructureCluster: true + jsonPatches: + - op: add + path: "/spec/template/spec/subscriptionID" + valueFrom: + variable: subscriptionID + - op: add + path: "/spec/template/spec/location" + valueFrom: + variable: location + - op: add + path: "/spec/template/spec/resourceGroup" + valueFrom: + variable: resourceGroup + - op: add + path: "/spec/template/spec/identityRef/name" + valueFrom: + variable: azureClusterIdentityName + - name: workerAzureJsonSecretName + definitions: + - jsonPatches: + - op: replace + path: /spec/template/spec/files + valueFrom: + template: | + - contentFrom: + secret: + key: worker-node-azure.json + name: "{{ .builtin.machineDeployment.infrastructureRef.name }}-azure-json" + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: RKE2ConfigTemplate + matchResources: + machineDeploymentClass: + names: + - 
${CLUSTER_NAME}-worker + - name: azureMachineTemplate + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: RKE2ControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/files/- + valueFrom: + template: | + contentFrom: + secret: + key: control-plane-azure.json + name: "{{ .builtin.controlPlane.machineTemplate.infrastructureRef.name }}-azure-json" + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + matchResources: + controlPlane: true + machineDeploymentClass: + names: + - ${CLUSTER_NAME}-worker + jsonPatches: + - op: add + path: "/spec/template/spec/vmSize" + valueFrom: + variable: vmSize diff --git a/templates/flavors/clusterclass-rke2/kustomization.yaml b/templates/flavors/clusterclass-rke2/kustomization.yaml new file mode 100644 index 00000000000..45b4fd35011 --- /dev/null +++ b/templates/flavors/clusterclass-rke2/kustomization.yaml @@ -0,0 +1,14 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: +- clusterclass.yaml +- azure-cluster-template.yaml +- azure-machine-template-controlplane.yaml +- azure-machine-template-worker.yaml +- rke2-controlplane-template.yaml +- rke2-config-template.yaml +- ../../azure-cluster-identity + +sortOptions: + order: fifo diff --git a/templates/flavors/clusterclass-rke2/rke2-config-template.yaml b/templates/flavors/clusterclass-rke2/rke2-config-template.yaml new file mode 100644 index 00000000000..5167ecf38da --- /dev/null +++ b/templates/flavors/clusterclass-rke2/rke2-config-template.yaml @@ -0,0 +1,15 @@ +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: RKE2ConfigTemplate +metadata: + name: ${CLUSTER_NAME}-worker +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: replace_me + owner: root:root + path: 
/etc/kubernetes/azure.json + permissions: "0644" diff --git a/templates/flavors/clusterclass-rke2/rke2-controlplane-template.yaml b/templates/flavors/clusterclass-rke2/rke2-controlplane-template.yaml new file mode 100644 index 00000000000..3414bb06971 --- /dev/null +++ b/templates/flavors/clusterclass-rke2/rke2-controlplane-template.yaml @@ -0,0 +1,26 @@ +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: RKE2ControlPlaneTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane +spec: + template: + spec: + registrationMethod: control-plane-endpoint + rolloutStrategy: + type: "RollingUpdate" + rollingUpdate: + maxSurge: 1 + agentConfig: {} + serverConfig: + cni: none + cloudProviderName: external + disableComponents: + kubernetesComponents: + - cloudController + kubeAPIServer: + extraArgs: + - --anonymous-auth=true + files: + - owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" diff --git a/templates/test/ci/cluster-template-prow-clusterclass-ci-rke2.yaml b/templates/test/ci/cluster-template-prow-clusterclass-ci-rke2.yaml new file mode 100644 index 00000000000..94a77413d4c --- /dev/null +++ b/templates/test/ci/cluster-template-prow-clusterclass-ci-rke2.yaml @@ -0,0 +1,456 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: ${CLUSTER_CLASS_NAME} + namespace: default +spec: + controlPlane: + machineInfrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-control-plane + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: RKE2ControlPlaneTemplate + name: ${CLUSTER_NAME}-control-plane + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterTemplate + name: ${CLUSTER_NAME}-azure-cluster + patches: + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/files/- + valueFrom: + template: | + contentFrom: + secret: + key: control-plane-azure.json + name: "{{ 
.builtin.controlPlane.machineTemplate.infrastructureRef.name }}-azure-json" + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: RKE2ControlPlaneTemplate + matchResources: + controlPlane: true + name: controlPlaneAzureJsonSecretName + - definitions: + - jsonPatches: + - op: replace + path: /spec/template/spec/files + valueFrom: + template: | + - contentFrom: + secret: + key: worker-node-azure.json + name: "{{ .builtin.machineDeployment.infrastructureRef.name }}-azure-json" + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: RKE2ConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_NAME}-worker + name: workerAzureJsonSecretName + - definitions: + - jsonPatches: + - op: replace + path: /spec/template/spec/additionalTags + valueFrom: + template: | + buildProvenance: {{ .buildProvenance }} + creationTimestamp: {{ .timestamp }} + jobName: {{ .jobName }} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterTemplate + matchResources: + infrastructureCluster: true + name: additionalTags + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/identityRef/name + valueFrom: + variable: clusterIdentityRef + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterTemplate + matchResources: + infrastructureCluster: true + name: clusterIdentityRef + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/location + valueFrom: + variable: location + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterTemplate + matchResources: + infrastructureCluster: true + name: location + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/subscriptionID + valueFrom: + variable: subscriptionID + selector: + apiVersion: 
infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterTemplate + matchResources: + infrastructureCluster: true + name: subscriptionID + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/vmSize + valueFrom: + variable: controlPlaneMachineType + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + matchResources: + controlPlane: true + name: controlPlaneMachineType + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/image + valueFrom: + template: | + computeGallery: + version: {{ trimPrefix "v" (trimSuffix "+rke2r1" .builtin.cluster.topology.version) }} + name: {{ .galleryName }} + gallery: {{ .gallery }} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + matchResources: + controlPlane: true + machineDeploymentClass: + names: + - ${CLUSTER_NAME}-worker + name: controlPlaneMachineGalleryPatch + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/resourceGroup + valueFrom: + variable: resourceGroup + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterTemplate + matchResources: + infrastructureCluster: true + enabledIf: '{{ if .resourceGroup }}true{{end}}' + name: clusterResourceGroupPatch + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/vmSize + valueFrom: + variable: workerMachineType + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_NAME}-worker + name: workerMachineType + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/sshPublicKey + valueFrom: + variable: sshPublicKey + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + matchResources: + controlPlane: true + machineDeploymentClass: + names: + - ${CLUSTER_NAME}-worker + name: sshPublicKey + variables: + - name: k8sFeatureGates 
+ required: false + schema: + openAPIV3Schema: + type: string + - name: buildProvenance + required: false + schema: + openAPIV3Schema: + type: string + - name: timestamp + required: false + schema: + openAPIV3Schema: + type: string + - name: jobName + required: false + schema: + openAPIV3Schema: + type: string + - name: clusterIdentityRef + required: true + schema: + openAPIV3Schema: + type: string + - name: location + required: true + schema: + openAPIV3Schema: + type: string + - name: subscriptionID + required: true + schema: + openAPIV3Schema: + type: string + - name: logLevel + required: false + schema: + openAPIV3Schema: + type: string + - name: controlPlaneMachineType + required: false + schema: + openAPIV3Schema: + default: Standard_B2s + type: string + - name: workerMachineType + required: false + schema: + openAPIV3Schema: + default: Standard_B2s + type: string + - name: sshPublicKey + required: true + schema: + openAPIV3Schema: + default: "" + type: string + - name: galleryName + required: true + schema: + openAPIV3Schema: + default: capi-ubun2-2404 + type: string + - name: gallery + required: true + schema: + openAPIV3Schema: + default: ClusterAPI-f72ceb4f-5159-4c26-a0fe-2ea738f0d019 + type: string + - name: resourceGroup + schema: + openAPIV3Schema: + description: The Azure Resource Group where the Cluster will be created. 
+ type: string + workers: + machineDeployments: + - class: ${CLUSTER_NAME}-worker + machineHealthCheck: + maxUnhealthy: 100% + unhealthyConditions: + - status: "True" + timeout: 30s + type: E2ENodeUnhealthy + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: RKE2ConfigTemplate + name: ${CLUSTER_NAME}-worker + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-worker +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: RKE2ControlPlaneTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + agentConfig: {} + files: + - owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + registrationMethod: control-plane-endpoint + rolloutStrategy: + rollingUpdate: + maxSurge: 1 + type: RollingUpdate + serverConfig: + cloudProviderName: external + cni: none + disableComponents: + kubernetesComponents: + - cloudController + kubeAPIServer: + extraArgs: + - --anonymous-auth=true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureClusterTemplate +metadata: + name: ${CLUSTER_NAME}-azure-cluster + namespace: default +spec: + template: + spec: + additionalTags: + replace_me_key: replace_me_val + identityRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterIdentity + name: ${CLUSTER_IDENTITY_NAME} + networkSpec: + additionalAPIServerLBPorts: + - name: rke2 + port: 9345 + subnets: + - name: control-plane-subnet + role: control-plane + securityGroup: + securityRules: + - action: Allow + description: Allow port 9345 for RKE2 + destination: '*' + destinationPorts: "9345" + direction: Inbound + name: allow_port_9345 + priority: 2203 + protocol: Tcp + source: '*' + sourcePorts: '*' + - name: node-subnet + natGateway: + name: node-natgateway + role: node +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: 
${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + osDisk: + diskSizeGB: 128 + osType: Linux +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-worker + namespace: default +spec: + template: + spec: + osDisk: + diskSizeGB: 30 + osType: Linux +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: RKE2ConfigTemplate +metadata: + name: ${CLUSTER_NAME}-worker + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: replace_me + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureClusterIdentity +metadata: + labels: + clusterctl.cluster.x-k8s.io/move-hierarchy: "true" + name: ${CLUSTER_IDENTITY_NAME} + namespace: default +spec: + allowedNamespaces: {} + clientID: ${AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY} + tenantID: ${AZURE_TENANT_ID} + type: ${CLUSTER_IDENTITY_TYPE:=WorkloadIdentity} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: calico + namespace: default +spec: + chartName: tigera-operator + clusterSelector: + matchLabels: + cni: calico + namespace: tigera-operator + releaseName: projectcalico + repoURL: https://docs.tigera.io/calico/charts + valuesTemplate: |- + installation: + cni: + type: Calico + calicoNetwork: + bgp: Disabled + mtu: 1350 + ipPools: + ipPools:{{range $i, $cidr := .Cluster.spec.clusterNetwork.pods.cidrBlocks }} + - cidr: {{ $cidr }} + encapsulation: VXLAN{{end}} + registry: mcr.microsoft.com/oss + # Image and registry configuration for the tigera/operator pod. 
+ tigeraOperator: + image: tigera/operator + registry: mcr.microsoft.com/oss + calicoctl: + image: mcr.microsoft.com/oss/calico/ctl + version: ${CALICO_VERSION} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cloud-provider-azure-chart + namespace: default +spec: + chartName: cloud-provider-azure + clusterSelector: + matchLabels: + cloud-provider: azure + releaseName: cloud-provider-azure-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo + valuesTemplate: | + infra: + clusterName: {{ .Cluster.metadata.name }} + cloudControllerManager: + clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }} + logVerbosity: 4 + nodeSelector: + node-role.kubernetes.io/control-plane: "true" diff --git a/templates/test/ci/cluster-template-prow-topology-rke2.yaml b/templates/test/ci/cluster-template-prow-topology-rke2.yaml new file mode 100644 index 00000000000..a2f89d170af --- /dev/null +++ b/templates/test/ci/cluster-template-prow-topology-rke2.yaml @@ -0,0 +1,47 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cloud-provider: azure + cni: calico + containerd-logger: enabled + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + topology: + class: ${CLUSTER_CLASS_NAME} + controlPlane: + replicas: ${CONTROL_PLANE_MACHINE_COUNT:=1} + variables: + - name: subscriptionID + value: ${AZURE_SUBSCRIPTION_ID} + - name: controlPlaneMachineType + value: ${AZURE_CONTROL_PLANE_MACHINE_TYPE:-""} + - name: workerMachineType + value: ${AZURE_NODE_MACHINE_TYPE:-""} + - name: sshPublicKey + value: ${AZURE_SSH_PUBLIC_KEY_B64:-""} + - name: buildProvenance + value: ${BUILD_PROVENANCE:-""} + - name: timestamp + value: ${TIMESTAMP:-""} + - name: jobName + value: ${JOB_NAME:-""} + - name: clusterIdentityRef + value: ${CLUSTER_IDENTITY_NAME} + - name: location + value: ${AZURE_LOCATION} + - name: 
k8sFeatureGates + value: ${K8S_FEATURE_GATES:-""} + - name: logLevel + value: "4" + version: ${KUBERNETES_VERSION}+rke2r1 + workers: + machineDeployments: + - class: ${CLUSTER_NAME}-worker + name: md-0 + replicas: ${WORKER_MACHINE_COUNT} diff --git a/templates/test/ci/prow-clusterclass-ci-rke2/ccm-patch.yaml b/templates/test/ci/prow-clusterclass-ci-rke2/ccm-patch.yaml new file mode 100644 index 00000000000..cd3bfc929dc --- /dev/null +++ b/templates/test/ci/prow-clusterclass-ci-rke2/ccm-patch.yaml @@ -0,0 +1,13 @@ +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cloud-provider-azure-chart +spec: + valuesTemplate: | + infra: + clusterName: {{ .Cluster.metadata.name }} + cloudControllerManager: + clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }} + logVerbosity: 4 + nodeSelector: + node-role.kubernetes.io/control-plane: "true" diff --git a/templates/test/ci/prow-clusterclass-ci-rke2/kustomization.yaml b/templates/test/ci/prow-clusterclass-ci-rke2/kustomization.yaml new file mode 100644 index 00000000000..0f1977ecee4 --- /dev/null +++ b/templates/test/ci/prow-clusterclass-ci-rke2/kustomization.yaml @@ -0,0 +1,20 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: +- ../../../flavors/clusterclass-rke2/clusterclass.yaml +- ../../../flavors/clusterclass-rke2/rke2-controlplane-template.yaml +- ../../../flavors/clusterclass-rke2/azure-cluster-template.yaml +- ../../../flavors/clusterclass-rke2/azure-machine-template-controlplane.yaml +- ../../../flavors/clusterclass-rke2/azure-machine-template-worker.yaml +- rke2-config-template.yaml +- ../../../azure-cluster-identity +- ../../../addons/cluster-api-helm/calico.yaml +- ../../../addons/cluster-api-helm/cloud-provider-azure.yaml +patches: +- path: patches.yaml +- path: variables.yaml +- path: ccm-patch.yaml + +sortOptions: + order: fifo diff --git a/templates/test/ci/prow-clusterclass-ci-rke2/patches.yaml 
b/templates/test/ci/prow-clusterclass-ci-rke2/patches.yaml new file mode 100644 index 00000000000..bbb1ffa37bf --- /dev/null +++ b/templates/test/ci/prow-clusterclass-ci-rke2/patches.yaml @@ -0,0 +1,200 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: ${CLUSTER_CLASS_NAME} +spec: + workers: + machineDeployments: + - class: ${CLUSTER_NAME}-worker + machineHealthCheck: + maxUnhealthy: 100% + unhealthyConditions: + - type: E2ENodeUnhealthy + status: "True" + timeout: 30s + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: RKE2ConfigTemplate + name: ${CLUSTER_NAME}-worker + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-worker + patches: + - name: controlPlaneAzureJsonSecretName + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: RKE2ControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/files/-" + valueFrom: + template: | + contentFrom: + secret: + key: control-plane-azure.json + name: "{{ .builtin.controlPlane.machineTemplate.infrastructureRef.name }}-azure-json" + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + - name: workerAzureJsonSecretName + definitions: + - selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: RKE2ConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_NAME}-worker + jsonPatches: + - op: replace + path: "/spec/template/spec/files" + valueFrom: + template: | + - contentFrom: + secret: + key: worker-node-azure.json + name: "{{ .builtin.machineDeployment.infrastructureRef.name }}-azure-json" + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + - name: additionalTags + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterTemplate + matchResources: + 
infrastructureCluster: true + jsonPatches: + - op: replace + path: /spec/template/spec/additionalTags + valueFrom: + template: | + buildProvenance: {{ .buildProvenance }} + creationTimestamp: {{ .timestamp }} + jobName: {{ .jobName }} + - name: clusterIdentityRef + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterTemplate + matchResources: + infrastructureCluster: true + jsonPatches: + - op: add + path: /spec/template/spec/identityRef/name + valueFrom: + variable: clusterIdentityRef + - name: location + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterTemplate + matchResources: + infrastructureCluster: true + jsonPatches: + - op: add + path: /spec/template/spec/location + valueFrom: + variable: location + - name: subscriptionID + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterTemplate + matchResources: + infrastructureCluster: true + jsonPatches: + - op: add + path: /spec/template/spec/subscriptionID + valueFrom: + variable: subscriptionID + - name: controlPlaneMachineType + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/vmSize + valueFrom: + variable: controlPlaneMachineType + - name: controlPlaneMachineGalleryPatch + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + matchResources: + controlPlane: true + machineDeploymentClass: + names: + - ${CLUSTER_NAME}-worker + jsonPatches: + - op: add + path: "/spec/template/spec/image" + valueFrom: + template: | + computeGallery: + version: {{ trimPrefix "v" (trimSuffix "+rke2r1" .builtin.cluster.topology.version) }} + name: {{ .galleryName }} + gallery: {{ .gallery }} + - definitions: + - jsonPatches: + - op: add + path: 
"/spec/template/spec/resourceGroup" + valueFrom: + variable: resourceGroup + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterTemplate + matchResources: + infrastructureCluster: true + enabledIf: "{{ if .resourceGroup }}true{{end}}" + name: clusterResourceGroupPatch + - name: workerMachineType + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_NAME}-worker + jsonPatches: + - op: add + path: /spec/template/spec/vmSize + valueFrom: + variable: workerMachineType + - name: sshPublicKey + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + matchResources: + controlPlane: true + machineDeploymentClass: + names: + - ${CLUSTER_NAME}-worker + jsonPatches: + - op: add + path: /spec/template/spec/sshPublicKey + valueFrom: + variable: sshPublicKey +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureClusterTemplate +metadata: + name: ${CLUSTER_NAME}-azure-cluster +spec: + template: + spec: + additionalTags: + replace_me_key: replace_me_val diff --git a/templates/test/ci/prow-clusterclass-ci-rke2/rke2-config-template.yaml b/templates/test/ci/prow-clusterclass-ci-rke2/rke2-config-template.yaml new file mode 100644 index 00000000000..182f3275be5 --- /dev/null +++ b/templates/test/ci/prow-clusterclass-ci-rke2/rke2-config-template.yaml @@ -0,0 +1,16 @@ +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: RKE2ConfigTemplate +metadata: + name: ${CLUSTER_NAME}-worker + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: replace_me + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" diff --git a/templates/test/ci/prow-clusterclass-ci-rke2/variables.yaml b/templates/test/ci/prow-clusterclass-ci-rke2/variables.yaml new file mode 100644 index 
00000000000..37439865afc --- /dev/null +++ b/templates/test/ci/prow-clusterclass-ci-rke2/variables.yaml @@ -0,0 +1,81 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: ${CLUSTER_CLASS_NAME} +spec: + variables: + - name: k8sFeatureGates + required: false + schema: + openAPIV3Schema: + type: string + - name: buildProvenance + required: false + schema: + openAPIV3Schema: + type: string + - name: timestamp + required: false + schema: + openAPIV3Schema: + type: string + - name: jobName + required: false + schema: + openAPIV3Schema: + type: string + - name: clusterIdentityRef + required: true + schema: + openAPIV3Schema: + type: string + - name: location + required: true + schema: + openAPIV3Schema: + type: string + - name: subscriptionID + required: true + schema: + openAPIV3Schema: + type: string + - name: logLevel + required: false + schema: + openAPIV3Schema: + type: string + - name: controlPlaneMachineType + required: false + schema: + openAPIV3Schema: + type: string + default: Standard_B2s + - name: workerMachineType + required: false + schema: + openAPIV3Schema: + type: string + default: Standard_B2s + - name: sshPublicKey + required: true + schema: + openAPIV3Schema: + type: string + default: "" + - name: galleryName + required: true + schema: + openAPIV3Schema: + type: string + default: capi-ubun2-2404 + - name: gallery + required: true + schema: + openAPIV3Schema: + type: string + default: ClusterAPI-f72ceb4f-5159-4c26-a0fe-2ea738f0d019 + - name: resourceGroup + schema: + openAPIV3Schema: + description: "The Azure Resource Group where the Cluster will be created." 
+ type: string diff --git a/test/e2e/azure_test.go b/test/e2e/azure_test.go index af2ffee5fe8..83a46c30bbb 100644 --- a/test/e2e/azure_test.go +++ b/test/e2e/azure_test.go @@ -23,6 +23,7 @@ import ( "context" "fmt" "os" + "path/filepath" "time" "github.com/Azure/azure-service-operator/v2/pkg/common/config" @@ -1108,6 +1109,59 @@ var _ = Describe("Workload cluster creation", func() { }) }) + Context("Creating RKE2 clusters using clusterclass [OPTIONAL]", func() { + It("with 3 control plane node and one linux worker node", func() { + // Use ci-rke2 as the clusterclass name so test infra can find the clusterclass template + Expect(os.Setenv("CLUSTER_CLASS_NAME", "ci-rke2")).To(Succeed()) + + // Use "cc" as spec name because NAT gateway pip name exceeds limit. + clusterName = getClusterName(clusterNamePrefix, "cc") + + // Init rke2 CP and bootstrap providers + initInput := clusterctl.InitInput{ + // pass reference to the management cluster hosting this test + KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), + // pass the clusterctl config file that points to the local provider repository created for this test + ClusterctlConfigPath: clusterctlConfigPath, + // setup the desired list of providers for a single-tenant management cluster + BootstrapProviders: []string{"rke2"}, + ControlPlaneProviders: []string{"rke2"}, + // setup clusterctl logs folder + LogFolder: filepath.Join(artifactFolder, "clusters", clusterName), + } + clusterctl.Init(ctx, initInput) + + // Create a cluster using the cluster class created above + clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput( + specName, + withFlavor("topology-rke2"), + withNamespace(namespace.Name), + withClusterName(clusterName), + withControlPlaneMachineCount(3), + withWorkerMachineCount(1), + withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{ + WaitForControlPlaneInitialized: func(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, result 
*clusterctl.ApplyCustomClusterTemplateAndWaitResult) {
+				},
+				WaitForControlPlaneMachinesReady: func(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, result *clusterctl.ApplyCustomClusterTemplateAndWaitResult) {
+					ensureControlPlaneReplicasMatch(ctx, input.ClusterProxy, namespace.Name, clusterName, 3, e2eConfig.GetIntervals(specName, "wait-control-plane-long"))
+				},
+			}),
+		), result)
+
+		By("Verifying expected VM extensions are present on the node", func() {
+			AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
+				return AzureVMExtensionsSpecInput{
+					BootstrapClusterProxy: bootstrapClusterProxy,
+					Namespace:             namespace,
+					ClusterName:           clusterName,
+				}
+			})
+		})
+
+		By("PASSED!")
+	})
+})
+
 // ci-e2e.sh and Prow CI skip this test by default. To include this test, set `GINKGO_SKIP=""`.
 // This spec expects a user-assigned identity named "cloud-provider-user-identity" in a "capz-ci"
 // resource group. Override these defaults by setting the USER_IDENTITY and CI_RG environment variables.
diff --git a/test/e2e/common.go b/test/e2e/common.go
index 466e5314228..217034d09f9 100644
--- a/test/e2e/common.go
+++ b/test/e2e/common.go
@@ -43,6 +43,7 @@ import (
 	capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
+	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/kubeconfig"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -308,6 +309,33 @@ func ensureControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCu
 	result.ControlPlane = controlPlane
 }
+// ensureControlPlaneReplicasMatch waits until the expected number of control plane machines report a Ready condition.
+func ensureControlPlaneReplicasMatch(ctx context.Context, proxy framework.ClusterProxy, ns, clusterName string, replicas int, intervals []interface{}) {
+	By("Waiting for all control plane nodes to exist")
+	inClustersNamespaceListOption := client.InNamespace(ns)
+	// ControlPlane labels
+	matchClusterListOption := client.MatchingLabels{
+		clusterv1.MachineControlPlaneLabel: "",
+		clusterv1.ClusterNameLabel:         clusterName,
+	}
+
+	Eventually(func() (int, error) {
+		machineList := &clusterv1.MachineList{}
+		lister := proxy.GetClient()
+		if err := lister.List(ctx, machineList, inClustersNamespaceListOption, matchClusterListOption); err != nil {
+			Logf("Failed to list the machines: %+v", err)
+			return 0, err
+		}
+		count := 0
+		for _, machine := range machineList.Items {
+			if condition := conditions.Get(&machine, clusterv1.MachineReadyV1Beta2Condition); condition != nil && condition.Status == corev1.ConditionTrue {
+				count++
+			}
+		}
+		return count, nil
+	}, intervals...).Should(Equal(replicas), "Timed out waiting for %d control plane machines to exist", replicas)
+}
+
 // CheckTestBeforeCleanup checks to see if the current running Ginkgo test failed, and prints
 // a status message regarding cleanup.
func CheckTestBeforeCleanup() { diff --git a/test/e2e/config/azure-dev.yaml b/test/e2e/config/azure-dev.yaml index 330e25b279e..80afff626a4 100644 --- a/test/e2e/config/azure-dev.yaml +++ b/test/e2e/config/azure-dev.yaml @@ -158,10 +158,14 @@ providers: targetName: "cluster-template-dual-stack.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-clusterclass-ci-default.yaml" targetName: "clusterclass-ci-default.yaml" + - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-clusterclass-ci-rke2.yaml" + targetName: "clusterclass-ci-rke2.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-aks-clusterclass.yaml" targetName: "clusterclass-default.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-topology.yaml" targetName: "cluster-template-topology.yaml" + - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-topology-rke2.yaml" + targetName: "cluster-template-topology-rke2.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-flatcar.yaml" targetName: "cluster-template-flatcar.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-flatcar-sysext.yaml" @@ -249,6 +253,7 @@ intervals: default/wait-cluster: ["20m", "10s"] default/wait-private-cluster: ["30m", "10s"] default/wait-control-plane: ["20m", "10s"] + default/wait-control-plane-long: ["40m", "10s"] default/wait-control-plane-ha: ["30m", "10s"] default/wait-worker-nodes: ["25m", "10s"] default/wait-gpu-nodes: ["30m", "10s"]