diff --git a/README.md b/README.md index 0c7730b73f..6f1b87baa6 100644 --- a/README.md +++ b/README.md @@ -46,8 +46,7 @@ cluster on AWS. - Native Kubernetes manifests and API - Manages the bootstrapping of VPCs, gateways, security groups and instances. -- Choice of Linux distribution among Amazon Linux 2, CentOS 7, Ubuntu(18.04, 20.04) and Flatcar - using [pre-baked AMIs][published_amis]. +- Choice of Linux distribution using [pre-baked AMIs][published_amis]. - Deploys Kubernetes control planes into private subnets with a separate bastion server. - Doesn't use SSH for bootstrapping nodes. diff --git a/cmd/clusterawsadm/ami/helper.go b/cmd/clusterawsadm/ami/helper.go index ebc393084c..454a335fcb 100644 --- a/cmd/clusterawsadm/ami/helper.go +++ b/cmd/clusterawsadm/ami/helper.go @@ -38,7 +38,7 @@ const ( ) func getSupportedOsList() []string { - return []string{"centos-7", "ubuntu-22.04", "ubuntu-18.04", "ubuntu-20.04", "amazon-2", "flatcar-stable"} + return []string{"centos-7", "ubuntu-24.04", "ubuntu-22.04", "amazon-2", "flatcar-stable", "rhel-8"} } func getimageRegionList() []string { diff --git a/cmd/clusterawsadm/cmd/ami/common/copy.go b/cmd/clusterawsadm/cmd/ami/common/copy.go index c2c95c6448..4e5fded04b 100644 --- a/cmd/clusterawsadm/cmd/ami/common/copy.go +++ b/cmd/clusterawsadm/cmd/ami/common/copy.go @@ -40,8 +40,8 @@ func CopyAMICmd() *cobra.Command { `), Example: cmd.Examples(` # Copy AMI from the default AWS account where AMIs are stored. - # Available os options: centos-7, ubuntu-18.04, ubuntu-20.04, amazon-2, flatcar-stable - clusterawsadm ami copy --kubernetes-version=v1.18.12 --os=ubuntu-20.04 --region=us-west-2 + # Available os options: centos-7, ubuntu-24.04, ubuntu-22.04, amazon-2, flatcar-stable + clusterawsadm ami copy --kubernetes-version=v1.30.1 --os=ubuntu-22.04 --region=us-west-2 # owner-id and dry-run flags are optional. region can be set via flag or env clusterawsadm ami copy --os centos-7 --kubernetes-version=v1.19.4 --owner-id=111111111111 --dry-run @@ -81,7 +81,6 @@ func CopyAMICmd() *cobra.Command { SourceRegion: sourceRegion, }, ) - if err != nil { fmt.Print(err) return err diff --git a/cmd/clusterawsadm/cmd/ami/common/encryptedcopy.go b/cmd/clusterawsadm/cmd/ami/common/encryptedcopy.go index 56492e7e1f..92b82d80d5 100644 --- a/cmd/clusterawsadm/cmd/ami/common/encryptedcopy.go +++ b/cmd/clusterawsadm/cmd/ami/common/encryptedcopy.go @@ -45,7 +45,7 @@ func EncryptedCopyAMICmd() *cobra.Command { `), Example: cmd.Examples(` # Create an encrypted AMI: - # Available os options: centos-7, ubuntu-18.04, ubuntu-20.04, amazon-2, flatcar-stable + # Available os options: centos-7, ubuntu-24.04, ubuntu-22.04, amazon-2, flatcar-stable clusterawsadm ami encrypted-copy --kubernetes-version=v1.18.12 --os=ubuntu-20.04 --region=us-west-2 # owner-id and dry-run flags are optional. region can be set via flag or env diff --git a/cmd/clusterawsadm/cmd/ami/list/list.go b/cmd/clusterawsadm/cmd/ami/list/list.go index 5e1bef32ed..3458a5cee6 100644 --- a/cmd/clusterawsadm/cmd/ami/list/list.go +++ b/cmd/clusterawsadm/cmd/ami/list/list.go @@ -51,7 +51,7 @@ func ListAMICmd() *cobra.Command { `), Example: cmd.Examples(` # List AMIs from the default AWS account where AMIs are stored. 
- # Available os options: centos-7, ubuntu-18.04, ubuntu-20.04, amazon-2, flatcar-stable + # Available os options: centos-7, ubuntu-24.04, ubuntu-22.04, amazon-2, flatcar-stable clusterawsadm ami list --kubernetes-version=v1.18.12 --os=ubuntu-20.04 --region=us-west-2 # To list all supported AMIs in all supported Kubernetes versions, regions, and linux distributions: clusterawsadm ami list diff --git a/controllers/awsmachine_controller_test.go b/controllers/awsmachine_controller_test.go index b96047a9e9..05abd051aa 100644 --- a/controllers/awsmachine_controller_test.go +++ b/controllers/awsmachine_controller_test.go @@ -106,11 +106,13 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { g.Expect(testEnv.Cleanup(ctx, awsMachine, ns, secret)).To(Succeed()) }) - cs, err := getClusterScope(infrav1.AWSCluster{ObjectMeta: metav1.ObjectMeta{Name: "test"}, Spec: infrav1.AWSClusterSpec{NetworkSpec: infrav1.NetworkSpec{Subnets: []infrav1.SubnetSpec{ - { - ID: "subnet-1", - AvailabilityZone: "us-east-1a", - }}, + cs, err := getClusterScope(infrav1.AWSCluster{ObjectMeta: metav1.ObjectMeta{Name: "test"}, Spec: infrav1.AWSClusterSpec{NetworkSpec: infrav1.NetworkSpec{ + Subnets: []infrav1.SubnetSpec{ + { + ID: "subnet-1", + AvailabilityZone: "us-east-1a", + }, + }, }}}) g.Expect(err).To(BeNil()) cs.Cluster = &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}} @@ -131,7 +133,8 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { }, infrav1.SecurityGroupControlPlane: { ID: "3", - }} + }, + } ms, err := getMachineScope(cs, awsMachine) g.Expect(err).To(BeNil()) @@ -162,9 +165,11 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { _, err = reconciler.reconcileNormal(ctx, ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionTrue, "", ""}, + expectConditions(g, ms.AWSMachine, []conditionAssertion{ + {infrav1.SecurityGroupsReadyCondition, corev1.ConditionTrue, "", ""}, {infrav1.InstanceReadyCondition, corev1.ConditionTrue, "", ""}, - {infrav1.ELBAttachedCondition, corev1.ConditionTrue, "", ""}}) + {infrav1.ELBAttachedCondition, corev1.ConditionTrue, "", ""}, + }) g.Expect(ms.AWSMachine.Finalizers).Should(ContainElement(infrav1.MachineFinalizer)) }) t.Run("Should successfully reconcile control plane machine deletion", func(t *testing.T) { @@ -236,7 +241,8 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { g.Expect(err).To(BeNil()) expectConditions(g, ms.AWSMachine, []conditionAssertion{ {infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}}) + {infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, + }) g.Expect(ms.AWSMachine.Finalizers).ShouldNot(ContainElement(infrav1.MachineFinalizer)) }) t.Run("Should fail reconciling control-plane machine creation while attaching load balancer", func(t *testing.T) { @@ -280,11 +286,13 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { g.Expect(testEnv.Cleanup(ctx, awsMachine, ns, secret)).To(Succeed()) }) - cs, err := getClusterScope(infrav1.AWSCluster{ObjectMeta: metav1.ObjectMeta{Name: "test"}, Spec: infrav1.AWSClusterSpec{NetworkSpec: infrav1.NetworkSpec{Subnets: []infrav1.SubnetSpec{ - { - ID: "subnet-1", - AvailabilityZone: "us-east-1a", - }}, + cs, err := 
getClusterScope(infrav1.AWSCluster{ObjectMeta: metav1.ObjectMeta{Name: "test"}, Spec: infrav1.AWSClusterSpec{NetworkSpec: infrav1.NetworkSpec{ + Subnets: []infrav1.SubnetSpec{ + { + ID: "subnet-1", + AvailabilityZone: "us-east-1a", + }, + }, }}}) g.Expect(err).To(BeNil()) cs.Cluster = &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}} @@ -305,7 +313,8 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { }, infrav1.SecurityGroupControlPlane: { ID: "3", - }} + }, + } ms, err := getMachineScope(cs, awsMachine) g.Expect(err).To(BeNil()) @@ -411,8 +420,10 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { _, err = reconciler.reconcileDelete(ms, cs, cs, cs, cs) g.Expect(err).Should(HaveOccurred()) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, "DeletingFailed"}, - {infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{ + {infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, "DeletingFailed"}, + {infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, + }) g.Expect(ms.AWSMachine.Finalizers).ShouldNot(ContainElement(infrav1.MachineFinalizer)) }) } @@ -572,11 +583,11 @@ func mockedCreateInstanceCalls(m *mocks.MockEC2APIMockRecorder) { Filters: []*ec2.Filter{ { Name: aws.String("owner-id"), - Values: aws.StringSlice([]string{"258751437250"}), + Values: aws.StringSlice([]string{"819546954734"}), }, { Name: aws.String("name"), - Values: aws.StringSlice([]string{"capa-ami-ubuntu-18.04-?test-*"}), + Values: aws.StringSlice([]string{"capa-ami-ubuntu-24.04-?test-*"}), }, { Name: aws.String("architecture"), @@ -590,7 +601,8 @@ func mockedCreateInstanceCalls(m *mocks.MockEC2APIMockRecorder) { Name: aws.String("virtualization-type"), Values: aws.StringSlice([]string{"hvm"}), }, - }})).Return(&ec2.DescribeImagesOutput{Images: []*ec2.Image{ + }, + })).Return(&ec2.DescribeImagesOutput{Images: []*ec2.Image{ { ImageId: aws.String("latest"), CreationDate: aws.String("2019-02-08T17:02:31.000Z"), @@ -639,7 +651,8 @@ func mockedCreateInstanceCalls(m *mocks.MockEC2APIMockRecorder) { }, }, }, - }}, nil).MaxTimes(3) + }, + }, nil).MaxTimes(3) m.DescribeNetworkInterfaceAttributeWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNetworkInterfaceAttributeInput{ NetworkInterfaceId: aws.String("eni-1"), Attribute: aws.String("groupSet"), diff --git a/docs/book/src/SUMMARY_SUFFIX.md b/docs/book/src/SUMMARY_SUFFIX.md index 4470b87f47..cba61f06b3 100644 --- a/docs/book/src/SUMMARY_SUFFIX.md +++ b/docs/book/src/SUMMARY_SUFFIX.md @@ -3,6 +3,7 @@ - [Developing E2E tests](./development/e2e.md) - [Coding Conventions](./development/conventions.md) - [Try unreleased changes with Nightly Builds](./development/nightlies.md) + - [Publishing AMIs](./development/amis.md) - [CRD Reference](./crd/index.md) - [Reference](./topics/reference/reference.md) - [Glossary](./topics/reference/glossary.md) diff --git a/docs/book/src/crd/index.md b/docs/book/src/crd/index.md index 4c456b22ce..e56b9288d8 100644 --- a/docs/book/src/crd/index.md +++ b/docs/book/src/crd/index.md @@ -6576,6 +6576,17 @@ VpcCni +restrictPrivateSubnets
+ +bool + + + +

RestrictPrivateSubnets indicates that the EKS control plane should only use private subnets.

+ + + + kubeProxy
@@ -6979,6 +6990,17 @@ VpcCni +restrictPrivateSubnets
+ +bool + + + +

RestrictPrivateSubnets indicates that the EKS control plane should only use private subnets.

+ + + + kubeProxy
@@ -17072,7 +17094,13 @@ int64 Description -

"AL2_ARM_64"

+

"AL2023_ARM_64_STANDARD"

+

Al2023Arm64 is the AL2023 Arm AMI type.

+ +

"AL2023_x86_64_STANDARD"

+

Al2023x86_64 is the AL2023 x86-64 AMI type.

+ +

"AL2_ARM_64"

Al2Arm64 is the Arm AMI type.

"AL2_x86_64"

@@ -17081,12 +17109,6 @@ int64

"AL2_x86_64_GPU"

Al2x86_64GPU is the x86-64 GPU AMI type.

-

"AL2023_ARM_64_STANDARD"

-

Al2023Arm64 is the AL2023 Arm AMI type.

- -

"AL2023_x86_64_STANDARD"

-

Al2023x86_64 is the AL2023 x86 AMI type.

-

ManagedMachinePoolCapacityType @@ -19005,6 +19027,20 @@ default value is ELBProtocolSSL

+healthCheck
+ +
+TargetGroupHealthCheckAPISpec + + + + +(Optional) +

HealthCheck sets custom health check configuration to the API target group.

+ + + + additionalSecurityGroups
[]string @@ -19275,6 +19311,20 @@ Precedence for this setting is as follows: +elasticIpPool
+ + +ElasticIPPool + + + + +(Optional) +

ElasticIPPool is the configuration to allocate Public IPv4 address (Elastic IP/EIP) from user-defined pool.

+ + + + additionalSecurityGroups
@@ -19480,6 +19530,18 @@ PrivateDNSName

PrivateDNSName is the options for the instance hostname.

+ + +capacityReservationId
+ +string + + + +(Optional) +

CapacityReservationID specifies the target Capacity Reservation into which the instance should be launched.

+ + @@ -19669,6 +19731,20 @@ Precedence for this setting is as follows: +elasticIpPool
+ +
+ElasticIPPool + + + + +(Optional) +

ElasticIPPool is the configuration to allocate Public IPv4 address (Elastic IP/EIP) from user-defined pool.

+ + + + additionalSecurityGroups
@@ -19874,6 +19950,18 @@ PrivateDNSName

PrivateDNSName is the options for the instance hostname.

+ + +capacityReservationId
+ +string + + + +(Optional) +

CapacityReservationID specifies the target Capacity Reservation into which the instance should be launched.

+ +

AWSMachineStatus @@ -20279,6 +20367,20 @@ Precedence for this setting is as follows: +elasticIpPool
+ +
+ElasticIPPool + + + + +(Optional) +

ElasticIPPool is the configuration to allocate Public IPv4 address (Elastic IP/EIP) from user-defined pool.

+ + + + additionalSecurityGroups
@@ -20484,6 +20586,18 @@ PrivateDNSName

PrivateDNSName is the options for the instance hostname.

+ + +capacityReservationId
+ +string + + + +(Optional) +

CapacityReservationID specifies the target Capacity Reservation into which the instance should be launched.

+ + @@ -20879,6 +20993,20 @@ ELBProtocol Currently only TCP is supported.

+ + +healthCheck
+ +
+TargetGroupHealthCheckAdditionalSpec + + + + +(Optional) +

HealthCheck sets the optional custom health check configuration to the API target group.

+ +

AllowedNamespaces @@ -21474,6 +21602,58 @@ will use AWS Secrets Manager instead.

ELBScheme defines the scheme of a load balancer.

+

ElasticIPPool +

+

+(Appears on:AWSMachineSpec, VPCSpec) +

+

+

ElasticIPPool allows configuring an Elastic IP pool for resources allocating +public IPv4 addresses on public subnets.

+

+ + + + + + + + + + + + + + + + + +
FieldDescription
+publicIpv4Pool
+ +string + +
+(Optional) +

PublicIpv4Pool sets a custom Public IPv4 Pool used to create Elastic IP address for resources +created in public IPv4 subnets. Every IPv4 address, Elastic IP, will be allocated from the custom +Public IPv4 pool that you brought to AWS, instead of Amazon-provided pool. The public IPv4 pool +resource ID starts with ‘ipv4pool-ec2’.

+
+publicIpv4PoolFallbackOrder
+ + +PublicIpv4PoolFallbackOrder + + +
+(Optional) +

PublicIpv4PoolFallBackOrder defines the fallback action when the Public IPv4 Pool has been exhausted, +no more IPv4 address available in the pool.

+

When set to ‘amazon-pool’, the controller checks if the pool has an available IPv4 address; when the pool has reached the +IPv4 limit, the address will be claimed from Amazon-pool (default).

+

When set to ‘none’, the controller will fail the Elastic IP allocation when the publicIpv4Pool is exhausted.

+

Filter

@@ -21977,6 +22157,18 @@ int64 The field will be combined with source security group IDs if specified.

+ + +natGatewaysIPsSource
+ +bool + + + +(Optional) +

NatGatewaysIPsSource uses the NAT gateway IPs as the source for the ingress rule.

+ +

IngressRules @@ -22325,6 +22517,18 @@ bool

PublicIPOnLaunch is the option to associate a public IP on instance launch

+ + +capacityReservationId
+ +string + + + +(Optional) +

CapacityReservationID specifies the target Capacity Reservation into which the instance should be launched.

+ +

InstanceMetadataOptions @@ -22890,6 +23094,16 @@ string +

PublicIpv4PoolFallbackOrder +(string alias)

+

+(Appears on:ElasticIPPool) +

+

+

PublicIpv4PoolFallbackOrder defines the list of available fallback actions when the PublicIpv4Pool is exhausted. +‘none’ lets the controllers return failures when the PublicIpv4Pool is exhausted - no more IPv4 addresses available. +‘amazon-pool’ lets the controllers skip the PublicIpv4Pool and use the Amazon pool, the default.

+

ResourceLifecycle (string alias)

@@ -22993,6 +23207,18 @@ string

Name defines name of S3 Bucket to be created.

+ + +bestEffortDeleteObjects
+ +bool + + + +(Optional) +

BestEffortDeleteObjects defines whether access/permission errors during object deletion should be ignored.

+ +

SecretBackend @@ -23118,6 +23344,15 @@ string +

SubnetSchemaType +(string alias)

+

+(Appears on:VPCSpec) +

+

+

SubnetSchemaType specifies how a given network should be divided into subnets +in the VPC depending on the number of AZs.

+

SubnetSpec

@@ -23260,6 +23495,49 @@ Tags

Tags is a collection of tags describing the resource.

+ + +zoneType
+ + +ZoneType + + + + +(Optional) +

ZoneType defines the type of the zone where the subnet is created.

+

The valid values are availability-zone, local-zone, and wavelength-zone.

+

Subnet with zone type availability-zone (regular) is always selected to create cluster +resources, like Load Balancers, NAT Gateways, Control Plane nodes, etc.

+

Subnet with zone type local-zone or wavelength-zone is not eligible to automatically create +regular cluster resources.

+

The public subnet in availability-zone or local-zone is associated with a regular public +route table with a default route entry to an Internet Gateway.

+

The public subnet in wavelength-zone is associated with a carrier public +route table with default route entry to a Carrier Gateway.

+

The private subnet in the availability-zone is associated with a private route table with +the default route entry to a NAT Gateway created in that zone.

+

The private subnet in the local-zone or wavelength-zone is associated with a private route table with +the default route entry re-using the NAT Gateway in the Region (preferred from the +parent zone, the zone type availability-zone in the region, or first table available).

+ + + + +parentZoneName
+ +string + + + +(Optional) +

ParentZoneName is the zone name where the current subnet’s zone is tied when +the zone is a Local Zone.

+

The subnets in Local Zone or Wavelength Zone locations consume the ParentZoneName +to select the correct private route table to egress traffic to the internet.

+ +

Subnets @@ -23359,16 +23637,25 @@ int64 + + +unhealthyThresholdCount
+ +int64 + + + + + -

TargetGroupSpec +

TargetGroupHealthCheckAPISpec

-(Appears on:Listener) +(Appears on:AWSLoadBalancerSpec)

-

TargetGroupSpec specifies target group settings for a given listener. -This is created first, and the ARN is then passed to the listener.

+

TargetGroupHealthCheckAPISpec defines the optional health check settings for the API target group.

@@ -23380,70 +23667,65 @@ This is created first, and the ARN is then passed to the listener.

- - - -
-name
- -string - -
-

Name of the TargetGroup. Must be unique over the same group of listeners.

-
-port
+intervalSeconds
int64
-

Port is the exposed port

+(Optional) +

The approximate amount of time, in seconds, between health checks of an individual +target.

-protocol
+timeoutSeconds
- -ELBProtocol - +int64
+(Optional) +

The amount of time, in seconds, during which no response from a target means +a failed health check.

-vpcId
+thresholdCount
-string +int64
+(Optional) +

The number of consecutive health check successes required before considering +a target healthy.

-targetGroupHealthCheck
+unhealthyThresholdCount
- -TargetGroupHealthCheck - +int64
-

HealthCheck is the elb health check associated with the load balancer.

+(Optional) +

The number of consecutive health check failures required before considering +a target unhealthy.

-

VPCSpec +

TargetGroupHealthCheckAdditionalSpec

-(Appears on:NetworkSpec) +(Appears on:AdditionalListenerSpec)

-

VPCSpec configures an AWS VPC.

+

TargetGroupHealthCheckAdditionalSpec defines the optional health check settings for the additional target groups.

@@ -23455,7 +23737,192 @@ TargetGroupHealthCheck + + + + + + + + + + + + + + + + + + + + + + + + + + + +
-id
+protocol
+ +string + +
+(Optional) +

The protocol to use for health check connections to the target. When not specified the Protocol +will be the same as the listener.

+
+port
+ +string + +
+(Optional) +

The port the load balancer uses when performing health checks for additional target groups. When +not specified this value will be set to the same as the listener port.

+
+path
+ +string + +
+(Optional) +

The destination for health checks on the targets when using the protocol HTTP or HTTPS, +otherwise the path will be ignored.

+
+intervalSeconds
+ +int64 + +
+(Optional) +

The approximate amount of time, in seconds, between health checks of an individual +target.

+
+timeoutSeconds
+ +int64 + +
+(Optional) +

The amount of time, in seconds, during which no response from a target means +a failed health check.

+
+thresholdCount
+ +int64 + +
+(Optional) +

The number of consecutive health check successes required before considering +a target healthy.

+
+unhealthyThresholdCount
+ +int64 + +
+(Optional) +

The number of consecutive health check failures required before considering +a target unhealthy.

+
+

TargetGroupSpec +

+

+(Appears on:Listener) +

+

+

TargetGroupSpec specifies target group settings for a given listener. +This is created first, and the ARN is then passed to the listener.

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+name
+ +string + +
+

Name of the TargetGroup. Must be unique over the same group of listeners.

+
+port
+ +int64 + +
+

Port is the exposed port

+
+protocol
+ + +ELBProtocol + + +
+
+vpcId
+ +string + +
+
+targetGroupHealthCheck
+ + +TargetGroupHealthCheck + + +
+

HealthCheck is the elb health check associated with the load balancer.

+
+

VPCSpec +

+

+(Appears on:NetworkSpec) +

+

+

VPCSpec configures an AWS VPC.

+

+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+id
string @@ -23479,6 +23946,22 @@ Mutually exclusive with IPAMPool.

+secondaryCidrBlocks
+ + +[]VpcCidrBlock + + +
+(Optional) +

SecondaryCidrBlocks are additional CIDR blocks to be associated when the provider creates a managed VPC. +Defaults to none. Mutually exclusive with IPAMPool. This makes sense to use if, for example, you want to use +a separate IP range for pods (e.g. Cilium ENI mode).

+
ipamPool
@@ -23520,6 +24003,19 @@ string
+carrierGatewayId
+ +string + +
+(Optional) +

CarrierGatewayID is the ID of the carrier gateway associated with the VPC, +for carrier network (Wavelength Zones).

+
tags
@@ -23593,6 +24089,40 @@ For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can b or the instance ID (resource-name). For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name).

+elasticIpPool
+ + +ElasticIPPool + + +
+(Optional) +

ElasticIPPool contains specific configuration to allocate Public IPv4 address (Elastic IP) from user-defined pool +brought to AWS for core infrastructure resources, like NAT Gateways and Public Network Load Balancers for +the API Server.

+
+subnetSchema
+ + +SubnetSchemaType + + +
+(Optional) +

SubnetSchema specifies how CidrBlock should be divided into subnets in the VPC depending on the number of AZs. +PreferPrivate - one private subnet for each AZ plus one other subnet that will be further sub-divided for the public subnets. +PreferPublic - has the reverse logic of PreferPrivate, one public subnet for each AZ plus one other subnet +that will be further sub-divided for the private subnets. +Defaults to PreferPrivate.

+

Volume @@ -23710,6 +24240,43 @@ The key must already exist and be accessible by the controller.

VolumeType describes the EBS volume type. See: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html

+

VpcCidrBlock +

+

+(Appears on:VPCSpec) +

+

+

VpcCidrBlock defines the CIDR block and settings to associate with the managed VPC. Currently, only IPv4 is supported.

+

+ + + + + + + + + + + + + +
FieldDescription
+ipv4CidrBlock
+ +string + +
+

IPv4CidrBlock is the IPv4 CIDR block to associate with the managed VPC.

+
+

ZoneType +(string alias)

+

+(Appears on:SubnetSpec) +

+

+

ZoneType defines listener AWS Availability Zone type.

+

ASGStatus (string alias)

@@ -23979,6 +24546,20 @@ Volume +nonRootVolumes
+ + +[]Volume + + + + +(Optional) +

Configuration options for the non root storage volumes.

+ + + + sshKeyName
string @@ -26078,7 +26659,13 @@ int64 Description -

"AL2_ARM_64"

+

"AL2023_ARM_64_STANDARD"

+

Al2023Arm64 is the AL2023 Arm AMI type.

+ +

"AL2023_x86_64_STANDARD"

+

Al2023x86_64 is the AL2023 x86-64 AMI type.

+ +

"AL2_ARM_64"

Al2Arm64 is the Arm AMI type.

"AL2_x86_64"

@@ -26675,7 +27262,7 @@ bool (Optional)

AutoRepair specifies whether health checks should be enabled for machines -in the NodePool. The default is false.

+in the NodePool. The default is true.

@@ -26760,6 +27347,20 @@ Budgets that have not been successfully drained from a node will be forcibly evi 0 or empty value means that the MachinePool can be drained without any time limitation.

+ + +updateConfig
+ + +RosaUpdateConfig + + + + +(Optional) +

UpdateConfig specifies update configurations.

+ + @@ -26849,6 +27450,66 @@ during an instance refresh. The default is 90.

+

RollingUpdate +

+

+(Appears on:RosaUpdateConfig) +

+

+

RollingUpdate specifies MaxUnavailable & MaxSurge number of nodes during update.

+

+ + + + + + + + + + + + + + + + + +
FieldDescription
+maxUnavailable
+ +k8s.io/apimachinery/pkg/util/intstr.IntOrString + +
+(Optional) +

MaxUnavailable is the maximum number of nodes that can be unavailable during the update. +Value can be an absolute number (ex: 5) or a percentage of desired nodes (ex: 10%). +Absolute number is calculated from percentage by rounding down.

+

MaxUnavailable can not be 0 if MaxSurge is 0, default is 0. +Both MaxUnavailable & MaxSurge must use the same units (absolute value or percentage).

+

Example: when MaxUnavailable is set to 30%, old nodes can be deleted down to 70% of +desired nodes immediately when the rolling update starts. Once new nodes +are ready, more old nodes can be deleted, followed by provisioning new nodes, +ensuring that the total number of nodes available at all times during the +update is at least 70% of desired nodes.

+
+maxSurge
+ +k8s.io/apimachinery/pkg/util/intstr.IntOrString + +
+(Optional) +

MaxSurge is the maximum number of nodes that can be provisioned above the desired number of nodes. +Value can be an absolute number (ex: 5) or a percentage of desired nodes (ex: 10%). +Absolute number is calculated from percentage by rounding up.

+

MaxSurge can not be 0 if MaxUnavailable is 0, default is 1. +Both MaxSurge & MaxUnavailable must use the same units (absolute value or percentage).

+

Example: when MaxSurge is set to 30%, new nodes can be provisioned immediately +when the rolling update starts, such that the total number of old and new +nodes do not exceed 130% of desired nodes. Once old nodes have been +deleted, new nodes can be provisioned, ensuring that total number of nodes +running at any time during the update is at most 130% of desired nodes.

+

RosaMachinePoolAutoScaling

@@ -27002,7 +27663,7 @@ bool (Optional)

AutoRepair specifies whether health checks should be enabled for machines -in the NodePool. The default is false.

+in the NodePool. The default is true.

@@ -27087,6 +27748,20 @@ Budgets that have not been successfully drained from a node will be forcibly evi 0 or empty value means that the MachinePool can be drained without any time limitation.

+ + +updateConfig
+ + +RosaUpdateConfig + + + + +(Optional) +

UpdateConfig specifies update configurations.

+ +

RosaMachinePoolStatus @@ -27229,6 +27904,38 @@ Valid effects are NoSchedule, PreferNoSchedule and NoExecute.

+

RosaUpdateConfig +

+

+(Appears on:RosaMachinePoolSpec) +

+

+

RosaUpdateConfig specifies update configuration

+

+ + + + + + + + + + + + + +
FieldDescription
+rollingUpdate
+ + +RollingUpdate + + +
+(Optional) +

RollingUpdate specifies MaxUnavailable & MaxSurge number of nodes during update.

+

SpotAllocationStrategy (string alias)

diff --git a/docs/book/src/development/amis.md b/docs/book/src/development/amis.md
new file mode 100644
index 0000000000..6d24d65a90
--- /dev/null
+++ b/docs/book/src/development/amis.md
@@ -0,0 +1,82 @@
+# Publish AMIs
+
+Publishing new AMIs is done by manually invoking a GitHub Actions workflow.
+
+> NOTE: the plan is to ultimately fully automate the process in the future (see [this issue](https://github.com/kubernetes-sigs/cluster-api-provider-aws/issues/1982) for progress).
+
+> NOTE: there are some issues with the RHEL based images at present.
+
+## Get build inputs
+
+For a new Kubernetes version that you want to build an AMI for, you will need to determine the following values:
+
+| Input | Description |
+| ----------------- | ----------- |
+| kubernetes_semver | The semver version of k8s you want to build an AMI for. In format vMAJOR.MINOR.PATCH. |
+| kubernetes_series | The release series for the Kubernetes version. In format vMAJOR.MINOR. |
+| kubernetes_deb_version | The version of the Debian package for the release. |
+| kubernetes_rpm_version | The version of the rpm package for the release. |
+| kubernetes_cni_semver | The version of CNI to include. It needs to match the k8s release. |
+| kubernetes_cni_deb_version | The version of the Debian package for the CNI release to use. |
+| crictl_version | The version of the cri-tools package to install into the AMI. |
+
+You can determine these values directly or by looking at the published Debian apt repositories for the k8s release.
+
+## Build
+
+### Using GitHub Actions Workflow
+
+To build the AMI using GitHub Actions you must have write access to the CAPA repository (i.e. be a maintainer or part of the release team).
+
+To build the new version:
+
+1. Go to the GitHub Action
+2. Click the **Start Workflow** button
+3. Fill in the details of the build
+4. Click **Run**
+
+### Manually
+
+> **WARNING:** the manual process should only be followed in exceptional circumstances.
+
+To build manually, you must have admin access to the CNCF AWS account used for the AMIs.
+
+The steps to build manually are:
+
+1. Clone [image-builder](https://github.com/kubernetes-sigs/image-builder)
+2. Open a terminal
+3. Set the AWS environment variables for the CAPA AMI account
+4. Change directory into `images/capi`
+5. Create a new file called `vars.json` with the following content (substituting the values with the build inputs):
+
+```json
+{
+  "kubernetes_rpm_version": "",
+  "kubernetes_semver": "",
+  "kubernetes_series": "",
+  "kubernetes_deb_version": "",
+  "kubernetes_cni_semver": "",
+  "kubernetes_cni_deb_version": "",
+  "crictl_version": ""
+}
+```
+6. Install dependencies by running:
+
+```shell
+make deps-ami
+```
+
+7. Build the AMIs using:
+
+```shell
+PACKER_VAR_FILES=vars.json make build-ami-ubuntu-2204
+PACKER_VAR_FILES=vars.json make build-ami-ubuntu-2404
+PACKER_VAR_FILES=vars.json make build-ami-flatcar
+PACKER_VAR_FILES=vars.json make build-ami-rhel-8
+```
+## Additional Information
+
+- The AMIs are hosted in a CNCF owned AWS account (819546954734).
+- The AWS resources that are needed to support the GitHub Actions workflow are created via terraform. Source is [here](https://github.com/kubernetes/k8s.io/tree/main/infra/aws/terraform/cncf-k8s-infra-aws-capa-ami).
+- OIDC and IAM Roles are used to grant access via short-lived credentials to the GitHub Action workflow instance when it runs.
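A quick way to confirm that a freshly built AMI was actually published to the community account is to query it with `clusterawsadm`. This is only an illustrative sketch: the Kubernetes version, OS, and region below are example values and should be replaced with the build inputs you used.

```shell
# List the community-published CAPA AMIs for one Kubernetes version / OS / region.
# 819546954734 is the CNCF-owned account mentioned above; clusterawsadm releases
# older than v2.6.2 need the owner id passed explicitly.
clusterawsadm ami list \
  --kubernetes-version v1.30.1 \
  --os ubuntu-24.04 \
  --region us-west-2 \
  --owner-id 819546954734
```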
+
diff --git a/docs/book/src/topics/images/amis.md b/docs/book/src/topics/images/amis.md
index ef7743bff0..b7cc5105d6 100644
--- a/docs/book/src/topics/images/amis.md
+++ b/docs/book/src/topics/images/amis.md
@@ -1,11 +1,18 @@
 # AWS Machine Images for CAPA Clusters
 
 CAPA requires a “machine image” containing pre-installed, matching versions of kubeadm and kubelet.
-Machine image is either auto-resolved by CAPA to a public AMI that matches the Kubernetes version in `KubeadmControlPlane` or `MachineDeployment` spec,
-or an appropriate custom image ID for the Kubernetes version can be set in `AWSMachineTemplate` spec.
 
-[Pre-built public AMIs](built-amis.md) are published by the maintainers regularly for each new Kubernetes version.
+## EKS Clusters
 
-[Custom images](custom-amis.md) can be created using [image-builder][image-builder] project.
+For an EKS cluster the default behaviour is to retrieve the AMI to use from SSM. This is so the recommended Amazon Linux AMI is used (see [here](https://docs.aws.amazon.com/eks/latest/userguide/retrieve-ami-id.html)).
+
+Instead of using the auto-resolved AMIs, an appropriate custom image ID for the Kubernetes version can be set in `AWSMachineTemplate` spec.
+
+## Non-EKS Clusters
+
+By default the machine image is auto-resolved by CAPA to a public AMI that matches the Kubernetes version in `KubeadmControlPlane` or `MachineDeployment` spec. These AMIs are published in a community owned AWS account. See [pre-built public AMIs](built-amis.md) for details of the CAPA project published images.
+
+> IMPORTANT:
+> The project doesn't recommend using the public AMIs for production use. Instead, it's recommended that you build your own AMIs for the Kubernetes versions you want to use. The AMI can then be specified in the `AWSMachineTemplate` spec. [Custom images](custom-amis.md) can be created using the [image-builder][image-builder] project.
 
 [image-builder]: https://github.com/kubernetes-sigs/image-builder
diff --git a/docs/book/src/topics/images/built-amis.md b/docs/book/src/topics/images/built-amis.md
index aeba64259c..767dfb33d5 100644
--- a/docs/book/src/topics/images/built-amis.md
+++ b/docs/book/src/topics/images/built-amis.md
@@ -1,18 +1,28 @@
 # Pre-built Kubernetes AMIs
 
-New AMIs are built whenever a new Kubernetes version is released for each supported OS distribution and then published to supported regions.
+New AMIs are built on a best-effort basis when a new Kubernetes version is released for each supported OS distribution and then published to supported regions.
 
-`clusterawsadm ami list` command lists pre-built reference AMIs by Kubernetes version, OS, or AWS region.
-See [clusterawsadm ami list](https://cluster-api-aws.sigs.k8s.io/clusterawsadm/clusterawsadm_ami_list.html) for details.
+## AMI Publication Policy
 
-> **Note:** These images are not updated for security fixes and it is recommended to always use the latest patch version for the Kubernetes version you want to run. For production environments, it is highly recommended to build and use your own custom images.
+- AMIs should only be used for non-production usage. For production environments we recommend that you build and maintain your own AMIs using the image-builder project.
+- AMIs will only be published for the latest release series and 2 previous release series. For example, if the current release series is v1.30 then AMIs will only be published for v1.30, v1.29, v1.28.
+- When there is a new k8s release series then any AMIs no longer covered by the previous point will be deleted.
For example, when v1.31.0 is published then any AMIs for the v1.28 release series will be deleted.
+- Existing AMIs are not updated for security fixes and it is recommended to always use the latest patch version for the Kubernetes version you want to run.
+
+> NOTE: As the old community images were located in an AWS account that the project no longer has access to and because those AMIs have been automatically deleted, we have started publishing images again starting from Kubernetes v1.29.9.
+
+## Finding AMIs
+
+`clusterawsadm ami list` command lists pre-built reference AMIs by Kubernetes version, OS, or AWS region. See [clusterawsadm ami list](https://cluster-api-aws.sigs.k8s.io/clusterawsadm/clusterawsadm_ami_list.html) for details.
+
+If you are using a version of clusterawsadm prior to v2.6.2 then you will need to explicitly specify the owner-id for the community account: `clusterawsadm ami list --owner-id 819546954734`.
 
 ## Supported OS Distributions
-- Amazon Linux 2 (amazon-2)
-- Ubuntu (ubuntu-20.04, ubuntu-22.04)
-- Centos (centos-7)
+- Ubuntu (ubuntu-22.04, ubuntu-24.04)
 - Flatcar (flatcar-stable)
+
+> Note: CentOS (centos-7) and Amazon Linux 2 (amazon-2) were supported, but there are some issues with the AMI build that need fixing. See this [issue](https://github.com/kubernetes-sigs/cluster-api-provider-aws/issues/5142) for details.
+
 ## Supported AWS Regions
 - ap-northeast-1
 - ap-northeast-2
@@ -29,84 +39,3 @@ See [clusterawsadm ami list](https://cluster-api-aws.sigs.k8s.io/clusterawsadm/c
 - us-east-2
 - us-west-1
 - us-west-2
-
-## Most recent AMIs
-
- - - -If you want to query any other AMI which is not listed in the table, then use below command -``` -clusterawsadm ami list --kubernetes-version --region --os -``` \ No newline at end of file diff --git a/pkg/cloud/services/ec2/ami.go b/pkg/cloud/services/ec2/ami.go index 7897aac80f..163b155e9e 100644 --- a/pkg/cloud/services/ec2/ami.go +++ b/pkg/cloud/services/ec2/ami.go @@ -49,9 +49,9 @@ const ( // Arm64ArchitectureTag is the reference AWS uses for arm64 architecture images. Arm64ArchitectureTag = "arm64" - // DefaultMachineAMIOwnerID is a heptio/VMware owned account. Please see: - // https://github.com/kubernetes-sigs/cluster-api-provider-aws/issues/487 - DefaultMachineAMIOwnerID = "258751437250" + // DefaultMachineAMIOwnerID is a CNCF owned account. This was previously a VMWare owned account + // but the project lost access to it. See the k8s.io repo for the infra definition. + DefaultMachineAMIOwnerID = "819546954734" // ubuntuOwnerID is Ubuntu owned account. Please see: // https://ubuntu.com/server/docs/cloud-images/amazon-ec2 @@ -60,16 +60,16 @@ const ( ubuntuOwnerIDUsGov = "513442679011" // Description regex for fetching Ubuntu AMIs for bastion host. - ubuntuImageDescription = "Canonical??Ubuntu??20.04?LTS??amd64?focal?image*" + ubuntuImageDescription = "Canonical??Ubuntu??24.04?LTS??amd64?noble?image*" // defaultMachineAMILookupBaseOS is the default base operating system to use // when looking up machine AMIs. - defaultMachineAMILookupBaseOS = "ubuntu-18.04" + defaultMachineAMILookupBaseOS = "ubuntu-24.04" // DefaultAmiNameFormat is defined in the build/ directory of this project. // The pattern is: // 1. the string value `capa-ami-` - // 2. the baseOS of the AMI, for example: ubuntu-18.04, centos-7, amazon-2 + // 2. the baseOS of the AMI, for example: ubuntu-24.04, centos-7, amazon-2 // 3. the kubernetes version as defined by the packages produced by kubernetes/release with or without v as a prefix, for example: 1.13.0, 1.12.5-mybuild.1, v1.17.3 // 4. a `-` followed by any additional characters. 
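	// Illustrative example (not part of the source): with BaseOS "ubuntu-24.04" and
	// K8sVersion "v1.30.1", the format below expands to the AMI name filter
	// "capa-ami-ubuntu-24.04-?v1.30.1-*".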
DefaultAmiNameFormat = "capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*" diff --git a/pkg/cloud/services/ec2/instances_test.go b/pkg/cloud/services/ec2/instances_test.go index 654403427f..7c129cb0a2 100644 --- a/pkg/cloud/services/ec2/instances_test.go +++ b/pkg/cloud/services/ec2/instances_test.go @@ -1105,7 +1105,7 @@ func TestCreateInstance(t *testing.T) { }, }, expect: func(m *mocks.MockEC2APIMockRecorder) { - amiName, err := GenerateAmiName("capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*", "ubuntu-18.04", "1.16.1") + amiName, err := GenerateAmiName("capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*", "ubuntu-24.04", "1.16.1") if err != nil { t.Fatalf("Failed to process ami format: %v", err) } @@ -1258,7 +1258,7 @@ func TestCreateInstance(t *testing.T) { }, }, expect: func(m *mocks.MockEC2APIMockRecorder) { - amiName, err := GenerateAmiName("capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*", "ubuntu-18.04", "1.16.1") + amiName, err := GenerateAmiName("capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*", "ubuntu-24.04", "1.16.1") if err != nil { t.Fatalf("Failed to process ami format: %v", err) } @@ -1412,7 +1412,7 @@ func TestCreateInstance(t *testing.T) { }, }, expect: func(m *mocks.MockEC2APIMockRecorder) { - amiName, err := GenerateAmiName("capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*", "ubuntu-18.04", "1.16.1") + amiName, err := GenerateAmiName("capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*", "ubuntu-24.04", "1.16.1") if err != nil { t.Fatalf("Failed to process ami format: %v", err) } diff --git a/pkg/cloud/services/userdata/bastion.go b/pkg/cloud/services/userdata/bastion.go index da68768ca2..256a3b4985 100644 --- a/pkg/cloud/services/userdata/bastion.go +++ b/pkg/cloud/services/userdata/bastion.go @@ -26,7 +26,7 @@ curl -s -o $BASTION_BOOTSTRAP_FILE $BASTION_BOOTSTRAP chmod +x $BASTION_BOOTSTRAP_FILE # This gets us far enough in the bastion script to be useful. -apt-get -y update && apt-get -y install python-pip +apt-get -y update && apt-get -y install python3-pip pip install --upgrade pip &> /dev/null ./$BASTION_BOOTSTRAP_FILE --enable true diff --git a/test/e2e/data/e2e_conf.yaml b/test/e2e/data/e2e_conf.yaml index b272860f6b..bb3076f39a 100644 --- a/test/e2e/data/e2e_conf.yaml +++ b/test/e2e/data/e2e_conf.yaml @@ -176,10 +176,10 @@ variables: # allowing the same e2e config file to be re-used in different Prow jobs e.g. each one with a K8s version permutation. # The following Kubernetes versions should be the latest versions with already published kindest/node images. # This avoids building node images in the default case which improves the test duration significantly. 
- KUBERNETES_VERSION_MANAGEMENT: "v1.29.0" - KUBERNETES_VERSION: "v1.26.6" - KUBERNETES_VERSION_UPGRADE_TO: "v1.26.6" - KUBERNETES_VERSION_UPGRADE_FROM: "v1.25.3" + KUBERNETES_VERSION_MANAGEMENT: "v1.29.8" + KUBERNETES_VERSION: "v1.29.9" + KUBERNETES_VERSION_UPGRADE_TO: "v1.29.9" + KUBERNETES_VERSION_UPGRADE_FROM: "v1.29.8" # Pre and post 1.23 Kubernetes versions are being used for CSI upgrade tests PRE_1_23_KUBERNETES_VERSION: "v1.22.17" POST_1_23_KUBERNETES_VERSION: "v1.23.15" @@ -190,11 +190,11 @@ variables: AWS_NODE_MACHINE_TYPE: t3.large AWS_MACHINE_TYPE_VCPU_USAGE: 2 AWS_SSH_KEY_NAME: "cluster-api-provider-aws-sigs-k8s-io" - CONFORMANCE_CI_ARTIFACTS_KUBERNETES_VERSION: "v1.26.6" + CONFORMANCE_CI_ARTIFACTS_KUBERNETES_VERSION: "v1.29.9" CONFORMANCE_WORKER_MACHINE_COUNT: "5" CONFORMANCE_CONTROL_PLANE_MACHINE_COUNT: "3" - ETCD_VERSION_UPGRADE_TO: "3.5.6-0" - COREDNS_VERSION_UPGRADE_TO: "v1.9.3" + ETCD_VERSION_UPGRADE_TO: "3.5.11-0" + COREDNS_VERSION_UPGRADE_TO: "v1.11.1" MULTI_TENANCY_ROLE_NAME: "multi-tenancy-role" MULTI_TENANCY_NESTED_ROLE_NAME: "multi-tenancy-nested-role" IP_FAMILY: "IPv4" @@ -206,7 +206,7 @@ variables: INIT_WITH_BINARY_V1BETA1: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.0/clusterctl-{OS}-{ARCH}" # INIT_WITH_KUBERNETES_VERSION are only used by the clusterctl upgrade test to initialize # the management cluster to be upgraded. - INIT_WITH_KUBERNETES_VERSION: "v1.25.0" + INIT_WITH_KUBERNETES_VERSION: "v1.29.8" EXP_BOOTSTRAP_FORMAT_IGNITION: "true" EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION: "true" EXP_EXTERNAL_RESOURCE_GC: "true" diff --git a/test/e2e/shared/defaults.go b/test/e2e/shared/defaults.go index 13e77c84f7..ca08e183d5 100644 --- a/test/e2e/shared/defaults.go +++ b/test/e2e/shared/defaults.go @@ -36,8 +36,8 @@ import ( // Constants. const ( DefaultSSHKeyPairName = "cluster-api-provider-aws-sigs-k8s-io" - AMIPrefix = "capa-ami-ubuntu-18.04-" - DefaultImageLookupOrg = "258751437250" + AMIPrefix = "capa-ami-ubuntu-24.04-" + DefaultImageLookupOrg = "819546954734" KubernetesVersion = "KUBERNETES_VERSION" KubernetesVersionManagement = "KUBERNETES_VERSION_MANAGEMENT" CNIPath = "CNI" diff --git a/test/e2e/suites/unmanaged/helpers_test.go b/test/e2e/suites/unmanaged/helpers_test.go index 19a1394088..c0da263b75 100644 --- a/test/e2e/suites/unmanaged/helpers_test.go +++ b/test/e2e/suites/unmanaged/helpers_test.go @@ -529,7 +529,7 @@ func makeJoinBootstrapConfigTemplate(namespace, name string) *bootstrapv1.Kubead JoinConfiguration: &bootstrapv1.JoinConfiguration{ NodeRegistration: bootstrapv1.NodeRegistrationOptions{ Name: "{{ ds.meta_data.local_hostname }}", - KubeletExtraArgs: map[string]string{"cloud-provider": "aws"}, + KubeletExtraArgs: map[string]string{"cloud-provider": "external"}, }, }, }, diff --git a/test/e2e/suites/unmanaged/unmanaged_CAPI_clusterclass_test.go b/test/e2e/suites/unmanaged/unmanaged_CAPI_clusterclass_test.go index 26b50c731d..e7f05f18fe 100644 --- a/test/e2e/suites/unmanaged/unmanaged_CAPI_clusterclass_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_CAPI_clusterclass_test.go @@ -41,7 +41,7 @@ var _ = ginkgo.Context("[unmanaged] [Cluster API Framework] [ClusterClass]", fun Expect(e2eCtx.Environment.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. 
BootstrapClusterProxy can't be nil") }) - ginkgo.Describe("Self Hosted Spec [ClusterClass]", func() { + ginkgo.PDescribe("Self Hosted Spec [ClusterClass]", func() { ginkgo.BeforeEach(func() { // As the resources cannot be defined by the It() clause in CAPI tests, using the largest values required for all It() tests in this CAPI test. requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50} @@ -65,12 +65,14 @@ var _ = ginkgo.Context("[unmanaged] [Cluster API Framework] [ClusterClass]", fun }) }) - ginkgo.Describe("Cluster Upgrade Spec - HA control plane with workers [K8s-Upgrade] [ClusterClass]", func() { + ginkgo.PDescribe("Cluster Upgrade Spec - HA control plane with workers [K8s-Upgrade] [ClusterClass]", func() { ginkgo.BeforeEach(func() { - // As the resources cannot be defined by the It() clause in CAPI tests, using the largest values required for all It() tests in this CAPI test. - requiredResources = &shared.TestResource{EC2Normal: 5 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 2, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, "capi-cluster-upgrade-clusterclass-test") - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + if !e2eCtx.Settings.SkipQuotas { + // As the resources cannot be defined by the It() clause in CAPI tests, using the largest values required for all It() tests in this CAPI test. + requiredResources = &shared.TestResource{EC2Normal: 5 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 2, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, "capi-cluster-upgrade-clusterclass-test") + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + } }) capi_e2e.ClusterUpgradeConformanceSpec(ctx, func() capi_e2e.ClusterUpgradeConformanceSpecInput { @@ -87,16 +89,20 @@ var _ = ginkgo.Context("[unmanaged] [Cluster API Framework] [ClusterClass]", fun }) ginkgo.AfterEach(func() { - shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + if !e2eCtx.Settings.SkipQuotas { + shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } }) }) ginkgo.Describe("ClusterClass Changes Spec - SSA immutability checks [ClusterClass]", func() { ginkgo.BeforeEach(func() { - // As the resources cannot be defined by the It() clause in CAPI tests, using the largest values required for all It() tests in this CAPI test. - requiredResources = &shared.TestResource{EC2Normal: 5 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 2, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, "capi-cluster-ssa-clusterclass-test") - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + if !e2eCtx.Settings.SkipQuotas { + // As the resources cannot be defined by the It() clause in CAPI tests, using the largest values required for all It() tests in this CAPI test. 
+ requiredResources = &shared.TestResource{EC2Normal: 5 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 2, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, "capi-cluster-ssa-clusterclass-test") + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + } }) capi_e2e.ClusterClassChangesSpec(ctx, func() capi_e2e.ClusterClassChangesSpecInput { @@ -133,7 +139,9 @@ var _ = ginkgo.Context("[unmanaged] [Cluster API Framework] [ClusterClass]", fun }) ginkgo.AfterEach(func() { - shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + if !e2eCtx.Settings.SkipQuotas { + shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } }) }) }) diff --git a/test/e2e/suites/unmanaged/unmanaged_CAPI_test.go b/test/e2e/suites/unmanaged/unmanaged_CAPI_test.go index a4169d67bc..485fbdcd25 100644 --- a/test/e2e/suites/unmanaged/unmanaged_CAPI_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_CAPI_test.go @@ -88,10 +88,12 @@ var _ = ginkgo.Context("[unmanaged] [Cluster API Framework]", func() { ginkgo.Describe("Self Hosted Spec", func() { ginkgo.BeforeEach(func() { - // As the resources cannot be defined by the It() clause in CAPI tests, using the largest values required for all It() tests in this CAPI test. - requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, "capi-clusterctl-self-hosted-test") - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + if !e2eCtx.Settings.SkipQuotas { + // As the resources cannot be defined by the It() clause in CAPI tests, using the largest values required for all It() tests in this CAPI test. + requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, "capi-clusterctl-self-hosted-test") + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + } }) capi_e2e.SelfHostedSpec(ctx, func() capi_e2e.SelfHostedSpecInput { @@ -105,16 +107,20 @@ var _ = ginkgo.Context("[unmanaged] [Cluster API Framework]", func() { } }) ginkgo.AfterEach(func() { - shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + if !e2eCtx.Settings.SkipQuotas { + shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } }) }) - ginkgo.Describe("Clusterctl Upgrade Spec [from latest v1beta1 release to v1beta2]", func() { + ginkgo.PDescribe("Clusterctl Upgrade Spec [from latest v1beta1 release to v1beta2]", func() { ginkgo.BeforeEach(func() { - // As the resources cannot be defined by the It() clause in CAPI tests, using the largest values required for all It() tests in this CAPI test. 
- requiredResources = &shared.TestResource{EC2Normal: 5 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 2, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, "capi-clusterctl-upgrade-test-v1beta1-to-v1beta2") - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + if !e2eCtx.Settings.SkipQuotas { + // As the resources cannot be defined by the It() clause in CAPI tests, using the largest values required for all It() tests in this CAPI test. + requiredResources = &shared.TestResource{EC2Normal: 5 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 2, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, "capi-clusterctl-upgrade-test-v1beta1-to-v1beta2") + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + } }) capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { @@ -135,7 +141,9 @@ var _ = ginkgo.Context("[unmanaged] [Cluster API Framework]", func() { } }) ginkgo.AfterEach(func() { - shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + if !e2eCtx.Settings.SkipQuotas { + shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } }) }) diff --git a/test/e2e/suites/unmanaged/unmanaged_functional_test.go b/test/e2e/suites/unmanaged/unmanaged_functional_test.go index 2b9ab782c8..e6de547542 100644 --- a/test/e2e/suites/unmanaged/unmanaged_functional_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_functional_test.go @@ -65,11 +65,13 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { ginkgo.Describe("Workload cluster with EFS driver", func() { ginkgo.It("should pass dynamic provisioning test", func() { specName := "functional-efs-support" - requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, "efs-support-test") + if !e2eCtx.Settings.SkipQuotas { + requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, "efs-support-test") - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) - defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } Expect(e2eCtx.E2EConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName) Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.KubernetesVersion)) @@ -118,12 +120,15 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { ginkgo.Describe("GPU-enabled cluster test", func() { ginkgo.It("should create cluster with single worker", func() { specName := "functional-gpu-cluster" - // Change the multiplier for EC2GPU if GPU type is changed. 
g4dn.xlarge uses 2 vCPU
-			requiredResources = &shared.TestResource{EC2GPU: 2 * 2, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50}
-			requiredResources.WriteRequestedResources(e2eCtx, "gpu-test")
 			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
-			Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
-			defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			if !e2eCtx.Settings.SkipQuotas {
+				// Change the multiplier for EC2GPU if GPU type is changed. g4dn.xlarge uses 2 vCPU
+				requiredResources = &shared.TestResource{EC2GPU: 2 * 2, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50}
+				requiredResources.WriteRequestedResources(e2eCtx, "gpu-test")
+
+				Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+				defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			}
 			ginkgo.By("Creating cluster with a single worker")
 			clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
@@ -166,10 +171,12 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 		ginkgo.It("should create cluster with nested assumed role", func() {
 			// Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
 			specName := "functional-multitenancy-nested"
-			requiredResources = &shared.TestResource{EC2Normal: 1 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50}
-			requiredResources.WriteRequestedResources(e2eCtx, specName)
-			Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
-			defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			if !e2eCtx.Settings.SkipQuotas {
+				requiredResources = &shared.TestResource{EC2Normal: 1 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50}
+				requiredResources.WriteRequestedResources(e2eCtx, specName)
+				Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+				defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			}
 			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
 			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
 			Expect(shared.SetMultitenancyEnvVars(e2eCtx.AWSSession)).To(Succeed())
@@ -241,11 +248,13 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 	ginkgo.Context("in same namespace", func() {
 		ginkgo.It("should create the clusters", func() {
 			specName := "upgrade-to-main-branch-k8s"
-			requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 3, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50}
-			requiredResources.WriteRequestedResources(e2eCtx, "upgrade-to-master-test")
-			Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
-			defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
 			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
+			if !e2eCtx.Settings.SkipQuotas {
+				requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 3, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50}
+				requiredResources.WriteRequestedResources(e2eCtx, "upgrade-to-master-test")
+				Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+				defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			}
 			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
 			ginkgo.By("Creating first cluster with single control plane")
 			cluster1Name := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
@@ -297,10 +306,12 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 	ginkgo.PDescribe("CSI=in-tree CCM=in-tree AWSCSIMigration=off: upgrade to v1.23", func() {
 		ginkgo.It("should create volumes dynamically with in tree CSI driver and in tree cloud provider", func() {
 			specName := "csimigration-off-upgrade"
-			requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50}
-			requiredResources.WriteRequestedResources(e2eCtx, specName)
-			Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
-			defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			if !e2eCtx.Settings.SkipQuotas {
+				requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50}
+				requiredResources.WriteRequestedResources(e2eCtx, specName)
+				Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+				defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			}
 			namespace := shared.SetupNamespace(ctx, specName, e2eCtx)
 			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
 
@@ -363,13 +374,15 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 		})
 	})
 
-	ginkgo.Describe("CSI=external CCM=in-tree AWSCSIMigration=on: upgrade to v1.23", func() {
+	ginkgo.PDescribe("CSI=external CCM=in-tree AWSCSIMigration=on: upgrade to v1.23", func() {
 		ginkgo.It("should create volumes dynamically with external CSI driver and in tree cloud provider", func() {
 			specName := "only-csi-external-upgrade"
-			requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50}
-			requiredResources.WriteRequestedResources(e2eCtx, specName)
-			Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
-			defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			if !e2eCtx.Settings.SkipQuotas {
+				requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50}
+				requiredResources.WriteRequestedResources(e2eCtx, specName)
+				Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+				defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			}
 			namespace := shared.SetupNamespace(ctx, specName, e2eCtx)
 			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
 			ginkgo.By("Creating first cluster with single control plane")
@@ -433,13 +446,15 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 		})
 	})
 
-	ginkgo.Describe("CSI=external CCM=external AWSCSIMigration=on: upgrade to v1.23", func() {
+	ginkgo.PDescribe("CSI=external CCM=external AWSCSIMigration=on: upgrade to v1.23", func() {
 		ginkgo.It("should create volumes dynamically with external CSI driver and external cloud provider", func() {
 			specName := "csi-ccm-external-upgrade"
-			requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50}
-			requiredResources.WriteRequestedResources(e2eCtx, specName)
-			Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
-			defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			if !e2eCtx.Settings.SkipQuotas {
+				requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50}
+				requiredResources.WriteRequestedResources(e2eCtx, specName)
+				Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+				defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			}
 			namespace := shared.SetupNamespace(ctx, specName, e2eCtx)
 			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
 
@@ -506,10 +521,12 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 	ginkgo.Describe("Workload cluster with AWS SSM Parameter as the Secret Backend", func() {
 		ginkgo.It("should be creatable and deletable", func() {
 			specName := "functional-test-ssm-parameter-store"
-			requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50}
-			requiredResources.WriteRequestedResources(e2eCtx, specName)
-			Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
-			defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			if !e2eCtx.Settings.SkipQuotas {
+				requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50}
+				requiredResources.WriteRequestedResources(e2eCtx, specName)
+				Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+				defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			}
 			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
 			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
 
@@ -540,10 +557,12 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 	ginkgo.Describe("MachineDeployment misconfigurations", func() {
 		ginkgo.It("MachineDeployment misconfigurations", func() {
 			specName := "functional-test-md-misconfigurations"
-			requiredResources = &shared.TestResource{EC2Normal: 1 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50}
-			requiredResources.WriteRequestedResources(e2eCtx, specName)
-			Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
-			defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			if !e2eCtx.Settings.SkipQuotas {
+				requiredResources = &shared.TestResource{EC2Normal: 1 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50}
+				requiredResources.WriteRequestedResources(e2eCtx, specName)
+				Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+				defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			}
 			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
 			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
 			ginkgo.By("Creating a cluster")
@@ -591,10 +610,12 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 	ginkgo.Describe("Workload cluster in multiple AZs", func() {
 		ginkgo.It("It should be creatable and deletable", func() {
 			specName := "functional-test-multi-az"
-			requiredResources = &shared.TestResource{EC2Normal: 3 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3}
-			requiredResources.WriteRequestedResources(e2eCtx, specName)
-			Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
-			defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			if !e2eCtx.Settings.SkipQuotas {
+				requiredResources = &shared.TestResource{EC2Normal: 3 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3}
+				requiredResources.WriteRequestedResources(e2eCtx, specName)
+				Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+				defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			}
 			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
 			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
 			ginkgo.By("Creating a cluster")
@@ -646,10 +667,12 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 	ginkgo.Context("in different namespaces with machine failures", func() {
 		ginkgo.It("should setup namespaces correctly for the two clusters", func() {
 			specName := "functional-test-multi-namespace"
-			requiredResources = &shared.TestResource{EC2Normal: 4 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 6, EventBridgeRules: 50}
-			requiredResources.WriteRequestedResources(e2eCtx, specName)
-			Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
-			defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			if !e2eCtx.Settings.SkipQuotas {
+				requiredResources = &shared.TestResource{EC2Normal: 4 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 6, EventBridgeRules: 50}
+				requiredResources.WriteRequestedResources(e2eCtx, specName)
+				Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+				defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			}
 
 			ginkgo.By("Creating first cluster with single control plane")
 			ns1, cf1 := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{
@@ -729,10 +752,12 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 	ginkgo.Context("Defining clusters in the same namespace", func() {
 		specName := "functional-test-multi-cluster-single-namespace"
 		ginkgo.It("should create the clusters", func() {
-			requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 6, EventBridgeRules: 50}
-			requiredResources.WriteRequestedResources(e2eCtx, specName)
-			Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
-			defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			if !e2eCtx.Settings.SkipQuotas {
+				requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 6, EventBridgeRules: 50}
+				requiredResources.WriteRequestedResources(e2eCtx, specName)
+				Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+				defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			}
 			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
 			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
 			ginkgo.By("Creating first cluster with single control plane")
@@ -757,10 +782,12 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 	ginkgo.Describe("Workload cluster with spot instances", func() {
 		ginkgo.It("should be creatable and deletable", func() {
 			specName := "functional-test-spot-instances"
-			requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50}
-			requiredResources.WriteRequestedResources(e2eCtx, specName)
-			Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
-			defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			if !e2eCtx.Settings.SkipQuotas {
+				requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50}
+				requiredResources.WriteRequestedResources(e2eCtx, specName)
+				Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+				defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			}
 			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
 			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
 			ginkgo.By("Creating a cluster")
@@ -806,9 +833,11 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 
 		// Some infrastructure creation was moved to a setup node to better organize the test.
 		ginkgo.JustBeforeEach(func() {
-			requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 5, EventBridgeRules: 50}
-			requiredResources.WriteRequestedResources(e2eCtx, specName)
-			Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+			if !e2eCtx.Settings.SkipQuotas {
+				requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 5, EventBridgeRules: 50}
+				requiredResources.WriteRequestedResources(e2eCtx, specName)
+				Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+			}
 			namespace = shared.SetupSpecNamespace(ctx, specName, e2eCtx)
 			ginkgo.By("Creating the management cluster infrastructure")
 			mgmtClusterInfra.New(shared.AWSInfrastructureSpec{
@@ -836,7 +865,9 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 		// Infrastructure cleanup is done in setup node so it is not bypassed if there is a test failure in the subject node.
 		ginkgo.JustAfterEach(func() {
-			shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			if !e2eCtx.Settings.SkipQuotas {
+				shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			}
 			shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
 			if !e2eCtx.Settings.SkipCleanup {
 				ginkgo.By("Deleting peering connection")