diff --git a/.archive/unmanaged_CAPI_dedicated_host.go b/.archive/unmanaged_CAPI_dedicated_host.go new file mode 100644 index 0000000000..e1d3cd7444 --- /dev/null +++ b/.archive/unmanaged_CAPI_dedicated_host.go @@ -0,0 +1,113 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unmanaged + +import ( + "context" + + "github.com/gofrs/flock" + "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + + "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" + capi_e2e "sigs.k8s.io/cluster-api/test/e2e" +) + +// setupNamespace initializes the namespace for the test. +func setupNamespace(ctx context.Context, e2eCtx *shared.E2EContext) *corev1.Namespace { + Expect(e2eCtx.Environment.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. BootstrapClusterProxy can't be nil") + return shared.SetupSpecNamespace(ctx, "capa-dedicate-host", e2eCtx) +} + +// setupRequiredResources allocates the required resources for the test. +func setupRequiredResources(e2eCtx *shared.E2EContext) *shared.TestResource { + requiredResources := &shared.TestResource{ + EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, + IGW: 1, + NGW: 1, + VPC: 1, + ClassicLB: 1, + EIP: 3, + EventBridgeRules: 50, + } + requiredResources.WriteRequestedResources(e2eCtx, "capa-dedicated-hosts-test") + + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + return requiredResources +} + +// releaseResources releases the resources allocated for the test. +func releaseResources(requiredResources *shared.TestResource, e2eCtx *shared.E2EContext) { + shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) +} + +// runQuickStartSpec executes the QuickStartSpec test. +func runQuickStartSpec(e2eCtx *shared.E2EContext) { + capi_e2e.QuickStartSpec(context.TODO(), func() capi_e2e.QuickStartSpecInput { + return capi_e2e.QuickStartSpecInput{ + E2EConfig: e2eCtx.E2EConfig, + ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath, + BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + ArtifactFolder: e2eCtx.Settings.ArtifactFolder, + SkipCleanup: e2eCtx.Settings.SkipCleanup, + } + }) +} + +// cleanupNamespace cleans up the namespace and dumps resources. 
+func cleanupNamespace(ctx context.Context, namespace *corev1.Namespace, e2eCtx *shared.E2EContext) { + shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) +} + +var _ = ginkgo.Context("[unmanaged] [dedicated-host]", func() { + var ( + namespace *corev1.Namespace + ctx context.Context + requiredResources *shared.TestResource + dedicatedHostID string + ) + + ginkgo.BeforeEach(func() { + ctx = context.TODO() + namespace = setupNamespace(ctx, e2eCtx) + dedicatedHostID, _ = shared.GetDedicatedHost(e2eCtx) + }) + + ginkgo.Describe("Running the dedicated-hosts spec", func() { + ginkgo.BeforeEach(func() { + requiredResources = setupRequiredResources(e2eCtx) + // e2eCtx.Settings.DedicatedHostID = dedicatedHostID + }) + + ginkgo.It("should run the QuickStartSpec", func() { + runQuickStartSpec(e2eCtx) + }) + + ginkgo.AfterEach(func() { + shared.DeleteDedicatedHost(e2eCtx, dedicatedHostID) + releaseResources(requiredResources, e2eCtx) + }) + }) + + ginkgo.AfterEach(func() { + cleanupNamespace(ctx, namespace, e2eCtx) + }) +}) diff --git a/.gitignore b/.gitignore index ada3a863fa..4919a99f3f 100644 --- a/.gitignore +++ b/.gitignore @@ -27,6 +27,7 @@ envfile # kubeconfigs kind.kubeconfig minikube.kubeconfig +capi-test.kubeconfig kubeconfig !kubeconfig/ @@ -63,3 +64,5 @@ dist _artifacts awsiamconfiguration.yaml cloudformation.yaml +test-cluster.yaml +__debug* diff --git a/api/v1beta1/awscluster_conversion.go b/api/v1beta1/awscluster_conversion.go index 8c8e38a297..fef74ffa96 100644 --- a/api/v1beta1/awscluster_conversion.go +++ b/api/v1beta1/awscluster_conversion.go @@ -62,6 +62,8 @@ func (src *AWSCluster) ConvertTo(dstRaw conversion.Hub) error { dst.Status.Bastion.NetworkInterfaceType = restored.Status.Bastion.NetworkInterfaceType dst.Status.Bastion.CapacityReservationID = restored.Status.Bastion.CapacityReservationID dst.Status.Bastion.MarketType = restored.Status.Bastion.MarketType + dst.Status.Bastion.HostAffinity = restored.Status.Bastion.HostAffinity + dst.Status.Bastion.HostID = restored.Status.Bastion.HostID } dst.Spec.Partition = restored.Spec.Partition diff --git a/api/v1beta1/awsmachine_conversion.go b/api/v1beta1/awsmachine_conversion.go index c5ac50ade1..87a9b67c75 100644 --- a/api/v1beta1/awsmachine_conversion.go +++ b/api/v1beta1/awsmachine_conversion.go @@ -56,6 +56,9 @@ func (src *AWSMachine) ConvertTo(dstRaw conversion.Hub) error { } } + dst.Spec.HostAffinity = restored.Spec.HostAffinity + dst.Spec.HostID = restored.Spec.HostID + return nil } @@ -119,6 +122,8 @@ func (r *AWSMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Template.Spec.ElasticIPPool.PublicIpv4PoolFallBackOrder = restored.Spec.Template.Spec.ElasticIPPool.PublicIpv4PoolFallBackOrder } } + dst.Spec.Template.Spec.HostAffinity = restored.Spec.Template.Spec.HostAffinity + dst.Spec.Template.Spec.HostID = restored.Spec.Template.Spec.HostID return nil } diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go index 8a5a11b60c..1b89ffb498 100644 --- a/api/v1beta1/zz_generated.conversion.go +++ b/api/v1beta1/zz_generated.conversion.go @@ -1435,6 +1435,8 @@ func autoConvert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in *v1beta2.AW // WARNING: in.PrivateDNSName requires manual conversion: does not exist in peer-type // WARNING: in.CapacityReservationID requires manual conversion: does not exist in peer-type // WARNING: in.MarketType requires manual conversion: does not exist in peer-type + // WARNING: in.HostID requires manual conversion: does not exist in 
peer-type + // WARNING: in.HostAffinity requires manual conversion: does not exist in peer-type return nil } @@ -2039,6 +2041,8 @@ func autoConvert_v1beta2_Instance_To_v1beta1_Instance(in *v1beta2.Instance, out // WARNING: in.PublicIPOnLaunch requires manual conversion: does not exist in peer-type // WARNING: in.CapacityReservationID requires manual conversion: does not exist in peer-type // WARNING: in.MarketType requires manual conversion: does not exist in peer-type + // WARNING: in.HostID requires manual conversion: does not exist in peer-type + // WARNING: in.HostAffinity requires manual conversion: does not exist in peer-type return nil } diff --git a/api/v1beta2/awsmachine_types.go b/api/v1beta2/awsmachine_types.go index 191e46bddf..a8f549d23a 100644 --- a/api/v1beta2/awsmachine_types.go +++ b/api/v1beta2/awsmachine_types.go @@ -223,6 +223,16 @@ type AWSMachineSpec struct { // If marketType is not specified and spotMarketOptions is provided, the marketType defaults to "Spot". // +optional MarketType MarketType `json:"marketType,omitempty"` + + // HostID specifies the Dedicated Host on which the instance should be launched. + // +optional + HostID *string `json:"hostID,omitempty"` + + // Affinity specifies the dedicated host affinity setting for the instance. + // When affinity is set to Host, an instance launched onto a specific host always restarts on the same host if stopped. + // +optional + // +kubebuilder:validation:Enum:=Default;Host + HostAffinity *string `json:"hostAffinity,omitempty"` } // CloudInit defines options related to the bootstrapping systems where diff --git a/api/v1beta2/types.go b/api/v1beta2/types.go index bee54a9f0b..fe513b6402 100644 --- a/api/v1beta2/types.go +++ b/api/v1beta2/types.go @@ -273,6 +273,14 @@ type Instance struct { // If marketType is not specified and spotMarketOptions is provided, the marketType defaults to "Spot". // +optional MarketType MarketType `json:"marketType,omitempty"` + + // HostID specifies the dedicated host on which the instance should be launched + // +optional + HostID *string `json:"hostID,omitempty"` + + // Affinity specifies the dedicated host affinity setting for the instance. + // +optional + HostAffinity *string `json:"hostAffinity,omitempty"` } // MarketType describes the market type of an Instance diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index a3ef61f24e..297dcfdfc7 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -771,6 +771,16 @@ func (in *AWSMachineSpec) DeepCopyInto(out *AWSMachineSpec) { *out = new(string) **out = **in } + if in.HostID != nil { + in, out := &in.HostID, &out.HostID + *out = new(string) + **out = **in + } + if in.HostAffinity != nil { + in, out := &in.HostAffinity, &out.HostAffinity + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineSpec. @@ -1610,6 +1620,16 @@ func (in *Instance) DeepCopyInto(out *Instance) { *out = new(string) **out = **in } + if in.HostID != nil { + in, out := &in.HostID, &out.HostID + *out = new(string) + **out = **in + } + if in.HostAffinity != nil { + in, out := &in.HostAffinity, &out.HostAffinity + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance. 
diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml index b9064ea810..556c2e6b32 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml @@ -1136,6 +1136,14 @@ spec: description: Specifies whether enhanced networking with ENA is enabled. type: boolean + hostAffinity: + description: Affinity specifies the dedicated host affinity setting + for the instance. + type: string + hostID: + description: HostID specifies the dedicated host on which the + instance should be launched + type: string iamProfile: description: The name of the IAM instance profile associated with the instance, if applicable. @@ -3224,6 +3232,14 @@ spec: description: Specifies whether enhanced networking with ENA is enabled. type: boolean + hostAffinity: + description: Affinity specifies the dedicated host affinity setting + for the instance. + type: string + hostID: + description: HostID specifies the dedicated host on which the + instance should be launched + type: string iamProfile: description: The name of the IAM instance profile associated with the instance, if applicable. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml index 0684070332..7a5a3c4ebc 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml @@ -2103,6 +2103,14 @@ spec: description: Specifies whether enhanced networking with ENA is enabled. type: boolean + hostAffinity: + description: Affinity specifies the dedicated host affinity setting + for the instance. + type: string + hostID: + description: HostID specifies the dedicated host on which the + instance should be launched + type: string iamProfile: description: The name of the IAM instance profile associated with the instance, if applicable. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml index 5baacc3e2f..bbf467f1ee 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml @@ -686,6 +686,18 @@ spec: - message: allowed values are 'none' and 'amazon-pool' rule: self in ['none','amazon-pool'] type: object + hostAffinity: + description: |- + Affinity specifies the dedicated host affinity setting for the instance. + When affinity is set to Host, an instance launched onto a specific host always restarts on the same host if stopped. + enum: + - Default + - Host + type: string + hostID: + description: HostID specifies the Dedicated Host on which the instance + should be launched.
+ type: string iamInstanceProfile: description: IAMInstanceProfile is a name of an IAM instance profile to assign to the instance diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml index 40cf10944a..80e7bcde27 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml @@ -620,6 +620,18 @@ spec: - message: allowed values are 'none' and 'amazon-pool' rule: self in ['none','amazon-pool'] type: object + hostAffinity: + description: |- + Affinity specifies the dedicated host affinity setting for the instance. + When affinity is set to Host, an instance launched onto a specific host always restarts on the same host if stopped. + enum: + - Default + - Host + type: string + hostID: + description: HostID specifies the Dedicated Host on which + the instance should be launched. + type: string iamInstanceProfile: description: IAMInstanceProfile is a name of an IAM instance profile to assign to the instance diff --git a/devbox.json b/devbox.json index 9525803140..4286f16112 100644 --- a/devbox.json +++ b/devbox.json @@ -1,7 +1,6 @@ { "$schema": "https://raw.githubusercontent.com/jetify-com/devbox/0.13.7/.schema/devbox.schema.json", "packages": [ - "go@1.22", "kind@latest", "docker@latest", "jq@latest", @@ -11,7 +10,8 @@ "tilt@latest", "awscli2@latest", "direnv@latest", - "kustomize@latest" + "kustomize@5.5.0", + "go@1.24.1" ], "shell": { "init_hook": [ @@ -27,4 +27,4 @@ ] } } -} \ No newline at end of file +} diff --git a/devbox.lock b/devbox.lock index 39efc10668..0f66b74ab4 100644 --- a/devbox.lock +++ b/devbox.lock @@ -236,51 +236,51 @@ "github:NixOS/nixpkgs/nixpkgs-unstable": { "resolved": "github:NixOS/nixpkgs/3549532663732bfd89993204d40543e9edaec4f2?lastModified=1742272065&narHash=sha256-ud8vcSzJsZ%2FCK%2Br8%2Fv0lyf4yUntVmDq6Z0A41ODfWbE%3D" }, - "go@1.22": { - "last_modified": "2024-12-23T21:10:33Z", - "resolved": "github:NixOS/nixpkgs/de1864217bfa9b5845f465e771e0ecb48b30e02d#go_1_22", + "go@1.24.1": { + "last_modified": "2025-03-23T05:31:05Z", + "resolved": "github:NixOS/nixpkgs/dd613136ee91f67e5dba3f3f41ac99ae89c5406b#go", "source": "devbox-search", - "version": "1.22.10", + "version": "1.24.1", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/34qa7mwbc1ja7758q4d9sjwmgip72lj9-go-1.22.10", + "path": "/nix/store/ja4jxx60lh1qfqfl4z4p2rff56ia1c3c-go-1.24.1", "default": true } ], - "store_path": "/nix/store/34qa7mwbc1ja7758q4d9sjwmgip72lj9-go-1.22.10" + "store_path": "/nix/store/ja4jxx60lh1qfqfl4z4p2rff56ia1c3c-go-1.24.1" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/gbidq6smzj09j6qmcdklrvrjgllxmr5j-go-1.22.10", + "path": "/nix/store/6zvrmsmdg7p8yw3vii20g40b4zsh6kjr-go-1.24.1", "default": true } ], - "store_path": "/nix/store/gbidq6smzj09j6qmcdklrvrjgllxmr5j-go-1.22.10" + "store_path": "/nix/store/6zvrmsmdg7p8yw3vii20g40b4zsh6kjr-go-1.24.1" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/jgz3hrbqblw65v95npdnvlymlm991s0c-go-1.22.10", + "path": "/nix/store/2bcic1xcha2k11djynr488v3pg0nnghr-go-1.24.1", "default": true } ], - "store_path": "/nix/store/jgz3hrbqblw65v95npdnvlymlm991s0c-go-1.22.10" + "store_path": "/nix/store/2bcic1xcha2k11djynr488v3pg0nnghr-go-1.24.1" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/1hd6kq7rssk14py6v8mrdq2pn5ciiw6y-go-1.22.10", + "path": 
"/nix/store/g29rrn8qqlg4yjqv543ryrkimr7fk43h-go-1.24.1", "default": true } ], - "store_path": "/nix/store/1hd6kq7rssk14py6v8mrdq2pn5ciiw6y-go-1.22.10" + "store_path": "/nix/store/g29rrn8qqlg4yjqv543ryrkimr7fk43h-go-1.24.1" } } }, @@ -496,51 +496,51 @@ } } }, - "kustomize@latest": { - "last_modified": "2025-03-11T17:52:14Z", - "resolved": "github:NixOS/nixpkgs/0d534853a55b5d02a4ababa1d71921ce8f0aee4c#kustomize", + "kustomize@5.5.0": { + "last_modified": "2024-12-23T21:10:33Z", + "resolved": "github:NixOS/nixpkgs/de1864217bfa9b5845f465e771e0ecb48b30e02d#kustomize", "source": "devbox-search", - "version": "5.6.0", + "version": "5.5.0", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/y85spf2nmlffzmq9lyzl8db7i0acdsqf-kustomize-5.6.0", + "path": "/nix/store/xpypw514kxv803li525a5by78g90ygyl-kustomize-5.5.0", "default": true } ], - "store_path": "/nix/store/y85spf2nmlffzmq9lyzl8db7i0acdsqf-kustomize-5.6.0" + "store_path": "/nix/store/xpypw514kxv803li525a5by78g90ygyl-kustomize-5.5.0" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/pwnmchq4bafvkbb84m62m8vqp9dqgaz8-kustomize-5.6.0", + "path": "/nix/store/4v454dywxsyzcq9ypm7aa5v0mdlj9vqn-kustomize-5.5.0", "default": true } ], - "store_path": "/nix/store/pwnmchq4bafvkbb84m62m8vqp9dqgaz8-kustomize-5.6.0" + "store_path": "/nix/store/4v454dywxsyzcq9ypm7aa5v0mdlj9vqn-kustomize-5.5.0" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/hrlykqw1jcl2ykzida4amf2s5sjhdsng-kustomize-5.6.0", + "path": "/nix/store/6g9p4i5r4qvfdygh157376fhzpp55bqk-kustomize-5.5.0", "default": true } ], - "store_path": "/nix/store/hrlykqw1jcl2ykzida4amf2s5sjhdsng-kustomize-5.6.0" + "store_path": "/nix/store/6g9p4i5r4qvfdygh157376fhzpp55bqk-kustomize-5.5.0" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/cwclm6315x1cn2kswzfhfcqp13qf44b0-kustomize-5.6.0", + "path": "/nix/store/5z7yq6831fsrsxsvg2ic229146ymmjaq-kustomize-5.5.0", "default": true } ], - "store_path": "/nix/store/cwclm6315x1cn2kswzfhfcqp13qf44b0-kustomize-5.6.0" + "store_path": "/nix/store/5z7yq6831fsrsxsvg2ic229146ymmjaq-kustomize-5.5.0" } } }, diff --git a/pkg/cloud/services/ec2/instances.go b/pkg/cloud/services/ec2/instances.go index 2c1756a931..3c4d829ca1 100644 --- a/pkg/cloud/services/ec2/instances.go +++ b/pkg/cloud/services/ec2/instances.go @@ -255,6 +255,10 @@ func (s *Service) CreateInstance(scope *scope.MachineScope, userData []byte, use input.MarketType = scope.AWSMachine.Spec.MarketType + input.HostID = scope.AWSMachine.Spec.HostID + + input.HostAffinity = scope.AWSMachine.Spec.HostAffinity + s.scope.Debug("Running instance", "machine-role", scope.Role()) s.scope.Debug("Running instance with instance metadata options", "metadata options", input.InstanceMetadataOptions) out, err := s.runInstance(scope.Role(), input) @@ -674,6 +678,19 @@ func (s *Service) runInstance(role string, i *infrav1.Instance) (*infrav1.Instan } } + if i.HostID != nil { + if i.HostAffinity == nil { + i.HostAffinity = aws.String("Default") + } + s.scope.Debug("Running instance with dedicated host placement", "hostId", i.HostID, "affinity", i.HostAffinity) + + input.Placement = &ec2.Placement{ + Tenancy: aws.String("host"), + Affinity: i.HostAffinity, + HostId: i.HostID, + } + } + out, err := s.EC2Client.RunInstancesWithContext(context.TODO(), input) if err != nil { return nil, errors.Wrap(err, "failed to run instance") diff --git a/templates/cluster-template-dedicated-host.yaml b/templates/cluster-template-dedicated-host.yaml new 
file mode 100644 index 0000000000..35636f2030 --- /dev/null +++ b/templates/cluster-template-dedicated-host.yaml @@ -0,0 +1,1015 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + ccm: external + csi: external + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "${CLUSTER_NAME}-control-plane" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: AWSCluster + name: "${CLUSTER_NAME}" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: AWSCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + controlPlaneLoadBalancer: + loadBalancerType: nlb + healthCheckProtocol: HTTPS + region: "${AWS_REGION}" + sshKeyName: "${AWS_SSH_KEY_NAME}" +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + controllerManager: + extraArgs: + cloud-provider: external + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data.local_hostname }}' + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data.local_hostname }}' + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: AWSMachineTemplate + name: "${CLUSTER_NAME}-control-plane" + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: "${KUBERNETES_VERSION}" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: AWSMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + iamInstanceProfile: "control-plane.cluster-api-provider-aws.sigs.k8s.io" + instanceType: "${AWS_CONTROL_PLANE_MACHINE_TYPE}" + sshKeyName: "${AWS_SSH_KEY_NAME}" +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: "${CLUSTER_NAME}-md-0" + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: AWSMachineTemplate + name: "${CLUSTER_NAME}-md-0" + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: AWSMachineTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + template: + spec: + iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io + instanceType: "${AWS_NODE_MACHINE_TYPE}" + sshKeyName: "${AWS_SSH_KEY_NAME}" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data.local_hostname }}' +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: crs-ccm +spec: + clusterSelector: + matchLabels: + ccm: external + resources: + - kind: ConfigMap + name: cloud-controller-manager-addon + strategy: ApplyOnce +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: crs-csi +spec: + clusterSelector: + matchLabels: + csi: external + 
resources: + - kind: ConfigMap + name: aws-ebs-csi-driver-addon + strategy: ApplyOnce +--- +apiVersion: v1 +data: + aws-ccm-external.yaml: | + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: aws-cloud-controller-manager + namespace: kube-system + labels: + k8s-app: aws-cloud-controller-manager + spec: + selector: + matchLabels: + k8s-app: aws-cloud-controller-manager + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + k8s-app: aws-cloud-controller-manager + spec: + nodeSelector: + node-role.kubernetes.io/control-plane: "" + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + serviceAccountName: cloud-controller-manager + containers: + - name: aws-cloud-controller-manager + image: registry.k8s.io/provider-aws/cloud-controller-manager:v1.28.3 + args: + - --v=2 + - --cloud-provider=aws + - --use-service-account-credentials=true + - --configure-cloud-routes=false + resources: + requests: + cpu: 200m + hostNetwork: true + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: cloud-controller-manager:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - apiGroup: "" + kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - serviceaccounts/token + verbs: + - create + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - apiGroup: "" + kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: cloud-controller-manager-addon +--- +apiVersion: v1 +data: + aws-ebs-csi-external.yaml: |- + apiVersion: v1 + kind: Secret + metadata: + name: aws-secret + namespace: 
kube-system + stringData: + key_id: "" + access_key: "" + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-controller-sa + namespace: kube-system + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-node-sa + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-external-attacher-role + rules: + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - csi.storage.k8s.io + resources: + - csinodeinfos + verbs: + - get + - list + - watch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-node + rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - patch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-external-provisioner-role + rules: + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - create + - delete + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list + - apiGroups: + - storage.k8s.io + resources: + - csinodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-external-resizer-role + rules: + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - persistentvolumeclaims/status + verbs: + - update + - patch + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: 
ebs-external-snapshotter-role + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotclasses + verbs: + - get + - list + - watch + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - create + - get + - list + - watch + - update + - delete + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents/status + verbs: + - update + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-attacher-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-attacher-role + subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-provisioner-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-provisioner-role + subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-resizer-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-resizer-role + subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-snapshotter-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-snapshotter-role + subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-node-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-csi-node + subjects: + - kind: ServiceAccount + name: ebs-csi-node-sa + namespace: kube-system + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-controller + namespace: kube-system + spec: + replicas: 2 + selector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + labels: + app: ebs-csi-controller + app.kubernetes.io/name: aws-ebs-csi-driver + spec: + containers: + - args: + - --endpoint=$(CSI_ENDPOINT) + - --logtostderr + - --v=2 + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: CSI_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: key_id + name: aws-secret + optional: true + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: access_key + name: aws-secret + optional: true + image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.25.0 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + 
timeoutSeconds: 3 + name: ebs-plugin + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + readinessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --v=2 + - --feature-gates=Topology=true + - --extra-create-metadata + - --leader-election=true + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: registry.k8s.io/sig-storage/csi-provisioner:v3.6.2 + name: csi-provisioner + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --v=2 + - --leader-election=true + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: registry.k8s.io/sig-storage/csi-attacher:v4.4.2 + name: csi-attacher + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --leader-election=true + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: registry.k8s.io/sig-storage/csi-snapshotter:v6.3.2 + name: csi-snapshotter + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --v=2 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: registry.k8s.io/sig-storage/csi-resizer:v1.9.2 + imagePullPolicy: Always + name: csi-resizer + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.11.0 + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: ebs-csi-controller-sa + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + tolerationSeconds: 300 + - key: node-role.kubernetes.io/master + effect: NoSchedule + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + volumes: + - emptyDir: {} + name: socket-dir + --- + apiVersion: policy/v1 + kind: PodDisruptionBudget + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-controller + namespace: kube-system + spec: + maxUnavailable: 1 + selector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/name: aws-ebs-csi-driver + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-node + namespace: kube-system + spec: + selector: + matchLabels: + app: ebs-csi-node + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + labels: + app: ebs-csi-node + app.kubernetes.io/name: aws-ebs-csi-driver + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/compute-type + operator: NotIn + values: + - fargate + containers: + - args: + - node + - --endpoint=$(CSI_ENDPOINT) + - --logtostderr + - --v=2 + env: + - name: CSI_ENDPOINT + 
value: unix:/csi/csi.sock + - name: CSI_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.25.0 + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + name: ebs-plugin + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: kubelet-dir + - mountPath: /csi + name: plugin-dir + - mountPath: /dev + name: device-dir + - args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.9.2 + name: node-driver-registrar + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - args: + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.11.0 + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: plugin-dir + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + serviceAccountName: ebs-csi-node-sa + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + tolerationSeconds: 300 + volumes: + - hostPath: + path: /var/lib/kubelet + type: Directory + name: kubelet-dir + - hostPath: + path: /var/lib/kubelet/plugins/ebs.csi.aws.com/ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + name: registration-dir + - hostPath: + path: /dev + type: Directory + name: device-dir + updateStrategy: + rollingUpdate: + maxUnavailable: 10% + type: RollingUpdate + --- + apiVersion: storage.k8s.io/v1 + kind: CSIDriver + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs.csi.aws.com + spec: + attachRequired: true + podInfoOnMount: false +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: aws-ebs-csi-driver-addon diff --git a/test/e2e/shared/aws.go b/test/e2e/shared/aws.go index e5f9f9a0a4..00a6a562d6 100644 --- a/test/e2e/shared/aws.go +++ b/test/e2e/shared/aws.go @@ -2366,3 +2366,63 @@ func GetMountTargetState(e2eCtx *E2EContext, mountTargetID string) (*string, err } return result.LifeCycleState, nil } + +func getAvailabilityZone(e2eCtx *E2EContext) string { + az := e2eCtx.E2EConfig.GetVariable(AwsAvailabilityZone1) + return az +} + +func getInstanceFamily(e2eCtx *E2EContext) string { + machineType := e2eCtx.E2EConfig.GetVariable(AwsNodeMachineType) + // from the instance type, take the instance family (the part before the dot) + // for example: t3a.medium -> t3a + machineTypeSplit := strings.Split(machineType, ".") + if len(machineTypeSplit) > 0 { + return machineTypeSplit[0] + } + return "t3" +} + +func AllocateHost(e2eCtx *E2EContext) (string, error) { + ec2Svc := ec2.New(e2eCtx.AWSSession) + input := &ec2.AllocateHostsInput{ + AvailabilityZone: aws.String(getAvailabilityZone(e2eCtx)), + InstanceFamily: aws.String(getInstanceFamily(e2eCtx)), + Quantity: aws.Int64(1), + } + output, err := ec2Svc.AllocateHosts(input) + Expect(err).ToNot(HaveOccurred(), "Failed to allocate host") + Expect(len(output.HostIds)).To(BeNumerically(">", 0), "No dedicated host ID returned") + fmt.Println("Allocated Host ID: 
", *output.HostIds[0]) + hostId := *output.HostIds[0] + return hostId, nil +} + +func ReleaseHost(e2eCtx *E2EContext, hostID string) { + ec2Svc := ec2.New(e2eCtx.AWSSession) + + input := &ec2.ReleaseHostsInput{ + HostIds: []*string{aws.String(hostID)}, + } + + _, err := ec2Svc.ReleaseHosts(input) + Expect(err).ToNot(HaveOccurred(), "Failed to release host %s", hostID) + fmt.Println("Released Host ID: ", hostID) +} + +func GetHostId(e2eCtx *E2EContext, instanceID string) string { + ec2Svc := ec2.New(e2eCtx.AWSSession) + + input := &ec2.DescribeInstancesInput{ + InstanceIds: []*string{aws.String(instanceID)}, + } + + result, err := ec2Svc.DescribeInstances(input) + Expect(err).ToNot(HaveOccurred(), "Failed to get host ID for instance %s", instanceID) + Expect(len(result.Reservations)).To(BeNumerically(">", 0), "No reservation returned") + Expect(len(result.Reservations[0].Instances)).To(BeNumerically(">", 0), "No instance returned") + placement := *result.Reservations[0].Instances[0].Placement + hostID := *placement.HostId + fmt.Println("Host ID: ", hostID) + return hostID +} diff --git a/test/e2e/shared/defaults.go b/test/e2e/shared/defaults.go index 8e514bf913..0b8666792c 100644 --- a/test/e2e/shared/defaults.go +++ b/test/e2e/shared/defaults.go @@ -72,6 +72,7 @@ const ( ClassicElbTestKubernetesFrom = "CLASSICELB_TEST_KUBERNETES_VERSION_FROM" ClassicElbTestKubernetesTo = "CLASSICELB_TEST_KUBERNETES_VERSION_TO" + DedicatedHostFlavor = "dedicated-host" ) // ResourceQuotaFilePath is the path to the file that contains the resource usage. diff --git a/test/e2e/shared/resource.go b/test/e2e/shared/resource.go index 88cb8336b0..81cf983aed 100644 --- a/test/e2e/shared/resource.go +++ b/test/e2e/shared/resource.go @@ -44,6 +44,7 @@ type TestResource struct { EC2GPU int `json:"ec2-GPU"` VolumeGP2 int `json:"volume-GP2"` EventBridgeRules int `json:"eventBridge-rules"` + //TODO: DedicatedHost int `json:"dedicated-host"` } func WriteResourceQuotesToFile(logPath string, serviceQuotas map[string]*ServiceQuota) { diff --git a/test/e2e/suites/unmanaged/unmanaged_functional_clusterclass_test.go b/test/e2e/suites/unmanaged/unmanaged_functional_clusterclass_test.go index d7426a5ce4..022f992462 100644 --- a/test/e2e/suites/unmanaged/unmanaged_functional_clusterclass_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_functional_clusterclass_test.go @@ -32,6 +32,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" + "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" @@ -75,7 +76,6 @@ var _ = ginkgo.Context("[unmanaged] [functional] [ClusterClass]", func() { ClusterName: clusterName, KubernetesVersion: e2eCtx.E2EConfig.GetVariable(shared.KubernetesVersion), ControlPlaneMachineCount: ptr.To[int64](1), - WorkerMachineCount: ptr.To[int64](0), }, WaitForClusterIntervals: e2eCtx.E2EConfig.GetIntervals(specName, "wait-cluster"), WaitForControlPlaneIntervals: e2eCtx.E2EConfig.GetIntervals(specName, "wait-control-plane"), @@ -146,7 +146,8 @@ var _ = ginkgo.Context("[unmanaged] [functional] [ClusterClass]", func() { VpcCidr: "10.0.0.0/23", PublicSubnetCidr: "10.0.0.0/24", PrivateSubnetCidr: "10.0.1.0/24", - AvailabilityZone: "us-west-2a", + //TODO: Is this standard? 
+ AvailabilityZone: "us-west-2a", }, e2eCtx) mgmtClusterInfra.CreateInfrastructure() }) diff --git a/test/e2e/suites/unmanaged/unmanaged_functional_test.go b/test/e2e/suites/unmanaged/unmanaged_functional_test.go index e7d68f7bca..1deba367e6 100644 --- a/test/e2e/suites/unmanaged/unmanaged_functional_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_functional_test.go @@ -957,4 +957,96 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { "Eventually failed waiting for AWSCluster to show VPC endpoint as deleted in conditions") }) }) + + ginkgo.Describe("Dedicated hosts cluster test", func() { + ginkgo.It("should create cluster with dedicated hosts", func() { + specName := "dedicated-host" + if !e2eCtx.Settings.SkipQuotas { + //TODO: Update TestResource to include dedicated hosts + //TODO: Shouldn't this IF be inside shared.AcquireResources instead of being repeated across tests? + requiredResources = &shared.TestResource{EC2Normal: 1 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, specName) + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } + namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx) + defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) + + //TODO: Allocate Host ID before creating the cluster + // Allocate a dedicated host and ensure it is released after the test + ginkgo.By("Allocating a dedicated host") + hostID, err := shared.AllocateHost(e2eCtx) + Expect(err).To(BeNil()) + Expect(hostID).NotTo(BeEmpty()) + ginkgo.By(fmt.Sprintf("Allocated dedicated host: %s", hostID)) + defer func() { + ginkgo.By(fmt.Sprintf("Releasing the dedicated host: %s", hostID)) + shared.ReleaseHost(e2eCtx, hostID) + }() + + ginkgo.By("Creating cluster") + clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6)) + vars := map[string]string{ + "hostID": hostID, + } + // Create a cluster with a dedicated host + clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + ClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + ConfigCluster: clusterctl.ConfigClusterInput{ + LogFolder: filepath.Join(e2eCtx.Settings.ArtifactFolder, "clusters", e2eCtx.Environment.BootstrapClusterProxy.GetName()), + ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath, + KubeconfigPath: e2eCtx.Environment.BootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + Flavor: shared.DedicatedHostFlavor, + Namespace: namespace.Name, + ClusterName: clusterName, + KubernetesVersion: e2eCtx.E2EConfig.GetVariable(shared.KubernetesVersion), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](0), + ClusterctlVariables: vars, + }, + WaitForClusterIntervals: e2eCtx.E2EConfig.GetIntervals(specName, "wait-cluster"), + WaitForControlPlaneIntervals: e2eCtx.E2EConfig.GetIntervals(specName, "wait-control-plane"), + }, result) + + // Check if bastion host is up and running + awsCluster, err := GetAWSClusterByName(ctx, e2eCtx.Environment.BootstrapClusterProxy, namespace.Name, clusterName) + Expect(err).To(BeNil()) + //Expect(awsCluster.Status.Bastion.State).To(Equal(infrav1.InstanceStateRunning)) + expectAWSClusterConditions(awsCluster, 
[]conditionAssertion{{infrav1.BastionHostReadyCondition, corev1.ConditionTrue, "", ""}}) + + mdName := clusterName + "-md01" + machineTemplate := makeAWSMachineTemplate(namespace.Name, mdName, e2eCtx.E2EConfig.GetVariable(shared.AwsNodeMachineType), nil) + + machineDeployment := makeMachineDeployment(namespace.Name, mdName, clusterName, nil, int32(1)) + framework.CreateMachineDeployment(ctx, framework.CreateMachineDeploymentInput{ + Creator: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + MachineDeployment: machineDeployment, + BootstrapConfigTemplate: makeJoinBootstrapConfigTemplate(namespace.Name, mdName), + InfraMachineTemplate: machineTemplate, + }) + + framework.WaitForMachineDeploymentNodesToExist(ctx, framework.WaitForMachineDeploymentNodesToExistInput{ + Lister: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + Cluster: result.Cluster, + MachineDeployment: machineDeployment, + }, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...) + + workerMachines := framework.GetMachinesByMachineDeployments(ctx, framework.GetMachinesByMachineDeploymentsInput{ + Lister: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + ClusterName: clusterName, + Namespace: namespace.Name, + MachineDeployment: *machineDeployment, + }) + Expect(len(workerMachines)).To(Equal(1)) + worker := workerMachines[0] + // ProviderID has the form aws:///<az>/<instance-id>, so take the segment after the last slash + instanceID := (*worker.Spec.ProviderID)[strings.LastIndex(*worker.Spec.ProviderID, "/")+1:] + ginkgo.By(fmt.Sprintf("Worker instance ID: %s", instanceID)) + instanceHostID := shared.GetHostId(e2eCtx, instanceID) + ginkgo.By(fmt.Sprintf("Worker instance host ID: %s", instanceHostID)) + //Expect(instanceHostID).To(Equal(hostID), fmt.Sprintf("Expected instance to be on host %s, but got %s", hostID, instanceHostID)) + ginkgo.By("PASSED!") + }) + }) + }) diff --git a/test/mocks/gomock_reflect_524470655/prog.go b/test/mocks/gomock_reflect_524470655/prog.go new file mode 100644 index 0000000000..d525a90e81 --- /dev/null +++ b/test/mocks/gomock_reflect_524470655/prog.go @@ -0,0 +1,66 @@ + +package main + +import ( + "encoding/gob" + "flag" + "fmt" + "os" + "path" + "reflect" + + "github.com/golang/mock/mockgen/model" + + pkg_ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" +) + +var output = flag.String("output", "", "The output file name, or empty to use stdout.") + +func main() { + flag.Parse() + + its := []struct{ + sym string + typ reflect.Type + }{ + + { "OCMClient", reflect.TypeOf((*pkg_.OCMClient)(nil)).Elem()}, + + } + pkg := &model.Package{ + // NOTE: This behaves contrary to documented behaviour if the + // package name is not the final component of the import path. + // The reflect package doesn't expose the package name, though. + Name: path.Base("sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa"), + } + + for _, it := range its { + intf, err := model.InterfaceFromInterfaceType(it.typ) + if err != nil { + fmt.Fprintf(os.Stderr, "Reflection: %v\n", err) + os.Exit(1) + } + intf.Name = it.sym + pkg.Interfaces = append(pkg.Interfaces, intf) + } + + outfile := os.Stdout + if len(*output) != 0 { + var err error + outfile, err = os.Create(*output) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to open output file %q", *output) + } + defer func() { + if err := outfile.Close(); err != nil { + fmt.Fprintf(os.Stderr, "failed to close output file %q", *output) + os.Exit(1) + } + }() + } + + if err := gob.NewEncoder(outfile).Encode(pkg); err != nil { + fmt.Fprintf(os.Stderr, "gob encode: %v\n", err) + os.Exit(1) + } +}