Skip to content

Commit 1c04f92

Browse files
authored
test: teardown and slight refactorings (#104)
1 parent 2139c35 commit 1c04f92

File tree

8 files changed: +233 additions, -32 deletions

Makefile

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -111,6 +111,7 @@ E2E_METAL_API_URL ?= "$(METALCTL_API_URL)"
111111
E2E_METAL_API_HMAC ?= "$(METALCTL_HMAC)"
112112
E2E_METAL_API_HMAC_AUTH_TYPE ?= "$(or $(METALCTL_HMAC_AUTH_TYPE),Metal-Admin)"
113113
E2E_METAL_PROJECT_ID ?= "00000000-0000-0000-0000-000000000001"
114+
E2E_METAL_PROJECT_NAME ?= "test"
114115
E2E_METAL_PARTITION ?= "mini-lab"
115116
E2E_METAL_PUBLIC_NETWORK ?= "internet-mini-lab"
116117
E2E_KUBERNETES_VERSIONS ?= "v1.32.9"
@@ -136,6 +137,7 @@ test-e2e: manifests generate fmt vet ginkgo
136137
METAL_API_HMAC=$(E2E_METAL_API_HMAC) \
137138
METAL_API_HMAC_AUTH_TYPE=$(E2E_METAL_API_HMAC_AUTH_TYPE) \
138139
METAL_PROJECT_ID=$(E2E_METAL_PROJECT_ID) \
140+
E2E_METAL_PROJECT_NAME=$(E2E_METAL_PROJECT_NAME) \
139141
METAL_PARTITION=$(E2E_METAL_PARTITION) \
140142
METAL_PUBLIC_NETWORK=$(E2E_METAL_PUBLIC_NETWORK) \
141143
E2E_KUBERNETES_VERSIONS=$(E2E_KUBERNETES_VERSIONS) \

test/e2e/frmwrk/cluster_basic_test.go

Lines changed: 14 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -24,19 +24,24 @@ var _ = Describe("Basic Cluster", Ordered, Label("basic"), func() {
2424
for i, v := range kubernetesVersions {
2525
Context(fmt.Sprintf("with kubernetes %s", v), Ordered, func() {
2626
var (
27-
ec *E2ECluster
28-
ctx context.Context
27+
ec *E2ECluster
28+
ctx context.Context
29+
done func()
2930
)
3031

3132
BeforeEach(func() {
32-
ctx = context.Background()
33+
ctx, done = context.WithCancel(context.Background())
3334
})
3435

35-
It("create new cluster", Label("create"), func() {
36+
AfterEach(func() {
37+
done()
38+
})
39+
40+
It("create new cluster", Label("basic", "create"), func() {
3641
ec = createE2ECluster(ctx, e2eCtx, ClusterConfig{
3742
SpecName: "basic-cluster-creation-" + v,
38-
NamespaceName: fmt.Sprintf("e2e-basic-cluster-creation-%d", i),
39-
ClusterName: fmt.Sprintf("basic-%d", i),
43+
NamespaceName: fmt.Sprintf("basic-%d", i),
44+
ClusterName: "basic-cluster",
4045
KubernetesVersion: v,
4146
ControlPlaneMachineImage: os.Getenv("E2E_CONTROL_PLANE_MACHINE_IMAGE_PREFIX") + strings.TrimPrefix(v, "v"),
4247
ControlPlaneMachineCount: 1,
@@ -46,7 +51,7 @@ var _ = Describe("Basic Cluster", Ordered, Label("basic"), func() {
4651
Expect(ec).ToNot(BeNil())
4752
})
4853

49-
It("move from bootstrap to workload cluster", Label("move"), func() {
54+
It("move from bootstrap to workload cluster", Label("basic", "move"), func() {
5055
Expect(ec).NotTo(BeNil(), "e2e cluster required")
5156

5257
clusterctl.InitManagementClusterAndWatchControllerLogs(ctx, clusterctl.InitManagementClusterAndWatchControllerLogsInput{
@@ -79,7 +84,7 @@ var _ = Describe("Basic Cluster", Ordered, Label("basic"), func() {
7984
Expect(err).ToNot(HaveOccurred(), "cluster should be present")
8085
})
8186

82-
It("move from workload to bootstrap cluster", Label("move"), func() {
87+
It("move from workload to bootstrap cluster", Label("basic", "move"), func() {
8388
Expect(ec).NotTo(BeNil(), "e2e cluster required")
8489

8590
clusterctl.Move(ctx, clusterctl.MoveInput{
@@ -105,7 +110,7 @@ var _ = Describe("Basic Cluster", Ordered, Label("basic"), func() {
105110
Expect(err).ToNot(HaveOccurred(), "cluster should be present")
106111
})
107112

108-
It("delete cluster", Label("delete"), func() {
113+
It("delete cluster", Label("basic", "teardown"), func() {
109114
ec.Teardown(ctx)
110115
})
111116
})

test/e2e/frmwrk/cluster_ha_test.go

Lines changed: 15 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -18,19 +18,24 @@ var _ = Describe("High Availability Cluster", Ordered, Label("ha"), func() {
1818
for i, v := range kubernetesVersions {
1919
Context(fmt.Sprintf("with kubernetes %s", v), Ordered, func() {
2020
var (
21-
ec *E2ECluster
22-
ctx context.Context
21+
ec *E2ECluster
22+
ctx context.Context
23+
done func()
2324
)
2425

2526
BeforeEach(func() {
26-
ctx = context.Background()
27+
ctx, done = context.WithCancel(context.Background())
2728
})
2829

29-
It("create new cluster", Label("create"), func() {
30+
AfterEach(func() {
31+
done()
32+
})
33+
34+
It("create new cluster", Label("ha", "create"), func() {
3035
ec = createE2ECluster(ctx, e2eCtx, ClusterConfig{
3136
SpecName: "ha-cluster-creation-" + v,
32-
NamespaceName: fmt.Sprintf("e2e-ha-cluster-creation-%d", i),
33-
ClusterName: fmt.Sprintf("ha-%d", i),
37+
NamespaceName: fmt.Sprintf("ha-%d", i),
38+
ClusterName: "ha-cluster",
3439
KubernetesVersion: v,
3540
ControlPlaneMachineImage: os.Getenv("E2E_CONTROL_PLANE_MACHINE_IMAGE_PREFIX") + strings.TrimPrefix(v, "v"),
3641
ControlPlaneMachineCount: 3,
@@ -40,7 +45,10 @@ var _ = Describe("High Availability Cluster", Ordered, Label("ha"), func() {
4045
Expect(ec).ToNot(BeNil())
4146
})
4247

43-
It("delete cluster", Label("delete"), func() {
48+
It("delete cluster", Label("ha", "teardown"), func() {
49+
if ec == nil {
50+
Skip("E2ECluster not initialized, skipping teardown")
51+
}
4452
ec.Teardown(ctx)
4553
})
4654
})

test/e2e/frmwrk/config/capi-e2e-config.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -135,6 +135,7 @@ intervals:
135135
default/wait-controllers: ["4m", "10s"]
136136
default/wait-cluster: ["5m", "10s"]
137137
metal-stack/wait-firewall-allocate: ["5m", "10s"]
138+
metal-stack/machine-reclaim: ["10m", "10s"]
138139
default/wait-control-plane: ["10m", "10s"]
139140
default/wait-control-plane-machine: ["5m", "10s"]
140141
default/wait-worker-nodes: ["5m", "10s"]

test/e2e/frmwrk/frmwrk_suite_test.go

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ import (
55
"testing"
66

77
"github.com/onsi/ginkgo/v2"
8+
ginkgotypes "github.com/onsi/ginkgo/v2/types"
89
. "github.com/onsi/gomega"
910

1011
"k8s.io/klog/v2"
@@ -24,6 +25,7 @@ func TestE2E(t *testing.T) {
2425

2526
var _ = ginkgo.BeforeSuite(func() {
2627
e2eCtx = NewE2EContext()
28+
e2eCtx.TeardownMetalStackProject(context.TODO())
2729
e2eCtx.ProvideBootstrapCluster()
2830
e2eCtx.CreateClusterctlConfig(context.TODO())
2931
e2eCtx.InitManagementCluster(context.TODO())
@@ -35,6 +37,12 @@ var _ = ginkgo.AfterSuite(func() {
3537
return
3638
}
3739
if e2eCtx != nil {
40+
filter, err := ginkgotypes.ParseLabelFilter(ginkgo.GinkgoLabelFilter())
41+
Expect(err).ToNot(HaveOccurred(), "failed to parse ginkgo label filter")
42+
43+
if !filter([]string{"teardown"}) {
44+
return
45+
}
3846
e2eCtx.Teardown(context.TODO())
3947
}
4048
})

test/e2e/frmwrk/shared_cases.go

Lines changed: 21 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -19,19 +19,22 @@ func createE2ECluster(ctx context.Context, e2eCtx *E2EContext, cfg ClusterConfig
1919
ec.SetupNamespace(ctx)
2020
ec.GenerateAndApplyClusterTemplate(ctx)
2121

22-
By("Wait for cluster")
23-
controlPlane := framework.GetKubeadmControlPlaneByCluster(ctx, framework.GetKubeadmControlPlaneByClusterInput{
24-
Lister: e2eCtx.Environment.Bootstrap.GetClient(),
25-
ClusterName: ec.Refs.Cluster.Name,
26-
Namespace: ec.Refs.Cluster.Namespace,
22+
DeferCleanup(func() {
23+
ec.Dump(context.Background())
2724
})
2825

26+
By("Wait for cluster")
2927
framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{
3028
Getter: e2eCtx.Environment.Bootstrap.GetClient(),
3129
Namespace: ec.Refs.Cluster.Namespace,
3230
Name: ec.Refs.Cluster.Name,
3331
}, e2eCtx.E2EConfig.GetIntervals("default", "wait-cluster")...)
3432

33+
controlPlane := framework.GetKubeadmControlPlaneByCluster(ctx, framework.GetKubeadmControlPlaneByClusterInput{
34+
Lister: e2eCtx.Environment.Bootstrap.GetClient(),
35+
ClusterName: ec.Refs.Cluster.Name,
36+
Namespace: ec.Refs.Cluster.Namespace,
37+
})
3538
Expect(controlPlane).To(Not(BeNil()))
3639

3740
By("Wait for CNI and CCM")
@@ -58,5 +61,18 @@ func createE2ECluster(ctx context.Context, e2eCtx *E2EContext, cfg ClusterConfig
5861
Cluster: ec.Refs.Cluster,
5962
Getter: e2eCtx.Environment.Bootstrap.GetClient(),
6063
}, e2eCtx.E2EConfig.GetIntervals("default", "wait-cluster-provisioned")...)
64+
65+
framework.WaitForOneKubeadmControlPlaneMachineToExist(ctx, framework.WaitForOneKubeadmControlPlaneMachineToExistInput{
66+
Lister: e2eCtx.Environment.Bootstrap.GetClient(),
67+
Cluster: ec.Refs.Cluster,
68+
ControlPlane: controlPlane,
69+
}, e2eCtx.E2EConfig.GetIntervals("default", "wait-control-plane-machine")...)
70+
71+
framework.WaitForControlPlaneAndMachinesReady(ctx, framework.WaitForControlPlaneAndMachinesReadyInput{
72+
Cluster: ec.Refs.Cluster,
73+
GetLister: e2eCtx.Environment.Bootstrap.GetClient(),
74+
ControlPlane: controlPlane,
75+
}, e2eCtx.E2EConfig.GetIntervals("default", "wait-control-plane-and-machines-ready")...)
76+
6177
return ec
6278
}

test/e2e/frmwrk/shared_cluster.go

Lines changed: 47 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ import (
2323
"k8s.io/utils/ptr"
2424

2525
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
26+
kubeadmvbootstrap1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
2627
"sigs.k8s.io/cluster-api/test/framework"
2728
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
2829

@@ -78,7 +79,7 @@ func (e2e *E2ECluster) Variables() map[string]string {
7879
vars["METAL_API_HMAC_AUTH_TYPE"] = e2e.E2EContext.envOrVar("METAL_API_HMAC_AUTH_TYPE")
7980

8081
vars["NAMESPACE"] = e2e.NamespaceName
81-
vars["METAL_PROJECT_ID"] = e2e.E2EContext.Environment.project
82+
vars["METAL_PROJECT_ID"] = e2e.E2EContext.Environment.projectID
8283
vars["METAL_PARTITION"] = e2e.E2EContext.Environment.partition
8384
vars["METAL_NODE_NETWORK_ID"] = *e2e.Refs.NodeNetwork.ID
8485
vars["FIREWALL_MACHINE_SIZE"] = e2e.FirewallSize
@@ -103,7 +104,8 @@ func (e2e *E2ECluster) SetupNamespace(ctx context.Context) *corev1.Namespace {
103104
Name: e2e.NamespaceName,
104105
IgnoreAlreadyExists: true,
105106
Labels: map[string]string{
106-
"e2e-test": e2e.SpecName,
107+
"e2e-test": e2e.SpecName,
108+
e2eMetalStackProjectIDLabel: e2e.E2EContext.Environment.projectID,
107109
},
108110
})
109111
e2e.Refs.Namespace = ns
@@ -142,11 +144,12 @@ func (e2e *E2ECluster) setupNodeNetwork(ctx context.Context) {
142144

143145
nar := &metalmodels.V1NetworkAllocateRequest{
144146
Partitionid: e2e.E2EContext.Environment.partition,
145-
Projectid: e2e.E2EContext.Environment.project,
147+
Projectid: e2e.E2EContext.Environment.projectID,
146148
Name: e2e.ClusterName + "-node",
147149
Description: fmt.Sprintf("Node network for %s", e2e.ClusterName),
148150
Labels: map[string]string{
149-
"e2e-test": e2e.SpecName,
151+
"e2e-test": e2e.SpecName,
152+
capmsv1alpha1.TagInfraClusterResource: e2e.NamespaceName + "." + e2e.ClusterName,
150153
},
151154
}
152155
net, err := e2e.E2EContext.Environment.Metal.Network().AllocateNetwork(metalnetwork.NewAllocateNetworkParamsWithContext(ctx).WithBody(nar), nil)
@@ -174,11 +177,11 @@ func (e2e *E2ECluster) setupFirewall(ctx context.Context) {
174177
Hostname: e2e.ClusterName + "-fw",
175178
Description: "Firewall for " + e2e.ClusterName,
176179
Partitionid: &e2e.E2EContext.Environment.partition,
177-
Projectid: &e2e.E2EContext.Environment.project,
180+
Projectid: &e2e.E2EContext.Environment.projectID,
178181
Sizeid: &e2e.FirewallSize,
179182
Imageid: &e2e.FirewallImage,
180183
Tags: []string{
181-
fmt.Sprintf("%s=%s", capmsv1alpha1.TagInfraClusterResource, e2e.ClusterName),
184+
fmt.Sprintf("%s=%s.%s", capmsv1alpha1.TagInfraClusterResource, e2e.NamespaceName, e2e.ClusterName),
182185
fmt.Sprintf("%s=%s", "e2e-test", e2e.SpecName),
183186
},
184187
Networks: []*metalmodels.V1MachineAllocationNetwork{
@@ -239,6 +242,8 @@ func (e2e *E2ECluster) setupFirewall(ctx context.Context) {
239242
e2e.Refs.Firewall = fw.Payload
240243
return nil
241244
}, e2e.E2EContext.E2EConfig.GetIntervals("metal-stack", "wait-firewall-allocate")...).ShouldNot(HaveOccurred(), "firewall not available")
245+
246+
GinkgoWriter.Printf("Firewall allocated with ID: %s\n", *e2e.Refs.Firewall.ID)
242247
}
243248

244249
func (e2e *E2ECluster) teardownFirewall(ctx context.Context) {
@@ -260,11 +265,11 @@ func (e2e *E2ECluster) setupControlPlaneIP(ctx context.Context) {
260265
By("Setup Control Plane IP")
261266

262267
ipr := &metalmodels.V1IPAllocateRequest{
263-
Projectid: &e2e.E2EContext.Environment.project,
268+
Projectid: &e2e.E2EContext.Environment.projectID,
264269
Name: e2e.ClusterName + "-cp-ip",
265270
Description: "Control plane IP for " + e2e.ClusterName,
266271
Tags: []string{
267-
fmt.Sprintf("%s=%s", capmsv1alpha1.TagInfraClusterResource, e2e.ClusterName),
272+
fmt.Sprintf("%s=%s.%s", capmsv1alpha1.TagInfraClusterResource, e2e.NamespaceName, e2e.ClusterName),
268273
fmt.Sprintf("%s=%s", "e2e-test", e2e.SpecName),
269274
},
270275
Networkid: ptr.To(e2e.E2EContext.Environment.publicNetwork),
@@ -345,6 +350,40 @@ func (e2e *E2ECluster) teardownCluster(ctx context.Context) {
345350
}, e2e.E2EContext.E2EConfig.GetIntervals("default", "wait-delete-cluster")...)
346351
}
347352

353+
func (ec *E2ECluster) Dump(ctx context.Context) {
354+
framework.DumpResourcesForCluster(ctx, framework.DumpResourcesForClusterInput{
355+
Lister: ec.E2EContext.Environment.Bootstrap.GetClient(),
356+
LogPath: path.Join(ec.E2EContext.Environment.artifactsPath, "clusters", ec.Refs.Cluster.Namespace+"_"+ec.Refs.Cluster.Name),
357+
Cluster: ec.Refs.Cluster,
358+
Resources: []framework.DumpNamespaceAndGVK{
359+
{
360+
GVK: clusterv1.GroupVersion.WithKind("Cluster"),
361+
Namespace: ec.Refs.Cluster.Namespace,
362+
},
363+
{
364+
GVK: capmsv1alpha1.GroupVersion.WithKind("MetalStackCluster"),
365+
Namespace: ec.Refs.Cluster.Namespace,
366+
},
367+
{
368+
GVK: kubeadmvbootstrap1.GroupVersion.WithKind("KubeadmConfig"),
369+
Namespace: ec.Refs.Cluster.Namespace,
370+
},
371+
{
372+
GVK: clusterv1.GroupVersion.WithKind("MachineDeployment"),
373+
Namespace: ec.Refs.Cluster.Namespace,
374+
},
375+
{
376+
GVK: clusterv1.GroupVersion.WithKind("Machine"),
377+
Namespace: ec.Refs.Cluster.Namespace,
378+
},
379+
{
380+
GVK: capmsv1alpha1.GroupVersion.WithKind("MetalStackMachine"),
381+
Namespace: ec.Refs.Cluster.Namespace,
382+
},
383+
},
384+
})
385+
}
386+
348387
// deleteClusterAndWait deletes a cluster object and waits for it to be gone.
349388
// TODO: remove once cluster expectation has been fixed in framework
350389
func deleteClusterAndWait(ctx context.Context, input framework.DeleteClusterAndWaitInput, intervals ...any) {

Comments: 0 commit comments