
Commit b65847e

Merge pull request #12688 from k8s-infra-cherrypick-robot/cherry-pick-12676-to-release-1.11
[release-1.11] 🌱 Get kind mgmt cluster logs in clusterctl upgrade test
2 parents: 4763d26 + d30d3e3

4 files changed: +63 -52 lines

test/e2e/clusterctl_upgrade.go
Lines changed: 13 additions & 5 deletions

@@ -222,10 +222,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
     Expect(input.InitWithKubernetesVersion).ToNot(BeEmpty(), "Invalid argument. input.InitWithKubernetesVersion can't be empty when calling %s spec", specName)
     if input.KindManagementClusterNewClusterProxyFunc == nil {
         input.KindManagementClusterNewClusterProxyFunc = func(name string, kubeconfigPath string) framework.ClusterProxy {
-            scheme := apiruntime.NewScheme()
-            framework.TryAddDefaultSchemes(scheme)
-            _ = clusterv1beta1.AddToScheme(scheme)
-            return framework.NewClusterProxy(name, kubeconfigPath, scheme)
+            return framework.NewClusterProxy(name, kubeconfigPath, initScheme(), framework.WithMachineLogCollector(framework.DockerLogCollector{}))
         }
     }

@@ -342,6 +339,9 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
         managementClusterProxy = input.BootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name, framework.WithMachineLogCollector(input.BootstrapClusterProxy.GetLogCollector()))
     }

+    // Add v1beta1 schema so we can get v1beta1 Clusters below.
+    _ = clusterv1beta1.AddToScheme(managementClusterProxy.GetScheme())
+
     By("Turning the new cluster into a management cluster with older versions of providers")

     // Download the clusterctl version that should be used to initially set up the management cluster (which is later upgraded).

@@ -779,7 +779,13 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
     AfterEach(func() {
         if testNamespace != nil {
             // Dump all the logs from the workload cluster before deleting them.
-            framework.DumpAllResourcesAndLogs(ctx, managementClusterProxy, input.ClusterctlConfigPath, input.ArtifactFolder, testNamespace, managementClusterResources.Cluster)
+            framework.DumpAllResourcesAndLogs(ctx, managementClusterProxy, input.ClusterctlConfigPath, input.ArtifactFolder, testNamespace, &clusterv1.Cluster{
+                // DumpAllResourcesAndLogs only uses Namespace + Name from the Cluster object.
+                ObjectMeta: metav1.ObjectMeta{
+                    Namespace: testNamespace.Name,
+                    Name:      workloadClusterName,
+                },
+            })

             if !input.SkipCleanup {
                 Byf("Deleting all clusters in namespace %s in management cluster %s", testNamespace.Name, managementClusterName)

@@ -811,6 +817,8 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg

         // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
         if input.UseKindForManagementCluster {
+            dumpKindClusterLogs(ctx, input.ArtifactFolder, managementClusterProxy)
+
             if !input.SkipCleanup {
                 managementClusterProxy.Dispose(ctx)
                 managementClusterProvider.Dispose(ctx)
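
For reference, a minimal sketch (not part of the change) of what the new default proxy constructor does: it builds the scheme via the shared initScheme helper added in test/e2e/common.go below and attaches the framework's Docker log collector, which is what later allows the kind management cluster's node logs to be dumped. The helper name newKindClusterProxy is illustrative only.

package e2e

import "sigs.k8s.io/cluster-api/test/framework"

// newKindClusterProxy mirrors the default KindManagementClusterNewClusterProxyFunc set in
// the hunk above. Attaching DockerLogCollector enables per-node log collection for the
// kind management cluster.
func newKindClusterProxy(name, kubeconfigPath string) framework.ClusterProxy {
    return framework.NewClusterProxy(name, kubeconfigPath, initScheme(),
        framework.WithMachineLogCollector(framework.DockerLogCollector{}))
}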

test/e2e/clusterctl_upgrade_test.go
Lines changed: 1 addition & 1 deletion

@@ -304,7 +304,7 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.10=>cu
     WorkloadKubernetesVersion: "v1.33.0",
     MgmtFlavor: "topology",
     WorkloadFlavor: "topology",
-    UseKindForManagementCluster: true,
+    UseKindForManagementCluster: false, // Using false for one test case to ensure this code path of the test keeps working.
 }
 })
 })

test/e2e/common.go
Lines changed: 48 additions & 0 deletions

@@ -19,11 +19,16 @@ package e2e
 import (
     "context"
     "fmt"
+    "path/filepath"

     "github.com/blang/semver/v4"
     . "github.com/onsi/ginkgo/v2"
     "github.com/onsi/gomega/types"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"

+    clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+    "sigs.k8s.io/cluster-api/test/framework"
     "sigs.k8s.io/cluster-api/test/framework/clusterctl"
 )

@@ -82,3 +87,46 @@ func GetLatestReleaseOfMinor(ctx context.Context, minorRelease string) (string,
     releaseMarker := fmt.Sprintf(latestReleaseMarkerPrefix, minorRelease)
     return clusterctl.ResolveRelease(ctx, releaseMarker)
 }
+
+func initScheme() *runtime.Scheme {
+    sc := runtime.NewScheme()
+    framework.TryAddDefaultSchemes(sc)
+    return sc
+}
+
+func dumpKindClusterLogs(ctx context.Context, artifactFolder string, clusterProxy framework.ClusterProxy) {
+    if clusterProxy == nil {
+        return
+    }
+
+    clusterLogCollector := clusterProxy.GetLogCollector()
+    if clusterLogCollector == nil {
+        return
+    }
+
+    nodes, err := clusterProxy.GetClientSet().CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+    if err != nil {
+        fmt.Printf("Failed to get nodes for the cluster: %v\n", err)
+        return
+    }
+
+    for i := range nodes.Items {
+        nodeName := nodes.Items[i].GetName()
+        err = clusterLogCollector.CollectMachineLog(
+            ctx,
+            clusterProxy.GetClient(),
+            // The kind cluster is not a CAPI cluster, so in order to re-use the logCollector,
+            // we create a fake machine that wraps the node.
+            // NOTE: This assumes a naming convention between machines and nodes, which e.g. applies to the clusters generated with kind.
+            // This might not work if you are using an existing cluster provided by other means.
+            &clusterv1.Machine{
+                Spec: clusterv1.MachineSpec{ClusterName: nodeName},
+                ObjectMeta: metav1.ObjectMeta{Name: nodeName},
+            },
+            filepath.Join(artifactFolder, "clusters", clusterProxy.GetName(), "machines", nodeName),
+        )
+        if err != nil {
+            fmt.Printf("Failed to get logs for the cluster node %s: %v\n", nodeName, err)
+        }
+    }
+}
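
To make the intended call order explicit, here is a minimal usage sketch (assumed, not part of the change): collect the kind node logs first, then dispose of the proxy and the kind cluster provider, mirroring the AfterEach block in test/e2e/clusterctl_upgrade.go above. The helper name and parameter list are illustrative.

package e2e

import (
    "context"

    "sigs.k8s.io/cluster-api/test/framework"
    "sigs.k8s.io/cluster-api/test/framework/bootstrap"
)

// teardownKindManagementCluster shows how dumpKindClusterLogs is meant to be used:
// logs are collected before Dispose so the kind cluster and its nodes still exist.
func teardownKindManagementCluster(ctx context.Context, artifactFolder string,
    proxy framework.ClusterProxy, provider bootstrap.ClusterProvider, skipCleanup bool) {
    dumpKindClusterLogs(ctx, artifactFolder, proxy)
    if !skipCleanup {
        proxy.Dispose(ctx)
        provider.Dispose(ctx)
    }
}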

test/e2e/e2e_suite_test.go
Lines changed: 1 addition & 46 deletions

@@ -30,14 +30,12 @@ import (

     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/component-base/logs"
     logsv1 "k8s.io/component-base/logs/api/v1"
     "k8s.io/klog/v2"
     ctrl "sigs.k8s.io/controller-runtime"

-    clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
     "sigs.k8s.io/cluster-api/test/framework"
     "sigs.k8s.io/cluster-api/test/framework/bootstrap"
     "sigs.k8s.io/cluster-api/test/framework/clusterctl"

@@ -191,20 +189,14 @@ var _ = SynchronizedAfterSuite(func() {
     // After all ParallelNodes.

     By("Dumping logs from the bootstrap cluster")
-    dumpBootstrapClusterLogs(bootstrapClusterProxy)
+    dumpKindClusterLogs(ctx, artifactFolder, bootstrapClusterProxy)

     By("Tearing down the management cluster")
     if !skipCleanup {
         tearDown(bootstrapClusterProvider, bootstrapClusterProxy)
     }
 })

-func initScheme() *runtime.Scheme {
-    sc := runtime.NewScheme()
-    framework.TryAddDefaultSchemes(sc)
-    return sc
-}
-
 func loadE2EConfig(configPath string) *clusterctl.E2EConfig {
     config := clusterctl.LoadE2EConfig(ctx, clusterctl.LoadE2EConfigInput{ConfigPath: configPath})
     Expect(config).ToNot(BeNil(), "Failed to load E2E config from %s", configPath)

@@ -269,43 +261,6 @@ func initBootstrapCluster(bootstrapClusterProxy framework.ClusterProxy, config *
     }, config.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...)
 }

-func dumpBootstrapClusterLogs(bootstrapClusterProxy framework.ClusterProxy) {
-    if bootstrapClusterProxy == nil {
-        return
-    }
-
-    clusterLogCollector := bootstrapClusterProxy.GetLogCollector()
-    if clusterLogCollector == nil {
-        return
-    }
-
-    nodes, err := bootstrapClusterProxy.GetClientSet().CoreV1().Nodes().List(ctx, metav1.ListOptions{})
-    if err != nil {
-        fmt.Printf("Failed to get nodes for the bootstrap cluster: %v\n", err)
-        return
-    }
-
-    for i := range nodes.Items {
-        nodeName := nodes.Items[i].GetName()
-        err = clusterLogCollector.CollectMachineLog(
-            ctx,
-            bootstrapClusterProxy.GetClient(),
-            // The bootstrap cluster is not expected to be a CAPI cluster, so in order to re-use the logCollector,
-            // we create a fake machine that wraps the node.
-            // NOTE: This assumes a naming convention between machines and nodes, which e.g. applies to the bootstrap clusters generated with kind.
-            // This might not work if you are using an existing bootstrap cluster provided by other means.
-            &clusterv1.Machine{
-                Spec: clusterv1.MachineSpec{ClusterName: nodeName},
-                ObjectMeta: metav1.ObjectMeta{Name: nodeName},
-            },
-            filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName(), "machines", nodeName),
-        )
-        if err != nil {
-            fmt.Printf("Failed to get logs for the bootstrap cluster node %s: %v\n", nodeName, err)
-        }
-    }
-}
-
 func tearDown(bootstrapClusterProvider bootstrap.ClusterProvider, bootstrapClusterProxy framework.ClusterProxy) {
     cancelWatches()
     if bootstrapClusterProxy != nil {