@@ -30,14 +30,12 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/component-base/logs"
 	logsv1 "k8s.io/component-base/logs/api/v1"
 	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework/bootstrap"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
@@ -191,20 +189,14 @@ var _ = SynchronizedAfterSuite(func() {
 	// After all ParallelNodes.
 
 	By("Dumping logs from the bootstrap cluster")
-	dumpBootstrapClusterLogs(bootstrapClusterProxy)
+	dumpKindClusterLogs(ctx, artifactFolder, bootstrapClusterProxy)
 
 	By("Tearing down the management cluster")
 	if !skipCleanup {
 		tearDown(bootstrapClusterProvider, bootstrapClusterProxy)
 	}
 })
 
-func initScheme() *runtime.Scheme {
-	sc := runtime.NewScheme()
-	framework.TryAddDefaultSchemes(sc)
-	return sc
-}
-
 func loadE2EConfig(configPath string) *clusterctl.E2EConfig {
 	config := clusterctl.LoadE2EConfig(ctx, clusterctl.LoadE2EConfigInput{ConfigPath: configPath})
 	Expect(config).ToNot(BeNil(), "Failed to load E2E config from %s", configPath)
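Note: the replacement helper dumpKindClusterLogs is not defined in this hunk. As a rough, illustrative sketch only (not the implementation added by this PR), a helper with the call signature used above could delegate log collection to kind itself, since the bootstrap cluster is created with kind, instead of faking a CAPI Machine per node as the removed dumpBootstrapClusterLogs did below. The sketch assumes the proxy name matches the kind cluster name and uses kind's Go API (sigs.k8s.io/kind/pkg/cluster).

package e2e

import (
	"context"
	"fmt"
	"path/filepath"

	kindcluster "sigs.k8s.io/kind/pkg/cluster"

	"sigs.k8s.io/cluster-api/test/framework"
)

// dumpKindClusterLogs is a sketch only; it assumes the bootstrap cluster was
// created with kind and that proxy.GetName() is the kind cluster name.
// ctx is accepted to match the call site above but is unused here.
func dumpKindClusterLogs(ctx context.Context, artifactFolder string, proxy framework.ClusterProxy) {
	if proxy == nil {
		return
	}
	// kind's CollectLogs gathers node, kubelet, and container runtime logs,
	// i.e. the same data "kind export logs" writes out.
	logDir := filepath.Join(artifactFolder, "clusters", proxy.GetName())
	if err := kindcluster.NewProvider().CollectLogs(proxy.GetName(), logDir); err != nil {
		fmt.Printf("Failed to collect logs for kind cluster %s: %v\n", proxy.GetName(), err)
	}
}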
@@ -269,43 +261,6 @@ func initBootstrapCluster(bootstrapClusterProxy framework.ClusterProxy, config *
 	}, config.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...)
 }
 
-func dumpBootstrapClusterLogs(bootstrapClusterProxy framework.ClusterProxy) {
-	if bootstrapClusterProxy == nil {
-		return
-	}
-
-	clusterLogCollector := bootstrapClusterProxy.GetLogCollector()
-	if clusterLogCollector == nil {
-		return
-	}
-
-	nodes, err := bootstrapClusterProxy.GetClientSet().CoreV1().Nodes().List(ctx, metav1.ListOptions{})
-	if err != nil {
-		fmt.Printf("Failed to get nodes for the bootstrap cluster: %v\n", err)
-		return
-	}
-
-	for i := range nodes.Items {
-		nodeName := nodes.Items[i].GetName()
-		err = clusterLogCollector.CollectMachineLog(
-			ctx,
-			bootstrapClusterProxy.GetClient(),
-			// The bootstrap cluster is not expected to be a CAPI cluster, so in order to re-use the logCollector,
-			// we create a fake machine that wraps the node.
-			// NOTE: This assumes a naming convention between machines and nodes, which e.g. applies to the bootstrap clusters generated with kind.
-			// This might not work if you are using an existing bootstrap cluster provided by other means.
-			&clusterv1.Machine{
-				Spec:       clusterv1.MachineSpec{ClusterName: nodeName},
-				ObjectMeta: metav1.ObjectMeta{Name: nodeName},
-			},
-			filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName(), "machines", nodeName),
-		)
-		if err != nil {
-			fmt.Printf("Failed to get logs for the bootstrap cluster node %s: %v\n", nodeName, err)
-		}
-	}
-}
-
 func tearDown(bootstrapClusterProvider bootstrap.ClusterProvider, bootstrapClusterProxy framework.ClusterProxy) {
 	cancelWatches()
 	if bootstrapClusterProxy != nil {