@@ -30,14 +30,12 @@ import (
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/component-base/logs"
 	logsv1 "k8s.io/component-base/logs/api/v1"
 	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework/bootstrap"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
@@ -191,20 +189,14 @@ var _ = SynchronizedAfterSuite(func() {
 	// After all ParallelNodes.
 
 	By("Dumping logs from the bootstrap cluster")
-	dumpBootstrapClusterLogs(bootstrapClusterProxy)
+	dumpKindClusterLogs(ctx, artifactFolder, bootstrapClusterProxy)
 
 	By("Tearing down the management cluster")
 	if !skipCleanup {
 		tearDown(bootstrapClusterProvider, bootstrapClusterProxy)
 	}
 })
 
-func initScheme() *runtime.Scheme {
-	sc := runtime.NewScheme()
-	framework.TryAddDefaultSchemes(sc)
-	return sc
-}
-
 func loadE2EConfig(configPath string) *clusterctl.E2EConfig {
 	config := clusterctl.LoadE2EConfig(ctx, clusterctl.LoadE2EConfigInput{ConfigPath: configPath})
 	Expect(config).ToNot(BeNil(), "Failed to load E2E config from %s", configPath)
@@ -269,43 +261,6 @@ func initBootstrapCluster(bootstrapClusterProxy framework.ClusterProxy, config *
 	}, config.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...)
 }
 
-func dumpBootstrapClusterLogs(bootstrapClusterProxy framework.ClusterProxy) {
-	if bootstrapClusterProxy == nil {
-		return
-	}
-
-	clusterLogCollector := bootstrapClusterProxy.GetLogCollector()
-	if clusterLogCollector == nil {
-		return
-	}
-
-	nodes, err := bootstrapClusterProxy.GetClientSet().CoreV1().Nodes().List(ctx, metav1.ListOptions{})
-	if err != nil {
-		fmt.Printf("Failed to get nodes for the bootstrap cluster: %v\n", err)
-		return
-	}
-
-	for i := range nodes.Items {
-		nodeName := nodes.Items[i].GetName()
-		err = clusterLogCollector.CollectMachineLog(
-			ctx,
-			bootstrapClusterProxy.GetClient(),
-			// The bootstrap cluster is not expected to be a CAPI cluster, so in order to re-use the logCollector,
-			// we create a fake machine that wraps the node.
-			// NOTE: This assumes a naming convention between machines and nodes, which e.g. applies to the bootstrap clusters generated with kind.
-			// This might not work if you are using an existing bootstrap cluster provided by other means.
-			&clusterv1.Machine{
-				Spec:       clusterv1.MachineSpec{ClusterName: nodeName},
-				ObjectMeta: metav1.ObjectMeta{Name: nodeName},
-			},
-			filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName(), "machines", nodeName),
-		)
-		if err != nil {
-			fmt.Printf("Failed to get logs for the bootstrap cluster node %s: %v\n", nodeName, err)
-		}
-	}
-}
-
 func tearDown(bootstrapClusterProvider bootstrap.ClusterProvider, bootstrapClusterProxy framework.ClusterProxy) {
 	cancelWatches()
 	if bootstrapClusterProxy != nil {
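
Note: the new callsite in SynchronizedAfterSuite assumes a dumpKindClusterLogs helper that is not part of this diff (it presumably lives elsewhere in the e2e package). Below is a minimal sketch of what such a helper could look like, reconstructed from the removed dumpBootstrapClusterLogs body and the new (ctx, artifactFolder, proxy) signature. The framework calls (GetLogCollector, CollectMachineLog, the fake-Machine wrapping of each node) come straight from the removed code; the package name and the helper's actual location are assumptions.

package e2e

import (
	"context"
	"fmt"
	"path/filepath"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
	"sigs.k8s.io/cluster-api/test/framework"
)

// dumpKindClusterLogs collects logs from every node of the kind bootstrap
// cluster. It mirrors the removed dumpBootstrapClusterLogs, but takes ctx and
// artifactFolder as parameters instead of reading package-level state.
func dumpKindClusterLogs(ctx context.Context, artifactFolder string, bootstrapClusterProxy framework.ClusterProxy) {
	if bootstrapClusterProxy == nil {
		return
	}

	clusterLogCollector := bootstrapClusterProxy.GetLogCollector()
	if clusterLogCollector == nil {
		return
	}

	nodes, err := bootstrapClusterProxy.GetClientSet().CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		fmt.Printf("Failed to get nodes for the bootstrap cluster: %v\n", err)
		return
	}

	for i := range nodes.Items {
		nodeName := nodes.Items[i].GetName()
		// The bootstrap cluster is not a CAPI cluster, so wrap each node in a
		// fake Machine to re-use the log collector. This relies on the
		// machine/node naming convention of kind-generated bootstrap clusters.
		err = clusterLogCollector.CollectMachineLog(
			ctx,
			bootstrapClusterProxy.GetClient(),
			&clusterv1.Machine{
				Spec:       clusterv1.MachineSpec{ClusterName: nodeName},
				ObjectMeta: metav1.ObjectMeta{Name: nodeName},
			},
			filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName(), "machines", nodeName),
		)
		if err != nil {
			fmt.Printf("Failed to get logs for the bootstrap cluster node %s: %v\n", nodeName, err)
		}
	}
}

Passing ctx and artifactFolder explicitly (rather than relying on suite-level globals) is what lets this helper be shared across test suites, which would explain why the local copy is deleted in this commit.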