
Commit fd3b7f0

Enable logging for e2e tests (#430)
* Enable logging for e2e tests
* Try adding hosts
1 parent 74a5992 commit fd3b7f0

4 files changed: +66 -13 lines changed

.github/workflows/e2e.yml

Lines changed: 8 additions & 0 deletions
@@ -40,6 +40,14 @@ jobs:
       - name: Run e2e tests
         run: "make test-e2e GINKGO_SKIP=${{ env.SKIP_E2E }}"

+      - name: Upload artifacts
+        uses: actions/upload-artifact@v4
+        if: success() || failure()
+        with:
+          name: logs
+          path: _artifacts
+          retention-days: 7
+
       - name: Cleanup kind clusters
         uses: gacts/run-and-post-run@v1
         with:
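The `if: success() || failure()` condition runs the upload step whether the tests pass or fail (though not when the job is canceled), so logs from failing runs are still archived. The `_artifacts` path presumably corresponds to the suite's `e2e.artifacts-folder`, which the Go changes below populate.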

test/e2e/capmox_test.go

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@ var _ = Describe("Workload cluster creation", func() {
 		result = new(clusterctl.ApplyClusterTemplateAndWaitResult)

 		// We need to override clusterctl apply log folder to avoid getting our credentials exposed.
-		clusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapClusterProxy.GetName())
+		clusterctlLogFolder = filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName())
 	})

 	AfterEach(func() {
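The practical effect of this one-line change: clusterctl apply logs move from a throwaway temp directory into the shared artifact tree, so the workflow's upload step picks them up. A minimal sketch of the resulting path construction (the values are illustrative, not taken from the suite's flags):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Before: logs landed under os.TempDir() and were lost after the job.
	// After: they land under the suite's artifact folder, e.g. _artifacts,
	// which the workflow's upload-artifact step archives.
	artifactFolder := "_artifacts" // illustrative value
	clusterName := "bootstrap"     // illustrative value
	fmt.Println(filepath.Join(artifactFolder, "clusters", clusterName))
	// Output: _artifacts/clusters/bootstrap
}
```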

test/e2e/data/infrastructure-proxmox/cluster-template-ci.yaml

Lines changed: 1 addition & 0 deletions
@@ -153,6 +153,7 @@ spec:
           owner: root:root
           permissions: "0700"
       preKubeadmCommands:
+        - echo "127.0.0.1 localhost kubernetes {{ ds.meta_data.hostname }}" >>/etc/hosts
         - /etc/kube-vip-prepare.sh
       initConfiguration:
         nodeRegistration:
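The extra `preKubeadmCommands` entry pins `localhost`, `kubernetes`, and the node's own cloud-init hostname to 127.0.0.1 in /etc/hosts, presumably so those names resolve during kubeadm bootstrap before cluster DNS is available. This is what the commit message's "Try adding hosts" refers to.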

test/e2e/suite_test.go

Lines changed: 56 additions & 12 deletions
@@ -32,8 +32,10 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"

+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/klog/v2"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1"
 	capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
 	"sigs.k8s.io/cluster-api/test/framework"
@@ -76,6 +78,11 @@

 // Test suite global vars.
 var (
+	ctx = ctrl.SetupSignalHandler()
+
+	// watchesCtx is used in log streaming to be able to get canceled via cancelWatches after ending the test suite.
+	watchesCtx, cancelWatches = context.WithCancel(ctx)
+
 	// e2eConfig to be used for this test, read from configPath.
 	e2eConfig *clusterctl.E2EConfig
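These new package-level contexts replace the scattered `context.TODO()` calls changed below. A minimal, self-contained sketch of the pattern (using `context.Background()` in place of controller-runtime's `ctrl.SetupSignalHandler()`):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Root context; the suite derives it from ctrl.SetupSignalHandler()
	// so an interrupt signal also propagates cancellation.
	ctx := context.Background()

	// Derived context handed to long-running log watchers, so teardown
	// can stop them explicitly via cancelWatches.
	watchesCtx, cancelWatches := context.WithCancel(ctx)

	go func() {
		<-watchesCtx.Done() // stands in for a controller-log streamer
		fmt.Println("log watcher stopped:", watchesCtx.Err())
	}()

	cancelWatches() // tearDown() calls this before disposing the clusters
	time.Sleep(10 * time.Millisecond)
}
```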
@@ -112,22 +119,16 @@ func TestE2E(t *testing.T) {

 	ctrl.SetLogger(klog.Background())

-	// If running in prow, make sure to use the artifacts folder that will be reported in test grid (ignoring the value provided by flag).
-	if prowArtifactFolder, exists := os.LookupEnv("ARTIFACTS"); exists {
-		artifactFolder = prowArtifactFolder
-	}
-
 	// ensure the artifacts folder exists
 	g.Expect(os.MkdirAll(artifactFolder, 0o755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", artifactFolder) //nolint:gosec

-	RegisterFailHandler(Fail)
-
 	if alsoLogToFile {
 		w, err := ginkgoextensions.EnableFileLogging(filepath.Join(artifactFolder, "ginkgo-log.txt"))
 		g.Expect(err).ToNot(HaveOccurred())
 		defer w.Close()
 	}

+	RegisterFailHandler(Fail)
 	RunSpecs(t, "capmox-e2e")
 }

@@ -181,7 +182,7 @@ var _ = SynchronizedBeforeSuite(func() []byte {
 	kubeconfigPath := parts[3]

 	e2eConfig = loadE2EConfig(configPath)
-	bootstrapClusterProxy = framework.NewClusterProxy("bootstrap", kubeconfigPath, initScheme())
+	bootstrapClusterProxy = framework.NewClusterProxy("bootstrap", kubeconfigPath, initScheme(), framework.WithMachineLogCollector(framework.DockerLogCollector{}))
 })

 // Using a SynchronizedAfterSuite for controlling how to delete resources shared across ParallelNodes (~ginkgo threads).
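Registering `framework.WithMachineLogCollector(framework.DockerLogCollector{})` is what makes the log dump in the next hunk possible: kind nodes run as Docker containers, so the Docker collector can read their logs directly.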
@@ -191,6 +192,10 @@ var _ = SynchronizedAfterSuite(func() {
 	// After each ParallelNode.
 }, func() {
 	// After all ParallelNodes.
+	By("Dumping logs from the bootstrap cluster")
+	if err := dumpBootstrapClusterLogs(); err != nil {
+		GinkgoWriter.Printf("Failed to dump bootstrap cluster logs: %v", err)
+	}

 	By("Tearing down the management cluster")
 	if !skipCleanup {
@@ -208,7 +213,7 @@ func initScheme() *runtime.Scheme {
 }

 func loadE2EConfig(configPath string) *clusterctl.E2EConfig {
-	config := clusterctl.LoadE2EConfig(context.TODO(), clusterctl.LoadE2EConfigInput{ConfigPath: configPath})
+	config := clusterctl.LoadE2EConfig(ctx, clusterctl.LoadE2EConfigInput{ConfigPath: configPath})
 	Expect(config).ToNot(BeNil(), "Failed to load E2E config from %s", configPath)

 	return config
@@ -226,7 +231,7 @@ func createClusterctlLocalRepository(config *clusterctl.E2EConfig, repositoryFol
 	Expect(cniPath).To(BeAnExistingFile(), "The %s variable should resolve to an existing file", capi_e2e.CNIPath)
 	createRepositoryInput.RegisterClusterResourceSetConfigMapTransformation(cniPath, capi_e2e.CNIResources)

-	clusterctlConfig := clusterctl.CreateRepository(context.TODO(), createRepositoryInput)
+	clusterctlConfig := clusterctl.CreateRepository(ctx, createRepositoryInput)
 	Expect(clusterctlConfig).To(BeAnExistingFile(), "The clusterctl config file does not exists in the local repository %s", repositoryFolder)

 	return clusterctlConfig
@@ -236,10 +241,11 @@ func setupBootstrapCluster(config *clusterctl.E2EConfig, scheme *runtime.Scheme,
 	var clusterProvider bootstrap.ClusterProvider
 	kubeconfigPath := ""
 	if !useExistingCluster {
-		clusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(context.TODO(), bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{
+		clusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(ctx, bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{
 			Name:               config.ManagementClusterName,
 			RequiresDockerSock: config.HasDockerProvider(),
 			Images:             config.Images,
+			LogFolder:          filepath.Join(artifactFolder, "kind"),
 		})
 		Expect(clusterProvider).ToNot(BeNil(), "Failed to create a bootstrap cluster")
@@ -254,7 +260,7 @@
 }

 func initBootstrapCluster(bootstrapClusterProxy framework.ClusterProxy, config *clusterctl.E2EConfig, clusterctlConfig, artifactFolder string) {
-	clusterctl.InitManagementClusterAndWatchControllerLogs(context.TODO(), clusterctl.InitManagementClusterAndWatchControllerLogsInput{
+	clusterctl.InitManagementClusterAndWatchControllerLogs(watchesCtx, clusterctl.InitManagementClusterAndWatchControllerLogsInput{
 		ClusterProxy:            bootstrapClusterProxy,
 		ClusterctlConfigPath:    clusterctlConfig,
 		InfrastructureProviders: config.InfrastructureProviders(),
@@ -264,10 +270,48 @@
 }

 func tearDown(bootstrapClusterProvider bootstrap.ClusterProvider, bootstrapClusterProxy framework.ClusterProxy) {
+	cancelWatches()
 	if bootstrapClusterProxy != nil {
 		bootstrapClusterProxy.Dispose(context.TODO())
 	}
 	if bootstrapClusterProvider != nil {
 		bootstrapClusterProvider.Dispose(context.TODO())
 	}
 }
+
+func dumpBootstrapClusterLogs() error {
+	if bootstrapClusterProxy == nil {
+		return nil
+	}
+	clusterLogCollector := bootstrapClusterProxy.GetLogCollector()
+	if clusterLogCollector == nil {
+		return nil
+	}
+
+	nodes, err := bootstrapClusterProxy.GetClientSet().CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to get nodes for the bootstrap cluster: %w", err)
+	}
+
+	for i := range nodes.Items {
+		nodeName := nodes.Items[i].GetName()
+		err := clusterLogCollector.CollectMachineLog(
+			ctx,
+			bootstrapClusterProxy.GetClient(),
+			// The bootstrap cluster is not expected to be a CAPI cluster, so in order to reuse the logCollector,
+			// we create a fake machine that wraps the node.
+			// NOTE: This assumes a naming convention between machines and nodes, which e.g. applies to bootstrap
+			// clusters generated with kind. This might not work if you are using an existing bootstrap cluster
+			// provided by other means.
+			&clusterv1.Machine{
+				Spec:       clusterv1.MachineSpec{ClusterName: nodeName},
+				ObjectMeta: metav1.ObjectMeta{Name: nodeName},
+			},
+			filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName(), "machines", nodeName),
+		)
+		if err != nil {
+			return fmt.Errorf("failed to get logs for the bootstrap cluster node %s: %w", nodeName, err)
+		}
+	}
+	return nil
+}
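Taken together, the changes should leave the artifact folder looking roughly like this after a run: `ginkgo-log.txt` (when file logging is enabled), `kind/` (bootstrap cluster creation logs), `clusters/bootstrap/machines/<node>/` (the node logs dumped above), and `clusters/<cluster>/` (clusterctl apply logs from the workload cluster tests), all uploaded by the workflow as the `logs` artifact.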
