release(main): v0.27.1 (#1053) #547
GitHub Actions / e2e test report
failed
Feb 13, 2025 in 0s
26 tests run, 7 passed, 18 skipped, 1 failed.
Annotations
Check failure on line 1 in caren-e2e
github-actions / e2e test report
caren-e2e.[It] Quick start Docker Calico HelmAddon topology-calico-helm-addon Should create a workload cluster [provider:Docker, Serial, cni:Calico, addonStrategy:HelmAddon]
A suite timeout occurred
Raw output
[TIMEDOUT] A suite timeout occurred
In [It] at: /home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/e2e/quick_start.go:106 @ 02/13/25 16:55:27.296
This is the Progress Report generated when the suite timeout occurred:
Quick start Docker Calico HelmAddon topology-calico-helm-addon Should create a workload cluster (Spec Runtime: 45.97s)
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/e2e/quick_start.go:106
In [It] (Node Runtime: 42.466s)
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/e2e/quick_start.go:106
At [By Step] Waiting for the control plane to be ready (Step Runtime: 2.068s)
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/controlplane_helpers.go:164
Spec Goroutine
goroutine 3998 [select]
github.com/onsi/gomega/internal.(*AsyncAssertion).match(0xc000745ea0, {0x32c5bd8, 0xc000eb2d00}, 0x1, {0xc000eb2d10, 0x1, 0x1})
/home/runner/go/pkg/mod/github.com/onsi/[email protected]/internal/async_assertion.go:546
github.com/onsi/gomega/internal.(*AsyncAssertion).Should(0xc000745ea0, {0x32c5bd8, 0xc000eb2d00}, {0xc000eb2d10, 0x1, 0x1})
/home/runner/go/pkg/mod/github.com/onsi/[email protected]/internal/async_assertion.go:145
> sigs.k8s.io/cluster-api/test/framework.WaitForControlPlaneToBeReady({0x32d75c8, 0xc00064d450}, {{0x7f30c404d328?, 0xc0039a6bd0?}, 0xc000964d88?}, {0xc0034e8440, 0x2, 0x2})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/controlplane_helpers.go:193
|
| return true, nil
> }, intervals...).Should(BeTrue(), PrettyPrint(controlplane)+"\n")
| }
|
> sigs.k8s.io/cluster-api/test/framework.WaitForControlPlaneAndMachinesReady({0x32d75c8, 0xc00064d450}, {{0x7f30c440dd48?, 0xc0039a6bd0?}, 0xc00361e8c0?, 0xc000964d88?}, {0xc0034e8440, 0x2, 0x2})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/controlplane_helpers.go:299
| ControlPlane: input.ControlPlane,
| }
> WaitForControlPlaneToBeReady(ctx, waitForControlPlaneToBeReadyInput, intervals...)
|
| AssertControlPlaneFailureDomains(ctx, AssertControlPlaneFailureDomainsInput{
> sigs.k8s.io/cluster-api/test/framework/clusterctl.ApplyCustomClusterTemplateAndWait.setDefaults.func3({_, _}, {{0x32ef550, 0xc003cc2380}, {0xc0004bea01, 0x14c7, 0x14c8}, {0xc003622090, 0x29}, {0xc00375a5e8, ...}, ...}, ...)
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/clusterctl/clusterctl_helpers.go:505
| if input.WaitForControlPlaneMachinesReady == nil {
| input.WaitForControlPlaneMachinesReady = func(ctx context.Context, input ApplyCustomClusterTemplateAndWaitInput, result *ApplyCustomClusterTemplateAndWaitResult) {
> framework.WaitForControlPlaneAndMachinesReady(ctx, framework.WaitForControlPlaneAndMachinesReadyInput{
| GetLister: input.ClusterProxy.GetClient(),
| Cluster: result.Cluster,
> sigs.k8s.io/cluster-api/test/framework/clusterctl.ApplyCustomClusterTemplateAndWait({_, _}, {{0x32ef550, 0xc003cc2380}, {0xc0004bea01, 0x14c7, 0x14c8}, {0xc003622090, 0x29}, {0xc00375a5e8, ...}, ...}, ...)
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/clusterctl/clusterctl_helpers.go:470
|
| log.Logf("Waiting for control plane of cluster %s to be ready", klog.KRef(input.Namespace, input.ClusterName))
> input.WaitForControlPlaneMachinesReady(ctx, input, result)
|
| log.Logf("Waiting for the machine deployments of cluster %s to be provisioned", klog.KRef(input.Namespace, input.ClusterName))
> sigs.k8s.io/cluster-api/test/framework/clusterctl.ApplyClusterTemplateAndWait({_, _}, {{0x32ef550, 0xc003cc2380}, {{0xc00184ab80, 0x7d}, {0xc000ede140, 0x31}, {0xc00396ede1, 0x17}, ...}, ...}, ...)
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/clusterctl/clusterctl_helpers.go:365
| Expect(workloadClusterTemplate).ToNot(BeNil(), "Failed to get the cluster template")
|
> ApplyCustomClusterTemplateAndWait(ctx, ApplyCustomClusterTemplateAndWaitInput{
| ClusterProxy: input.ClusterProxy,
| CustomTemplateYAML: workloadClusterTemplate,
> sigs.k8s.io/cluster-api/test/e2e.QuickStartSpec.func2()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/e2e/quick_start.go:133
| clusterName = *input.ClusterName
| }
> clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
| ClusterProxy: input.BootstrapClusterProxy,
| ConfigCluster: clusterctl.ConfigClusterInput{
github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x0?, 0x1763ece?})
/home/runner/go/pkg/mod/github.com/onsi/ginkgo/[email protected]/internal/node.go:475
github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3()
/home/runner/go/pkg/mod/github.com/onsi/ginkgo/[email protected]/internal/suite.go:894
github.com/onsi/ginkgo/v2/internal.(*Suite).runNode in goroutine 84
/home/runner/go/pkg/mod/github.com/onsi/ginkgo/[email protected]/internal/suite.go:881
Goroutines of Interest
goroutine 3190 [chan receive (nil chan), 3 minutes]
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs.func1()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:206
| go func() {
| defer GinkgoRecover()
> <-ctx.Done()
| Expect(podInformer.RemoveEventHandler(handlerRegistration)).To(Succeed())
| }()
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:204
| Expect(err).ToNot(HaveOccurred())
|
> go func() {
| defer GinkgoRecover()
| <-ctx.Done()
goroutine 3205 [chan receive (nil chan), 3 minutes]
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs.func1()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:206
| go func() {
| defer GinkgoRecover()
> <-ctx.Done()
| Expect(podInformer.RemoveEventHandler(handlerRegistration)).To(Succeed())
| }()
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:204
| Expect(err).ToNot(HaveOccurred())
|
> go func() {
| defer GinkgoRecover()
| <-ctx.Done()
goroutine 3153 [sync.Cond.Wait]
sync.runtime_notifyListWait(0xc003860948, 0x63)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/runtime/sema.go:597
sync.(*Cond).Wait(0x2fb?)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/sync/cond.go:71
golang.org/x/net/http2.(*pipe).Read(0xc003860930, {0xc003a9e000, 0x8000, 0x8000})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/pipe.go:76
golang.org/x/net/http2.transportResponseBody.Read({0xc002cb1410?}, {0xc003a9e000?, 0x2fb?, 0x8?})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/transport.go:2803
io.copyBuffer({0x32aa620, 0xc001e364f0}, {0x7f30c43f1c60, 0xc003860900}, {0x0, 0x0, 0x0})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:429
io.Copy(...)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:388
os.genericReadFrom(0x289a400?, {0x7f30c43f1c60, 0xc003860900})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:185
os.(*File).ReadFrom(0xc002cb1410, {0x7f30c43f1c60, 0xc003860900})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:161
bufio.(*Writer).ReadFrom(0xc00399ba38, {0x7f30c43f1c60, 0xc003860900})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/bufio/bufio.go:798
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1.1({0x32d7558, 0x4d720a0})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:317
| out := bufio.NewWriter(f)
| defer out.Flush()
> _, err = out.ReadFrom(podLogs)
| if err != nil && err != io.ErrUnexpectedEOF {
| // Failing to stream logs should not cause the test to fail
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext.func2(0xc0000abc08?, {0x32d7558?, 0x4d720a0?})
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:87
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext({0x32d7558, 0x4d720a0}, {0x32cb0c8, 0xc003549420}, 0x0, 0x0, 0xc00399bdb8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:88
k8s.io/apimachinery/pkg/util/wait.PollUntilContextCancel({0x32d7558, 0x4d720a0}, 0x4d720a0?, 0x0, 0xc0000abdb8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/poll.go:33
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1(_, {{0xc003390d58, 0x7}, {0xc000edee80, 0x32}, {0x0, 0x0, 0x0}, {0xc00307aa00, 0x4, ...}, ...})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:291
|
| // Retry streaming the logs of the pods unless ctx.Done() or if the pod does not exist anymore.
> err = wait.PollUntilContextCancel(eh.ctx, 2*time.Second, false, func(ctx context.Context) (done bool, err error) {
| // Wait for pod to be in running state
| actual, err := eh.input.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs in goroutine 3151
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:275
|
| // Watch each container's logs in a goroutine so we can stream them all concurrently.
> go func(pod *corev1.Pod, container corev1.Container) {
| defer GinkgoRecover()
|
goroutine 3148 [select]
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics.func3()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:376
| defer GinkgoRecover()
| for {
> select {
| case <-ctx.Done():
| return
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:373
| }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list Pods for deployment %s", klog.KObj(input.Deployment))
|
> go func() {
| defer GinkgoRecover()
| for {
goroutine 3169 [select]
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics.func3()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:376
| defer GinkgoRecover()
| for {
> select {
| case <-ctx.Done():
| return
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:373
| }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list Pods for deployment %s", klog.KObj(input.Deployment))
|
> go func() {
| defer GinkgoRecover()
| for {
goroutine 3179 [sync.Cond.Wait]
sync.runtime_notifyListWait(0xc003861248, 0x1b)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/runtime/sema.go:597
sync.(*Cond).Wait(0x341?)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/sync/cond.go:71
golang.org/x/net/http2.(*pipe).Read(0xc003861230, {0xc003ada000, 0x8000, 0x8000})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/pipe.go:76
golang.org/x/net/http2.transportResponseBody.Read({0xc0034a3860?}, {0xc003ada000?, 0x341?, 0x8?})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/transport.go:2803
io.copyBuffer({0x32aa620, 0xc001e36668}, {0x7f30c43f1c60, 0xc003861200}, {0x0, 0x0, 0x0})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:429
io.Copy(...)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:388
os.genericReadFrom(0x289a400?, {0x7f30c43f1c60, 0xc003861200})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:185
os.(*File).ReadFrom(0xc0034a3860, {0x7f30c43f1c60, 0xc003861200})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:161
bufio.(*Writer).ReadFrom(0xc00399da38, {0x7f30c43f1c60, 0xc003861200})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/bufio/bufio.go:798
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1.1({0x32d7558, 0x4d720a0})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:317
| out := bufio.NewWriter(f)
| defer out.Flush()
> _, err = out.ReadFrom(podLogs)
| if err != nil && err != io.ErrUnexpectedEOF {
| // Failing to stream logs should not cause the test to fail
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext.func2(0xc002611c08?, {0x32d7558?, 0x4d720a0?})
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:87
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext({0x32d7558, 0x4d720a0}, {0x32cb0c8, 0xc003634080}, 0x0, 0x0, 0xc00399ddb8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:88
k8s.io/apimachinery/pkg/util/wait.PollUntilContextCancel({0x32d7558, 0x4d720a0}, 0x4d720a0?, 0x0, 0xc002611db8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/poll.go:33
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1(_, {{0xc003391078, 0x7}, {0xc000edf100, 0x3f}, {0xc001e651b0, 0x1, 0x1}, {0xc003068b00, 0x5, ...}, ...})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:291
|
| // Retry streaming the logs of the pods unless ctx.Done() or if the pod does not exist anymore.
> err = wait.PollUntilContextCancel(eh.ctx, 2*time.Second, false, func(ctx context.Context) (done bool, err error) {
| // Wait for pod to be in running state
| actual, err := eh.input.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs in goroutine 3166
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:275
|
| // Watch each container's logs in a goroutine so we can stream them all concurrently.
> go func(pod *corev1.Pod, container corev1.Container) {
| defer GinkgoRecover()
|
goroutine 3005 [chan receive (nil chan), 3 minutes]
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs.func1()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:206
| go func() {
| defer GinkgoRecover()
> <-ctx.Done()
| Expect(podInformer.RemoveEventHandler(handlerRegistration)).To(Succeed())
| }()
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:204
| Expect(err).ToNot(HaveOccurred())
|
> go func() {
| defer GinkgoRecover()
| <-ctx.Done()
goroutine 3162 [chan receive (nil chan), 3 minutes]
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs.func1()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:206
| go func() {
| defer GinkgoRecover()
> <-ctx.Done()
| Expect(podInformer.RemoveEventHandler(handlerRegistration)).To(Succeed())
| }()
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:204
| Expect(err).ToNot(HaveOccurred())
|
> go func() {
| defer GinkgoRecover()
| <-ctx.Done()
goroutine 3002 [select]
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics.func3()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:376
| defer GinkgoRecover()
| for {
> select {
| case <-ctx.Done():
| return
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:373
| }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list Pods for deployment %s", klog.KObj(input.Deployment))
|
> go func() {
| defer GinkgoRecover()
| for {
goroutine 3187 [sync.Cond.Wait]
sync.runtime_notifyListWait(0xc003b66048, 0xa9)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/runtime/sema.go:597
sync.(*Cond).Wait(0x1f0?)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/sync/cond.go:71
golang.org/x/net/http2.(*pipe).Read(0xc003b66030, {0xc003b72000, 0x8000, 0x8000})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/pipe.go:76
golang.org/x/net/http2.transportResponseBody.Read({0xc002a10b40?}, {0xc003b72000?, 0x1f0?, 0x8?})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/transport.go:2803
io.copyBuffer({0x32aa620, 0xc001e36b78}, {0x7f30c43f1c60, 0xc003b66000}, {0x0, 0x0, 0x0})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:429
io.Copy(...)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:388
os.genericReadFrom(0x289a400?, {0x7f30c43f1c60, 0xc003b66000})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:185
os.(*File).ReadFrom(0xc002a10b40, {0x7f30c43f1c60, 0xc003b66000})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:161
bufio.(*Writer).ReadFrom(0xc003d5ba38, {0x7f30c43f1c60, 0xc003b66000})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/bufio/bufio.go:798
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1.1({0x32d7558, 0x4d720a0})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:317
| out := bufio.NewWriter(f)
| defer out.Flush()
> _, err = out.ReadFrom(podLogs)
| if err != nil && err != io.ErrUnexpectedEOF {
| // Failing to stream logs should not cause the test to fail
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext.func2(0xc002ea8c08?, {0x32d7558?, 0x4d720a0?})
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:87
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext({0x32d7558, 0x4d720a0}, {0x32cb0c8, 0xc00336ee60}, 0x0, 0x0, 0xc003d5bdb8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:88
k8s.io/apimachinery/pkg/util/wait.PollUntilContextCancel({0x32d7558, 0x4d720a0}, 0x4d720a0?, 0x0, 0xc002ea8db8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/poll.go:33
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1(_, {{0xc003391388, 0x7}, {0xc0030588c0, 0x43}, {0xc001e65230, 0x1, 0x1}, {0xc003068c00, 0x5, ...}, ...})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:291
|
| // Retry streaming the logs of the pods unless ctx.Done() or if the pod does not exist anymore.
> err = wait.PollUntilContextCancel(eh.ctx, 2*time.Second, false, func(ctx context.Context) (done bool, err error) {
| // Wait for pod to be in running state
| actual, err := eh.input.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs in goroutine 3203
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:275
|
| // Watch each container's logs in a goroutine so we can stream them all concurrently.
> go func(pod *corev1.Pod, container corev1.Container) {
| defer GinkgoRecover()
|
goroutine 3176 [sync.Cond.Wait, 3 minutes]
sync.runtime_notifyListWait(0xc00368f248, 0xa)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/runtime/sema.go:597
sync.(*Cond).Wait(0x170c?)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/sync/cond.go:71
golang.org/x/net/http2.(*pipe).Read(0xc00368f230, {0xc003a78000, 0x8000, 0x8000})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/pipe.go:76
golang.org/x/net/http2.transportResponseBody.Read({0xc0034a37a8?}, {0xc003a78000?, 0x170c?, 0x8?})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/transport.go:2803
io.copyBuffer({0x32aa620, 0xc001e364b8}, {0x7f30c43f1c60, 0xc00368f200}, {0x0, 0x0, 0x0})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:429
io.Copy(...)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:388
os.genericReadFrom(0x289a400?, {0x7f30c43f1c60, 0xc00368f200})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:185
os.(*File).ReadFrom(0xc0034a37a8, {0x7f30c43f1c60, 0xc00368f200})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:161
bufio.(*Writer).ReadFrom(0xc003d5da38, {0x7f30c43f1c60, 0xc00368f200})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/bufio/bufio.go:798
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1.1({0x32d7558, 0x4d720a0})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:317
| out := bufio.NewWriter(f)
| defer out.Flush()
> _, err = out.ReadFrom(podLogs)
| if err != nil && err != io.ErrUnexpectedEOF {
| // Failing to stream logs should not cause the test to fail
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext.func2(0xc000bddc08?, {0x32d7558?, 0x4d720a0?})
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:87
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext({0x32d7558, 0x4d720a0}, {0x32cb0c8, 0xc0034e9c40}, 0x0, 0x0, 0xc003d5ddb8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:88
k8s.io/apimachinery/pkg/util/wait.PollUntilContextCancel({0x32d7558, 0x4d720a0}, 0x4d720a0?, 0x0, 0xc000bdddb8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/poll.go:33
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1(_, {{0xc003390a30, 0x7}, {0xc0030586e0, 0x41}, {0x0, 0x0, 0x0}, {0xc003068980, 0x5, ...}, ...})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:291
|
| // Retry streaming the logs of the pods unless ctx.Done() or if the pod does not exist anymore.
> err = wait.PollUntilContextCancel(eh.ctx, 2*time.Second, false, func(ctx context.Context) (done bool, err error) {
| // Wait for pod to be in running state
| actual, err := eh.input.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs in goroutine 2996
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:275
|
| // Watch each container's logs in a goroutine so we can stream them all concurrently.
> go func(pod *corev1.Pod, container corev1.Container) {
| defer GinkgoRecover()
|
goroutine 3189 [select]
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics.func3()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:376
| defer GinkgoRecover()
| for {
> select {
| case <-ctx.Done():
| return
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:373
| }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list Pods for deployment %s", klog.KObj(input.Deployment))
|
> go func() {
| defer GinkgoRecover()
| for {
goroutine 3168 [chan receive (nil chan), 3 minutes]
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs.func1()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:206
| go func() {
| defer GinkgoRecover()
> <-ctx.Done()
| Expect(podInformer.RemoveEventHandler(handlerRegistration)).To(Succeed())
| }()
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:204
| Expect(err).ToNot(HaveOccurred())
|
> go func() {
| defer GinkgoRecover()
| <-ctx.Done()
goroutine 3221 [chan receive (nil chan), 3 minutes]
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs.func1()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:206
| go func() {
| defer GinkgoRecover()
> <-ctx.Done()
| Expect(podInformer.RemoveEventHandler(handlerRegistration)).To(Succeed())
| }()
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:204
| Expect(err).ToNot(HaveOccurred())
|
> go func() {
| defer GinkgoRecover()
| <-ctx.Done()
goroutine 3147 [sync.Cond.Wait]
sync.runtime_notifyListWait(0xc0038604c8, 0x955)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/runtime/sema.go:597
sync.(*Cond).Wait(0x226?)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/sync/cond.go:71
golang.org/x/net/http2.(*pipe).Read(0xc0038604b0, {0xc003a6a000, 0x8000, 0x8000})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/pipe.go:76
golang.org/x/net/http2.transportResponseBody.Read({0xc002cb1338?}, {0xc003a6a000?, 0x226?, 0x8?})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/transport.go:2803
io.copyBuffer({0x32aa620, 0xc001e36478}, {0x7f30c43f1c60, 0xc003860480}, {0x0, 0x0, 0x0})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:429
io.Copy(...)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:388
os.genericReadFrom(0x289a400?, {0x7f30c43f1c60, 0xc003860480})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:185
os.(*File).ReadFrom(0xc002cb1338, {0x7f30c43f1c60, 0xc003860480})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:161
bufio.(*Writer).ReadFrom(0xc003d57a38, {0x7f30c43f1c60, 0xc003860480})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/bufio/bufio.go:798
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1.1({0x32d7558, 0x4d720a0})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:317
| out := bufio.NewWriter(f)
| defer out.Flush()
> _, err = out.ReadFrom(podLogs)
| if err != nil && err != io.ErrUnexpectedEOF {
| // Failing to stream logs should not cause the test to fail
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext.func2(0xc003617c08?, {0x32d7558?, 0x4d720a0?})
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:87
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext({0x32d7558, 0x4d720a0}, {0x32cb0c8, 0xc002414900}, 0x0, 0x0, 0xc003d57db8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:88
k8s.io/apimachinery/pkg/util/wait.PollUntilContextCancel({0x32d7558, 0x4d720a0}, 0x4d720a0?, 0x0, 0xc003617db8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/poll.go:33
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1(_, {{0xc003390690, 0x7}, {0xc0030585f0, 0x43}, {0x0, 0x0, 0x0}, {0xc003068880, 0x5, ...}, ...})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:291
|
| // Retry streaming the logs of the pods unless ctx.Done() or if the pod does not exist anymore.
> err = wait.PollUntilContextCancel(eh.ctx, 2*time.Second, false, func(ctx context.Context) (done bool, err error) {
| // Wait for pod to be in running state
| actual, err := eh.input.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs in goroutine 3160
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:275
|
| // Watch each container's logs in a goroutine so we can stream them all concurrently.
> go func(pod *corev1.Pod, container corev1.Container) {
| defer GinkgoRecover()
|
goroutine 3113 [chan receive (nil chan), 3 minutes]
sigs.k8s.io/controller-runtime/pkg/cache/internal.(*Informers).Start(0xc002da3cc0, {0x32d7558, 0x4d720a0})
/home/runner/go/pkg/mod/sigs.k8s.io/[email protected]/pkg/cache/internal/informers.go:223
> sigs.k8s.io/cluster-api/test/framework.(*clusterProxy).GetCache.func1.1()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/cluster_proxy.go:296
| go func() {
| defer GinkgoRecover()
> Expect(p.cache.Start(ctx)).To(Succeed())
| }()
|
> sigs.k8s.io/cluster-api/test/framework.(*clusterProxy).GetCache.func1 in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/cluster_proxy.go:294
| Expect(err).ToNot(HaveOccurred(), "Failed to create controller-runtime cache")
|
> go func() {
| defer GinkgoRecover()
| Expect(p.cache.Start(ctx)).To(Succeed())
goroutine 3173 [select]
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics.func3()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:376
| defer GinkgoRecover()
| for {
> select {
| case <-ctx.Done():
| return
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:373
| }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list Pods for deployment %s", klog.KObj(input.Deployment))
|
> go func() {
| defer GinkgoRecover()
| for {
goroutine 2998 [chan receive (nil chan), 3 minutes]
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs.func1()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:206
| go func() {
| defer GinkgoRecover()
> <-ctx.Done()
| Expect(podInformer.RemoveEventHandler(handlerRegistration)).To(Succeed())
| }()
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:204
| Expect(err).ToNot(HaveOccurred())
|
> go func() {
| defer GinkgoRecover()
| <-ctx.Done()
goroutine 3177 [chan receive (nil chan), 3 minutes]
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs.func1()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:206
| go func() {
| defer GinkgoRecover()
> <-ctx.Done()
| Expect(podInformer.RemoveEventHandler(handlerRegistration)).To(Succeed())
| }()
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:204
| Expect(err).ToNot(HaveOccurred())
|
> go func() {
| defer GinkgoRecover()
| <-ctx.Done()
goroutine 3218 [sync.Cond.Wait]
sync.runtime_notifyListWait(0xc00368fcc8, 0x1dd)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/runtime/sema.go:597
sync.(*Cond).Wait(0x66f?)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/sync/cond.go:71
golang.org/x/net/http2.(*pipe).Read(0xc00368fcb0, {0xc000812000, 0x8000, 0x8000})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/pipe.go:76
golang.org/x/net/http2.transportResponseBody.Read({0xc001cb8778?}, {0xc000812000?, 0x66f?, 0x8?})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/transport.go:2803
io.copyBuffer({0x32aa620, 0xc001e36000}, {0x7f30c43f1c60, 0xc00368fc80}, {0x0, 0x0, 0x0})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:429
io.Copy(...)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:388
os.genericReadFrom(0x289a400?, {0x7f30c43f1c60, 0xc00368fc80})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:185
os.(*File).ReadFrom(0xc001cb8778, {0x7f30c43f1c60, 0xc00368fc80})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:161
bufio.(*Writer).ReadFrom(0xc003245a38, {0x7f30c43f1c60, 0xc00368fc80})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/bufio/bufio.go:798
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1.1({0x32d7558, 0x4d720a0})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:317
| out := bufio.NewWriter(f)
| defer out.Flush()
> _, err = out.ReadFrom(podLogs)
| if err != nil && err != io.ErrUnexpectedEOF {
| // Failing to stream logs should not cause the test to fail
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext.func2(0xc003534c08?, {0x32d7558?, 0x4d720a0?})
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:87
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext({0x32d7558, 0x4d720a0}, {0x32cb0c8, 0xc003843380}, 0x0, 0x0, 0xc003245db8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:88
k8s.io/apimachinery/pkg/util/wait.PollUntilContextCancel({0x32d7558, 0x4d720a0}, 0x4d720a0?, 0x0, 0xc003534db8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/poll.go:33
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1(_, {{0xc0033916b0, 0x7}, {0xc000edfb80, 0x39}, {0xc001e652b0, 0x1, 0x1}, {0xc003068d00, 0x5, ...}, ...})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:291
|
| // Retry streaming the logs of the pods unless ctx.Done() or if the pod does not exist anymore.
> err = wait.PollUntilContextCancel(eh.ctx, 2*time.Second, false, func(ctx context.Context) (done bool, err error) {
| // Wait for pod to be in running state
| actual, err := eh.input.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs in goroutine 3184
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:275
|
| // Watch each container's logs in a goroutine so we can stream them all concurrently.
> go func(pod *corev1.Pod, container corev1.Container) {
| defer GinkgoRecover()
|
goroutine 3193 [select]
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics.func3()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:376
| defer GinkgoRecover()
| for {
> select {
| case <-ctx.Done():
| return
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:373
| }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list Pods for deployment %s", klog.KObj(input.Deployment))
|
> go func() {
| defer GinkgoRecover()
| for {
goroutine 3209 [sync.Cond.Wait, 3 minutes]
sync.runtime_notifyListWait(0xc0036004c8, 0x2)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/runtime/sema.go:597
sync.(*Cond).Wait(0xfb?)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/sync/cond.go:71
golang.org/x/net/http2.(*pipe).Read(0xc0036004b0, {0xc003c42000, 0x8000, 0x8000})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/pipe.go:76
golang.org/x/net/http2.transportResponseBody.Read({0xc002a10c18?}, {0xc003c42000?, 0xfb?, 0x8?})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/transport.go:2803
io.copyBuffer({0x32aa620, 0xc002a10020}, {0x7f30c43f1c60, 0xc003600480}, {0x0, 0x0, 0x0})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:429
io.Copy(...)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:388
os.genericReadFrom(0x289a400?, {0x7f30c43f1c60, 0xc003600480})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:185
os.(*File).ReadFrom(0xc002a10c18, {0x7f30c43f1c60, 0xc003600480})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:161
bufio.(*Writer).ReadFrom(0xc003d59a38, {0x7f30c43f1c60, 0xc003600480})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/bufio/bufio.go:798
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1.1({0x32d7558, 0x4d720a0})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:317
| out := bufio.NewWriter(f)
| defer out.Flush()
> _, err = out.ReadFrom(podLogs)
| if err != nil && err != io.ErrUnexpectedEOF {
| // Failing to stream logs should not cause the test to fail
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext.func2(0xc003c1bc08?, {0x32d7558?, 0x4d720a0?})
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:87
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext({0x32d7558, 0x4d720a0}, {0x32cb0c8, 0xc003900120}, 0x0, 0x0, 0xc003d59db8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:88
k8s.io/apimachinery/pkg/util/wait.PollUntilContextCancel({0x32d7558, 0x4d720a0}, 0x4d720a0?, 0x0, 0xc003c1bdb8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/poll.go:33
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1(_, {{0xc0033919f0, 0xf}, {0xc003662540, 0x29}, {0x0, 0x0, 0x0}, {0xc00307af00, 0x4, ...}, ...})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:291
|
| // Retry streaming the logs of the pods unless ctx.Done() or if the pod does not exist anymore.
> err = wait.PollUntilContextCancel(eh.ctx, 2*time.Second, false, func(ctx context.Context) (done bool, err error) {
| // Wait for pod to be in running state
| actual, err := eh.input.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs in goroutine 3207
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:275
|
| // Watch each container's logs in a goroutine so we can stream them all concurrently.
> go func(pod *corev1.Pod, container corev1.Container) {
| defer GinkgoRecover()
|
goroutine 3210 [sync.Cond.Wait, 3 minutes]
sync.runtime_notifyListWait(0xc003b66348, 0x3)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/runtime/sema.go:597
sync.(*Cond).Wait(0x976?)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/sync/cond.go:71
golang.org/x/net/http2.(*pipe).Read(0xc003b66330, {0xc003c3a000, 0x8000, 0x8000})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/pipe.go:76
golang.org/x/net/http2.transportResponseBody.Read({0xc001cb87b0?}, {0xc003c3a000?, 0x976?, 0x8?})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/transport.go:2803
io.copyBuffer({0x32aa620, 0xc002a10018}, {0x7f30c43f1c60, 0xc003b66300}, {0x0, 0x0, 0x0})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:429
io.Copy(...)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:388
os.genericReadFrom(0x289a400?, {0x7f30c43f1c60, 0xc003b66300})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:185
os.(*File).ReadFrom(0xc001cb87b0, {0x7f30c43f1c60, 0xc003b66300})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:161
bufio.(*Writer).ReadFrom(0xc00323fa38, {0x7f30c43f1c60, 0xc003b66300})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/bufio/bufio.go:798
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1.1({0x32d7558, 0x4d720a0})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:317
| out := bufio.NewWriter(f)
| defer out.Flush()
> _, err = out.ReadFrom(podLogs)
| if err != nil && err != io.ErrUnexpectedEOF {
| // Failing to stream logs should not cause the test to fail
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext.func2(0xc002612c08?, {0x32d7558?, 0x4d720a0?})
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:87
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext({0x32d7558, 0x4d720a0}, {0x32cb0c8, 0xc003843560}, 0x0, 0x0, 0xc00323fdb8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:88
k8s.io/apimachinery/pkg/util/wait.PollUntilContextCancel({0x32d7558, 0x4d720a0}, 0x4d720a0?, 0x0, 0xc002612db8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/poll.go:33
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1(_, {{0xc003391a30, 0x7}, {0xc003058a00, 0x4b}, {0x0, 0x0, 0x0}, {0xc003068e00, 0x7, ...}, ...})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:291
|
| // Retry streaming the logs of the pods unless ctx.Done() or if the pod does not exist anymore.
> err = wait.PollUntilContextCancel(eh.ctx, 2*time.Second, false, func(ctx context.Context) (done bool, err error) {
| // Wait for pod to be in running state
| actual, err := eh.input.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs in goroutine 3207
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:275
|
| // Watch each container's logs in a goroutine so we can stream them all concurrently.
> go func(pod *corev1.Pod, container corev1.Container) {
| defer GinkgoRecover()
|
goroutine 3216 [select]
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics.func3()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:376
| defer GinkgoRecover()
| for {
> select {
| case <-ctx.Done():
| return
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:373
| }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list Pods for deployment %s", klog.KObj(input.Deployment))
|
> go func() {
| defer GinkgoRecover()
| for {
goroutine 3236 [sync.Cond.Wait]
sync.runtime_notifyListWait(0xc00368ec48, 0x210)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/runtime/sema.go:597
sync.(*Cond).Wait(0xde?)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/sync/cond.go:71
golang.org/x/net/http2.(*pipe).Read(0xc00368ec30, {0xc000a6e000, 0x8000, 0x8000})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/pipe.go:76
golang.org/x/net/http2.transportResponseBody.Read({0xc001e36a78?}, {0xc000a6e000?, 0xde?, 0x8?})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/transport.go:2803
io.copyBuffer({0x32aa620, 0xc0034a20b8}, {0x7f30c43f1c60, 0xc00368ec00}, {0x0, 0x0, 0x0})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:429
io.Copy(...)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:388
os.genericReadFrom(0x289a400?, {0x7f30c43f1c60, 0xc00368ec00})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:185
os.(*File).ReadFrom(0xc001e36a78, {0x7f30c43f1c60, 0xc00368ec00})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:161
bufio.(*Writer).ReadFrom(0xc000b9da38, {0x7f30c43f1c60, 0xc00368ec00})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/bufio/bufio.go:798
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1.1({0x32d7558, 0x4d720a0})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:317
| out := bufio.NewWriter(f)
| defer out.Flush()
> _, err = out.ReadFrom(podLogs)
| if err != nil && err != io.ErrUnexpectedEOF {
| // Failing to stream logs should not cause the test to fail
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext.func2(0xc003af0c08?, {0x32d7558?, 0x4d720a0?})
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:87
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext({0x32d7558, 0x4d720a0}, {0x32cb0c8, 0xc003b381e0}, 0x0, 0x0, 0xc000b9ddb8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:88
k8s.io/apimachinery/pkg/util/wait.PollUntilContextCancel({0x32d7558, 0x4d720a0}, 0x4d720a0?, 0x0, 0xc003af0db8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/poll.go:33
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1(_, {{0xc003391cf0, 0x7}, {0xc003058b40, 0x41}, {0x0, 0x0, 0x0}, {0xc002d81c00, 0x14, ...}, ...})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:291
|
| // Retry streaming the logs of the pods unless ctx.Done() or if the pod does not exist anymore.
> err = wait.PollUntilContextCancel(eh.ctx, 2*time.Second, false, func(ctx context.Context) (done bool, err error) {
| // Wait for pod to be in running state
| actual, err := eh.input.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs in goroutine 3219
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:275
|
| // Watch each container's logs in a goroutine so we can stream them all concurrently.
> go func(pod *corev1.Pod, container corev1.Container) {
| defer GinkgoRecover()
|
goroutine 3223 [select]
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics.func3()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:376
| defer GinkgoRecover()
| for {
> select {
| case <-ctx.Done():
| return
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:373
| }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list Pods for deployment %s", klog.KObj(input.Deployment))
|
> go func() {
| defer GinkgoRecover()
| for {
goroutine 3198 [chan receive (nil chan), 3 minutes]
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs.func1()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:206
| go func() {
| defer GinkgoRecover()
> <-ctx.Done()
| Expect(podInformer.RemoveEventHandler(handlerRegistration)).To(Succeed())
| }()
> sigs.k8s.io/cluster-api/test/framework.watchPodLogs in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:204
| Expect(err).ToNot(HaveOccurred())
|
> go func() {
| defer GinkgoRecover()
| <-ctx.Done()
goroutine 3242 [sync.Cond.Wait, 3 minutes]
sync.runtime_notifyListWait(0xc003b66ac8, 0x5)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/runtime/sema.go:597
sync.(*Cond).Wait(0x160?)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/sync/cond.go:71
golang.org/x/net/http2.(*pipe).Read(0xc003b66ab0, {0xc000aa2000, 0x8000, 0x8000})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/pipe.go:76
golang.org/x/net/http2.transportResponseBody.Read({0xc001e36ba0?}, {0xc000aa2000?, 0x160?, 0x8?})
/home/runner/go/pkg/mod/golang.org/x/[email protected]/http2/transport.go:2803
io.copyBuffer({0x32aa620, 0xc002a100d0}, {0x7f30c43f1c60, 0xc003b66a80}, {0x0, 0x0, 0x0})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:429
io.Copy(...)
/home/runner/go/pkg/mod/golang.org/[email protected]/src/io/io.go:388
os.genericReadFrom(0x289a400?, {0x7f30c43f1c60, 0xc003b66a80})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:185
os.(*File).ReadFrom(0xc001e36ba0, {0x7f30c43f1c60, 0xc003b66a80})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/os/file.go:161
bufio.(*Writer).ReadFrom(0xc000e35a38, {0x7f30c43f1c60, 0xc003b66a80})
/home/runner/go/pkg/mod/golang.org/[email protected]/src/bufio/bufio.go:798
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1.1({0x32d7558, 0x4d720a0})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:317
| out := bufio.NewWriter(f)
| defer out.Flush()
> _, err = out.ReadFrom(podLogs)
| if err != nil && err != io.ErrUnexpectedEOF {
| // Failing to stream logs should not cause the test to fail
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext.func2(0xc003df0c08?, {0x32d7558?, 0x4d720a0?})
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:87
k8s.io/apimachinery/pkg/util/wait.loopConditionUntilContext({0x32d7558, 0x4d720a0}, {0x32cb0c8, 0xc003b389a0}, 0x0, 0x0, 0xc000e35db8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/loop.go:88
k8s.io/apimachinery/pkg/util/wait.PollUntilContextCancel({0x32d7558, 0x4d720a0}, 0x4d720a0?, 0x0, 0xc003df0db8)
/home/runner/go/pkg/mod/k8s.io/[email protected]/pkg/util/wait/poll.go:33
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs.func1(_, {{0xc003468020, 0x7}, {0xc003662b40, 0x25}, {0x0, 0x0, 0x0}, {0xc003069000, 0x7, ...}, ...})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:291
|
| // Retry streaming the logs of the pods unless ctx.Done() or if the pod does not exist anymore.
> err = wait.PollUntilContextCancel(eh.ctx, 2*time.Second, false, func(ctx context.Context) (done bool, err error) {
| // Wait for pod to be in running state
| actual, err := eh.input.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
> sigs.k8s.io/cluster-api/test/framework.(*watchPodLogsEventHandler).streamPodLogs in goroutine 3240
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:275
|
| // Watch each container's logs in a goroutine so we can stream them all concurrently.
> go func(pod *corev1.Pod, container corev1.Container) {
| defer GinkgoRecover()
|
goroutine 3008 [select]
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics.func3()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:376
| defer GinkgoRecover()
| for {
> select {
| case <-ctx.Done():
| return
> sigs.k8s.io/cluster-api/test/framework.WatchPodMetrics in goroutine 88
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/deployment_helpers.go:373
| }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list Pods for deployment %s", klog.KObj(input.Deployment))
|
> go func() {
| defer GinkgoRecover()
| for {
goroutine 4026 [chan receive]
> sigs.k8s.io/cluster-api/test/framework.WatchNamespaceEvents({0x32d75c8, 0xc001098910}, {0xc00361e540, {0xc00375a5e8, 0x12}, {0xc002392600, 0x7d}})
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/namespace_helpers.go:176
| defer close(stopInformer)
| informerFactory.Start(stopInformer)
> <-ctx.Done()
| stopInformer <- struct{}{}
| }
> sigs.k8s.io/cluster-api/test/framework.CreateNamespaceAndWatchEvents.func1()
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/namespace_helpers.go:208
| go func() {
| defer GinkgoRecover()
> WatchNamespaceEvents(watchesCtx, WatchNamespaceEventsInput{
| ClientSet: input.ClientSet,
| Name: namespace.Name,
> sigs.k8s.io/cluster-api/test/framework.CreateNamespaceAndWatchEvents in goroutine 4023
/home/runner/go/pkg/mod/sigs.k8s.io/cluster-api/[email protected]/framework/namespace_helpers.go:206
| log.Logf("Creating event watcher for namespace %q", input.Name)
| watchesCtx, cancelWatches := context.WithCancel(ctx)
> go func() {
| defer GinkgoRecover()
| WatchNamespaceEvents(watchesCtx, WatchNamespaceEventsInput{