diff --git a/test/e2e/azure_csidriver.go b/test/e2e/azure_csidriver.go
index d8a572a6c01..3126c9bb81b 100644
--- a/test/e2e/azure_csidriver.go
+++ b/test/e2e/azure_csidriver.go
@@ -65,7 +65,7 @@ func AzureDiskCSISpec(ctx context.Context, inputGetter func() AzureDiskCSISpecIn
 		By("[In-tree]Deploying storage class and pvc")
 		By("Deploying managed disk storage class")
 		scName := "managedhdd" + util.RandomString(6)
-		e2e_sc.Create(scName).WithWaitForFirstConsumer().DeployStorageClass(clientset)
+		e2e_sc.Create(scName).WithWaitForFirstConsumer().DeployStorageClass(ctx, clientset)
 		By("Deploying persistent volume claim")
 		pvcName = "dd-managed-hdd-5g" + util.RandomString(6)
 		pvcBuilder, err := e2e_pvc.Create(pvcName, "5Gi")
@@ -74,7 +74,7 @@ func AzureDiskCSISpec(ctx context.Context, inputGetter func() AzureDiskCSISpecIn
 			"volume.beta.kubernetes.io/storage-class": scName,
 		}
 		pvcBuilder.WithAnnotations(annotations)
-		err = pvcBuilder.DeployPVC(clientset)
+		err = pvcBuilder.DeployPVC(ctx, clientset)
 		Expect(err).NotTo(HaveOccurred())
 	} else {
 		By("[External]Deploying storage class and pvc")
@@ -83,13 +83,13 @@ func AzureDiskCSISpec(ctx context.Context, inputGetter func() AzureDiskCSISpecIn
 		e2e_sc.Create(scName).WithWaitForFirstConsumer().
 			WithOotProvisionerName().
 			WithOotParameters().
-			DeployStorageClass(clientset)
+			DeployStorageClass(ctx, clientset)
 		By("Deploying persistent volume claim")
 		pvcName = "oot-dd-managed-hdd-5g" + util.RandomString(6)
 		pvcBuilder, err := e2e_pvc.Create(pvcName, "5Gi")
 		Expect(err).NotTo(HaveOccurred())
 		pvcBuilder.WithStorageClass(scName)
-		err = pvcBuilder.DeployPVC(clientset)
+		err = pvcBuilder.DeployPVC(ctx, clientset)
 		Expect(err).NotTo(HaveOccurred())
 	}
 
diff --git a/test/e2e/azure_logcollector.go b/test/e2e/azure_logcollector.go
index e0507cae1cd..bc5926efbf1 100644
--- a/test/e2e/azure_logcollector.go
+++ b/test/e2e/azure_logcollector.go
@@ -591,7 +591,7 @@ func collectVMBootLog(ctx context.Context, vmClient *armcompute.VirtualMachinesC
 		return errors.Wrap(err, "failed to get boot diagnostics data")
 	}
 
-	return writeBootLog(bootDiagnostics.RetrieveBootDiagnosticsDataResult, outputPath)
+	return writeBootLog(ctx, bootDiagnostics.RetrieveBootDiagnosticsDataResult, outputPath)
 }
 
 // collectVMSSInstanceBootLog collects boot logs of the vmss instance by using azure boot diagnostics.
@@ -603,12 +603,12 @@ func collectVMSSInstanceBootLog(ctx context.Context, instanceClient *armcompute.
 		return errors.Wrap(err, "failed to get boot diagnostics data")
 	}
 
-	return writeBootLog(bootDiagnostics.RetrieveBootDiagnosticsDataResult, outputPath)
+	return writeBootLog(ctx, bootDiagnostics.RetrieveBootDiagnosticsDataResult, outputPath)
 }
 
-func writeBootLog(bootDiagnostics armcompute.RetrieveBootDiagnosticsDataResult, outputPath string) error {
+func writeBootLog(ctx context.Context, bootDiagnostics armcompute.RetrieveBootDiagnosticsDataResult, outputPath string) error {
 	var err error
-	req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, *bootDiagnostics.SerialConsoleLogBlobURI, http.NoBody)
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, *bootDiagnostics.SerialConsoleLogBlobURI, http.NoBody)
 	if err != nil {
 		return errors.Wrap(err, "failed to create HTTP request")
 	}
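Both boot-log collectors now hand their own context to writeBootLog, which previously built the blob download request around context.TODO() and therefore could never be canceled. A minimal sketch of the resulting pattern, with a hypothetical helper name that is not part of the repo:

```go
package main

import (
	"context"
	"io"
	"net/http"
	"os"
)

// fetchToFile is an illustrative helper showing the shape writeBootLog now
// has: the HTTP request inherits the caller's context, so an interrupted or
// timed-out test run aborts the download instead of hanging indefinitely.
func fetchToFile(ctx context.Context, url, outputPath string) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req) // returns promptly once ctx is done
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	return os.WriteFile(outputPath, data, 0o600)
}
```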
diff --git a/test/e2e/azure_privatecluster.go b/test/e2e/azure_privatecluster.go
index 6d08323d0c5..d859adf03fc 100644
--- a/test/e2e/azure_privatecluster.go
+++ b/test/e2e/azure_privatecluster.go
@@ -113,7 +113,7 @@ func AzurePrivateClusterSpec(ctx context.Context, inputGetter func() AzurePrivat
 			userID = "cloud-provider-user-identity"
 		}
 		resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ManagedIdentity/userAssignedIdentities/%s", subscriptionID, identityRG, userID)
-		Expect(os.Setenv("UAMI_CLIENT_ID", getClientIDforMSI(resourceID))).To(Succeed())
+		Expect(os.Setenv("UAMI_CLIENT_ID", getClientIDforMSI(ctx, resourceID))).To(Succeed())
 		Expect(os.Setenv("CLUSTER_IDENTITY_NAME", "cluster-identity-user-assigned")).To(Succeed())
 		Expect(os.Setenv("CLUSTER_IDENTITY_NAMESPACE", input.Namespace.Name)).To(Succeed())
 
@@ -407,7 +407,7 @@ func SetupExistingVNet(ctx context.Context, vnetCidr string, cpSubnetCidrs, node
 }
 
 // getClientIDforMSI fetches the client ID of a user assigned identity.
-func getClientIDforMSI(resourceID string) string {
+func getClientIDforMSI(ctx context.Context, resourceID string) string {
 	subscriptionID := getSubscriptionID(Default)
 	cred, err := azidentity.NewDefaultAzureCredential(nil)
 	Expect(err).NotTo(HaveOccurred())
@@ -418,7 +418,7 @@ func getClientIDforMSI(resourceID string) string {
 	parsed, err := azureutil.ParseResourceID(resourceID)
 	Expect(err).NotTo(HaveOccurred())
 
-	resp, err := msiClient.Get(context.TODO(), parsed.ResourceGroupName, parsed.Name, nil)
+	resp, err := msiClient.Get(ctx, parsed.ResourceGroupName, parsed.Name, nil)
 	Expect(err).NotTo(HaveOccurred())
 
 	return *resp.Properties.ClientID
diff --git a/test/e2e/azure_test.go b/test/e2e/azure_test.go
index 99d35e868d9..7cd99395ff2 100644
--- a/test/e2e/azure_test.go
+++ b/test/e2e/azure_test.go
@@ -40,7 +40,6 @@ import (
 
 var _ = Describe("Workload cluster creation", func() {
 	var (
-		ctx           = context.TODO()
 		specName      = "create-workload-cluster"
 		namespace     *corev1.Namespace
 		cancelWatches context.CancelFunc
@@ -135,7 +134,7 @@ var _ = Describe("Workload cluster creation", func() {
 		logCheckpoint(specTimes)
 	})
 
-	if os.Getenv("USE_LOCAL_KIND_REGISTRY") != "true" {
+	if os.Getenv("USE_LOCAL_KIND_REGISTRY") != "true" {
 		// This spec expects a user-assigned identity with Contributor role assignment named "cloud-provider-user-identity" in a "capz-ci"
 		// resource group. Override these defaults by setting the USER_IDENTITY and CI_RG environment variables.
		Context("Creating a private cluster [OPTIONAL]", func() {
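With ctx threaded into getClientIDforMSI, the ARM call participates in suite-wide cancellation rather than running on a detached context.TODO(). A hedged usage sketch; the wrapper name is invented and not part of this change:

```go
package e2e

import (
	"context"
	"time"
)

// clientIDWithTimeout illustrates what the new parameter enables: because
// getClientIDforMSI forwards ctx into msiClient.Get, callers can bound the
// ARM round trip, and a canceled suite context ends the lookup early.
func clientIDWithTimeout(ctx context.Context, resourceID string) string {
	lookupCtx, cancel := context.WithTimeout(ctx, 2*time.Minute)
	defer cancel()
	return getClientIDforMSI(lookupCtx, resourceID)
}
```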
Context("Creating a private cluster [OPTIONAL]", func() { diff --git a/test/e2e/capi_test.go b/test/e2e/capi_test.go index 5dd7664f614..fb0776a26bd 100644 --- a/test/e2e/capi_test.go +++ b/test/e2e/capi_test.go @@ -43,7 +43,6 @@ const ( var _ = Describe("Running the Cluster API E2E tests", func() { var ( - ctx = context.TODO() specTimes = map[string]time.Time{} ) BeforeEach(func() { @@ -73,7 +72,7 @@ var _ = Describe("Running the Cluster API E2E tests", func() { }) Context("Running the quick-start spec", func() { - capi_e2e.QuickStartSpec(context.TODO(), func() capi_e2e.QuickStartSpecInput { + capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { return capi_e2e.QuickStartSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, @@ -88,7 +87,7 @@ var _ = Describe("Running the Cluster API E2E tests", func() { }) Context("Running the MachineDeployment rollout spec", func() { - capi_e2e.MachineDeploymentRolloutSpec(context.TODO(), func() capi_e2e.MachineDeploymentRolloutSpecInput { + capi_e2e.MachineDeploymentRolloutSpec(ctx, func() capi_e2e.MachineDeploymentRolloutSpecInput { return capi_e2e.MachineDeploymentRolloutSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, @@ -104,7 +103,7 @@ var _ = Describe("Running the Cluster API E2E tests", func() { if os.Getenv("USE_LOCAL_KIND_REGISTRY") != "true" { Context("Running the self-hosted spec", func() { - SelfHostedSpec(context.TODO(), func() SelfHostedSpecInput { + SelfHostedSpec(ctx, func() SelfHostedSpecInput { return SelfHostedSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, @@ -121,7 +120,7 @@ var _ = Describe("Running the Cluster API E2E tests", func() { // TODO: Add test using KCPRemediationSpec Context("Should successfully remediate unhealthy worker machines with MachineHealthCheck", func() { - capi_e2e.MachineDeploymentRemediationSpec(context.TODO(), func() capi_e2e.MachineDeploymentRemediationSpecInput { + capi_e2e.MachineDeploymentRemediationSpec(ctx, func() capi_e2e.MachineDeploymentRemediationSpecInput { return capi_e2e.MachineDeploymentRemediationSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, @@ -136,7 +135,7 @@ var _ = Describe("Running the Cluster API E2E tests", func() { }) Context("Should successfully exercise machine pools", func() { - capi_e2e.MachinePoolSpec(context.TODO(), func() capi_e2e.MachinePoolInput { + capi_e2e.MachinePoolSpec(ctx, func() capi_e2e.MachinePoolInput { return capi_e2e.MachinePoolInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, @@ -151,7 +150,7 @@ var _ = Describe("Running the Cluster API E2E tests", func() { }) Context("Should successfully scale out and scale in a MachineDeployment", func() { - capi_e2e.MachineDeploymentScaleSpec(context.TODO(), func() capi_e2e.MachineDeploymentScaleSpecInput { + capi_e2e.MachineDeploymentScaleSpec(ctx, func() capi_e2e.MachineDeploymentScaleSpecInput { return capi_e2e.MachineDeploymentScaleSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, @@ -166,7 +165,7 @@ var _ = Describe("Running the Cluster API E2E tests", func() { }) // Context("Should successfully set and use node drain timeout", func() { - // capi_e2e.NodeDrainTimeoutSpec(context.TODO(), func() capi_e2e.NodeDrainTimeoutSpecInput { + // capi_e2e.NodeDrainTimeoutSpec(ctx, func() capi_e2e.NodeDrainTimeoutSpecInput { // return capi_e2e.NodeDrainTimeoutSpecInput{ // E2EConfig: e2eConfig, // ClusterctlConfigPath: clusterctlConfigPath, @@ -352,7 +351,7 @@ var 
_ = Describe("Running the Cluster API E2E tests", func() { }) Context("Running KCP upgrade in a HA cluster [K8s-Upgrade]", func() { - capi_e2e.ClusterUpgradeConformanceSpec(context.TODO(), func() capi_e2e.ClusterUpgradeConformanceSpecInput { + capi_e2e.ClusterUpgradeConformanceSpec(ctx, func() capi_e2e.ClusterUpgradeConformanceSpecInput { return capi_e2e.ClusterUpgradeConformanceSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, @@ -370,7 +369,7 @@ var _ = Describe("Running the Cluster API E2E tests", func() { }) Context("Running KCP upgrade in a HA cluster using scale in rollout [K8s-Upgrade]", func() { - capi_e2e.ClusterUpgradeConformanceSpec(context.TODO(), func() capi_e2e.ClusterUpgradeConformanceSpecInput { + capi_e2e.ClusterUpgradeConformanceSpec(ctx, func() capi_e2e.ClusterUpgradeConformanceSpecInput { return capi_e2e.ClusterUpgradeConformanceSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, diff --git a/test/e2e/conformance_test.go b/test/e2e/conformance_test.go index f6d3442fc4c..5f5c928cc6e 100644 --- a/test/e2e/conformance_test.go +++ b/test/e2e/conformance_test.go @@ -43,7 +43,6 @@ import ( var _ = Describe("Conformance Tests", func() { var ( - ctx = context.TODO() cancelWatches context.CancelFunc result *clusterctl.ApplyClusterTemplateAndWaitResult clusterName string @@ -94,7 +93,7 @@ var _ = Describe("Conformance Tests", func() { // clusters with CI artifacts or PR artifacts are based on a known CI version // PR artifacts will replace the CI artifacts during kubeadm init if useCIArtifacts || usePRArtifacts { - kubernetesVersion, err = resolveCIVersion(kubernetesVersion) + kubernetesVersion, err = resolveCIVersion(ctx, kubernetesVersion) Expect(err).NotTo(HaveOccurred()) Expect(os.Setenv("CI_VERSION", kubernetesVersion)).To(Succeed()) Expect(os.Setenv("CLOUD_PROVIDER_AZURE_LABEL", "azure-ci")).To(Succeed()) diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index f82419f36bc..a361656e403 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -41,6 +41,13 @@ import ( ctrl "sigs.k8s.io/controller-runtime" ) +var ( + ctx = ctrl.SetupSignalHandler() + + // watchesCtx is used in log streaming to be able to get canceled via cancelWatches after ending the test suite. + watchesCtx, cancelWatches = context.WithCancel(ctx) +) + func init() { flag.StringVar(&configPath, "e2e.config", "", "path to the e2e config file") flag.StringVar(&artifactFolder, "e2e.artifacts-folder", "", "folder where e2e test artifact should be stored") @@ -68,16 +75,16 @@ var _ = SynchronizedBeforeSuite(func() []byte { Expect(os.MkdirAll(artifactFolder, 0o755)).To(Succeed(), "Invalid test suite argument. 
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index f82419f36bc..a361656e403 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -41,6 +41,13 @@ import (
 	ctrl "sigs.k8s.io/controller-runtime"
 )
 
+var (
+	ctx = ctrl.SetupSignalHandler()
+
+	// watchesCtx is used in log streaming to be able to get canceled via cancelWatches after ending the test suite.
+	watchesCtx, cancelWatches = context.WithCancel(ctx)
+)
+
 func init() {
 	flag.StringVar(&configPath, "e2e.config", "", "path to the e2e config file")
 	flag.StringVar(&artifactFolder, "e2e.artifacts-folder", "", "folder where e2e test artifact should be stored")
@@ -68,16 +75,16 @@ var _ = SynchronizedBeforeSuite(func() []byte {
 	Expect(os.MkdirAll(artifactFolder, 0o755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", artifactFolder)
 
 	Byf("Loading the e2e test configuration from %q", configPath)
-	e2eConfig = loadE2EConfig(configPath)
+	e2eConfig = loadE2EConfig(ctx, configPath)
 
 	Byf("Creating a clusterctl local repository into %q", artifactFolder)
-	clusterctlConfigPath = createClusterctlLocalRepository(e2eConfig, filepath.Join(artifactFolder, "repository"))
+	clusterctlConfigPath = createClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactFolder, "repository"))
 
 	By("Setting up the bootstrap cluster")
-	bootstrapClusterProvider, bootstrapClusterProxy = setupBootstrapCluster(e2eConfig, useExistingCluster)
+	bootstrapClusterProvider, bootstrapClusterProxy = setupBootstrapCluster(ctx, e2eConfig, useExistingCluster)
 
 	By("Initializing the bootstrap cluster")
-	initBootstrapCluster(bootstrapClusterProxy, e2eConfig, clusterctlConfigPath, artifactFolder)
+	initBootstrapCluster(watchesCtx, bootstrapClusterProxy, e2eConfig, clusterctlConfigPath, artifactFolder)
 
 	// encode the e2e config into the byte array.
 	var configBuf bytes.Buffer
@@ -128,20 +135,20 @@ var _ = SynchronizedAfterSuite(func() {
 
 	By("Tearing down the management cluster")
 	if !skipCleanup {
-		tearDown(bootstrapClusterProvider, bootstrapClusterProxy)
+		tearDown(ctx, bootstrapClusterProvider, bootstrapClusterProxy)
 	}
 })
 
-func loadE2EConfig(configPath string) *clusterctl.E2EConfig {
-	config := clusterctl.LoadE2EConfig(context.TODO(), clusterctl.LoadE2EConfigInput{ConfigPath: configPath})
+func loadE2EConfig(ctx context.Context, configPath string) *clusterctl.E2EConfig {
+	config := clusterctl.LoadE2EConfig(ctx, clusterctl.LoadE2EConfigInput{ConfigPath: configPath})
 	Expect(config).NotTo(BeNil(), "Failed to load E2E config from %s", configPath)
-	resolveKubernetesVersions(config)
+	resolveKubernetesVersions(ctx, config)
 
 	return config
 }
 
-func createClusterctlLocalRepository(config *clusterctl.E2EConfig, repositoryFolder string) string {
+func createClusterctlLocalRepository(ctx context.Context, config *clusterctl.E2EConfig, repositoryFolder string) string {
 	createRepositoryInput := clusterctl.CreateRepositoryInput{
 		E2EConfig:        config,
 		RepositoryFolder: repositoryFolder,
@@ -153,16 +160,16 @@ func createClusterctlLocalRepository(config *clusterctl.E2EConfig, repositoryFol
 	Expect(cniPath).To(BeAnExistingFile(), "The %s variable should resolve to an existing file", capi_e2e.CNIPath)
 	createRepositoryInput.RegisterClusterResourceSetConfigMapTransformation(cniPath, capi_e2e.CNIResources)
 
-	clusterctlConfig := clusterctl.CreateRepository(context.TODO(), createRepositoryInput)
+	clusterctlConfig := clusterctl.CreateRepository(ctx, createRepositoryInput)
 	Expect(clusterctlConfig).To(BeAnExistingFile(), "The clusterctl config file does not exists in the local repository %s", repositoryFolder)
 
 	return clusterctlConfig
 }
 
-func setupBootstrapCluster(config *clusterctl.E2EConfig, useExistingCluster bool) (bootstrap.ClusterProvider, framework.ClusterProxy) {
+func setupBootstrapCluster(ctx context.Context, config *clusterctl.E2EConfig, useExistingCluster bool) (bootstrap.ClusterProvider, framework.ClusterProxy) {
 	var clusterProvider bootstrap.ClusterProvider
 	kubeconfigPath := ""
 	if !useExistingCluster {
-		clusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(context.TODO(), bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{
+		clusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(ctx, bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{
 			Name:               config.ManagementClusterName,
 			RequiresDockerSock: config.HasDockerProvider(),
 			Images:             config.Images,
@@ -179,7 +186,7 @@ func setupBootstrapCluster(config *clusterctl.E2EConfig, useExistingCluster bool
 			Name:   "capz-e2e",
 			Images: config.Images,
 		}
-		err := bootstrap.LoadImagesToKindCluster(context.TODO(), imagesInput)
+		err := bootstrap.LoadImagesToKindCluster(ctx, imagesInput)
 		Expect(err).NotTo(HaveOccurred(), "Failed to load images to the bootstrap cluster: %s", err)
 	}
 	clusterProxy := NewAzureClusterProxy("bootstrap", kubeconfigPath)
@@ -187,8 +194,8 @@ func setupBootstrapCluster(config *clusterctl.E2EConfig, useExistingCluster bool
 	return clusterProvider, clusterProxy
 }
 
-func initBootstrapCluster(bootstrapClusterProxy framework.ClusterProxy, config *clusterctl.E2EConfig, clusterctlConfig, artifactFolder string) {
-	clusterctl.InitManagementClusterAndWatchControllerLogs(context.TODO(), clusterctl.InitManagementClusterAndWatchControllerLogsInput{
+func initBootstrapCluster(ctx context.Context, bootstrapClusterProxy framework.ClusterProxy, config *clusterctl.E2EConfig, clusterctlConfig, artifactFolder string) {
+	clusterctl.InitManagementClusterAndWatchControllerLogs(ctx, clusterctl.InitManagementClusterAndWatchControllerLogsInput{
 		ClusterProxy:            bootstrapClusterProxy,
 		ClusterctlConfigPath:    clusterctlConfig,
 		InfrastructureProviders: config.InfrastructureProviders(),
@@ -197,11 +204,12 @@ func initBootstrapCluster(bootstrapClusterProxy framework.ClusterProxy, config *
 	}, config.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...)
 }
 
-func tearDown(bootstrapClusterProvider bootstrap.ClusterProvider, bootstrapClusterProxy framework.ClusterProxy) {
+func tearDown(ctx context.Context, bootstrapClusterProvider bootstrap.ClusterProvider, bootstrapClusterProxy framework.ClusterProxy) {
+	cancelWatches()
 	if bootstrapClusterProxy != nil {
-		bootstrapClusterProxy.Dispose(context.TODO())
+		bootstrapClusterProxy.Dispose(ctx)
 	}
 	if bootstrapClusterProvider != nil {
-		bootstrapClusterProvider.Dispose(context.TODO())
+		bootstrapClusterProvider.Dispose(ctx)
 	}
 }
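The var block above is the heart of the change: one signal-aware root context for the whole suite, plus a derived context whose cancellation tearDown owns. A simplified sketch of the lifecycle this sets up, with invented helper names:

```go
package e2e

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
)

// exampleLifecycle sketches the two-context pattern. The root context is
// canceled by SIGINT/SIGTERM; the derived watches context is additionally
// canceled by teardown, so controller-log streaming stops before the
// bootstrap cluster is disposed while disposal itself keeps a live context.
// Note ctrl.SetupSignalHandler may only be called once per process; the real
// suite does so in the package-level var block above.
func exampleLifecycle(runSpecs, dispose func(context.Context)) {
	root := ctrl.SetupSignalHandler()
	watches, stopWatches := context.WithCancel(root)

	go func() { <-watches.Done() }() // stand-in for the log streamers

	runSpecs(root)

	stopWatches() // first: wind down log streaming, as tearDown does
	dispose(root) // then: cleanup still has a usable context
}
```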
diff --git a/test/e2e/helpers.go b/test/e2e/helpers.go
index 5b441cddfbc..7cb128ac692 100644
--- a/test/e2e/helpers.go
+++ b/test/e2e/helpers.go
@@ -615,12 +615,12 @@ func validateStableReleaseString(stableVersion string) (isStable bool, matches [
 
 // resolveCIVersion resolves kubernetes version labels (e.g. latest, latest-1.xx) to the corresponding CI version numbers.
 // Go implementation of https://github.com/kubernetes-sigs/cluster-api/blob/d1dc87d5df3ab12a15ae5b63e50541a191b7fec4/scripts/ci-e2e-lib.sh#L75-L95.
-func resolveCIVersion(label string) (string, error) {
+func resolveCIVersion(ctx context.Context, label string) (string, error) {
 	if ciVersion, ok := os.LookupEnv("CI_VERSION"); ok {
 		return ciVersion, nil
 	}
 	if strings.HasPrefix(label, "latest") {
-		if kubernetesVersion, err := latestCIVersion(label); err == nil {
+		if kubernetesVersion, err := latestCIVersion(ctx, label); err == nil {
 			return kubernetesVersion, nil
 		}
 	}
@@ -630,9 +630,9 @@ func resolveCIVersion(label string) (string, error) {
 }
 
 // latestCIVersion returns the latest CI version of a given label in the form of latest-1.xx.
-func latestCIVersion(label string) (string, error) {
+func latestCIVersion(ctx context.Context, label string) (string, error) {
 	ciVersionURL := fmt.Sprintf("https://dl.k8s.io/ci/%s.txt", label)
-	req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, ciVersionURL, http.NoBody)
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, ciVersionURL, http.NoBody)
 	if err != nil {
 		return "", err
 	}
@@ -690,10 +690,10 @@ func resolveKubetestRepoListPath(version string, path string) (string, error) {
 
 // resolveKubernetesVersions looks at Kubernetes versions set as variables in the e2e config and sets them to a valid k8s version
 // that has an existing capi offer image available. For example, if the version is "stable-1.22", the function will set it to the latest 1.22 version that has a published reference image.
-func resolveKubernetesVersions(config *clusterctl.E2EConfig) {
-	linuxVersions := getVersionsInCommunityGallery(context.TODO(), os.Getenv(AzureLocation), capiCommunityGallery, "capi-ubun2-2404")
-	windowsVersions := getVersionsInCommunityGallery(context.TODO(), os.Getenv(AzureLocation), capiCommunityGallery, "capi-win-2019-containerd")
-	flatcarK8sVersions := getFlatcarK8sVersions(context.TODO(), os.Getenv(AzureLocation), flatcarCAPICommunityGallery)
+func resolveKubernetesVersions(ctx context.Context, config *clusterctl.E2EConfig) {
+	linuxVersions := getVersionsInCommunityGallery(ctx, os.Getenv(AzureLocation), capiCommunityGallery, "capi-ubun2-2404")
+	windowsVersions := getVersionsInCommunityGallery(ctx, os.Getenv(AzureLocation), capiCommunityGallery, "capi-win-2019-containerd")
+	flatcarK8sVersions := getFlatcarK8sVersions(ctx, os.Getenv(AzureLocation), flatcarCAPICommunityGallery)
 
 	// find the intersection of ubuntu and windows versions available, since we need an image for both.
 	var versions semver.Versions
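With ctx threaded through latestCIVersion, callers can now bound the dl.k8s.io lookup; under context.TODO() the request could only be limited by client defaults. A usage sketch with an arbitrary timeout:

```go
package e2e

import (
	"context"
	"time"
)

// exampleLatestCIVersion bounds the version lookup; if the deadline expires
// mid-request, the HTTP call returns with a context error instead of hanging.
func exampleLatestCIVersion(ctx context.Context) (string, error) {
	reqCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	return latestCIVersion(reqCtx, "latest-1.30") // GETs https://dl.k8s.io/ci/latest-1.30.txt
}
```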
diff --git a/test/e2e/kubernetes/pvc/pvc.go b/test/e2e/kubernetes/pvc/pvc.go
index dab021f95d6..6c7a5dd5efb 100644
--- a/test/e2e/kubernetes/pvc/pvc.go
+++ b/test/e2e/kubernetes/pvc/pvc.go
@@ -109,9 +109,9 @@ func (b *Builder) WithStorageClass(scName string) *Builder {
 	return b
 }
 
-func (b *Builder) DeployPVC(clientset *kubernetes.Clientset) error {
+func (b *Builder) DeployPVC(ctx context.Context, clientset *kubernetes.Clientset) error {
 	Eventually(func(g Gomega) {
-		_, err := clientset.CoreV1().PersistentVolumeClaims("default").Create(context.TODO(), b.pvc, metav1.CreateOptions{})
+		_, err := clientset.CoreV1().PersistentVolumeClaims("default").Create(ctx, b.pvc, metav1.CreateOptions{})
 		if err != nil {
 			log.Printf("Error trying to deploy storage class %s in namespace %s:%s\n", b.pvc.Name, b.pvc.ObjectMeta.Namespace, err.Error())
 		}
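A usage sketch mirroring the call sites in azure_csidriver.go above; the only caller-visible change is the leading ctx argument. The import path and PVC name are inferred for illustration:

```go
package e2e

import (
	"context"

	"k8s.io/client-go/kubernetes"

	e2e_pvc "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/pvc"
)

// exampleDeployPVC builds a 5Gi claim against an existing storage class and
// deploys it with the suite context, as the CSI driver spec now does.
func exampleDeployPVC(ctx context.Context, clientset *kubernetes.Clientset, scName string) error {
	pvcBuilder, err := e2e_pvc.Create("example-pvc", "5Gi")
	if err != nil {
		return err
	}
	pvcBuilder.WithStorageClass(scName)
	return pvcBuilder.DeployPVC(ctx, clientset)
}
```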
diff --git a/test/e2e/kubernetes/storageclass/storageclass.go b/test/e2e/kubernetes/storageclass/storageclass.go
index a2a4a24a0b6..30cda38f764 100644
--- a/test/e2e/kubernetes/storageclass/storageclass.go
+++ b/test/e2e/kubernetes/storageclass/storageclass.go
@@ -108,9 +108,9 @@ func (d *Builder) WithOotParameters() *Builder {
 }
 
 // DeployStorageClass creates a storage class on the k8s cluster.
-func (d *Builder) DeployStorageClass(clientset *kubernetes.Clientset) {
+func (d *Builder) DeployStorageClass(ctx context.Context, clientset *kubernetes.Clientset) {
 	Eventually(func(g Gomega) {
-		_, err := clientset.StorageV1().StorageClasses().Create(context.TODO(), d.sc, metav1.CreateOptions{})
+		_, err := clientset.StorageV1().StorageClasses().Create(ctx, d.sc, metav1.CreateOptions{})
 		if err != nil {
 			log.Printf("Error trying to deploy storage class %s in namespace %s:%s\n", d.sc.Name, d.sc.ObjectMeta.Namespace, err.Error())
 		}
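And the matching usage sketch for the updated DeployStorageClass signature, following the in-tree branch of azure_csidriver.go; the import path is inferred from the repository layout:

```go
package e2e

import (
	"context"

	"k8s.io/client-go/kubernetes"

	e2e_sc "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/storageclass"
)

// exampleDeployStorageClass chains the builder as the spec does.
// DeployStorageClass asserts success internally via Eventually and returns
// nothing; the Create call inside now uses the suite context rather than a
// throwaway context.TODO().
func exampleDeployStorageClass(ctx context.Context, clientset *kubernetes.Clientset, scName string) {
	e2e_sc.Create(scName).
		WithWaitForFirstConsumer().
		DeployStorageClass(ctx, clientset)
}
```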