diff --git a/api/v1alpha1/metalstackcluster_types.go b/api/v1alpha1/metalstackcluster_types.go
index 2746711..7f49d42 100644
--- a/api/v1alpha1/metalstackcluster_types.go
+++ b/api/v1alpha1/metalstackcluster_types.go
@@ -29,9 +29,12 @@ const (
 	// ClusterFinalizer allows to clean up resources associated with before removing it from the apiserver.
 	ClusterFinalizer = "metal-stack.infrastructure.cluster.x-k8s.io/cluster"
 
+	TagInfraClusterResource = "metal-stack.infrastructure.cluster.x-k8s.io/cluster-resource"
+
 	ClusterControlPlaneEndpointDefaultPort = 443
 
 	ClusterControlPlaneIPEnsured clusterv1.ConditionType = "ClusterControlPlaneIPEnsured"
+	ClusterPaused                clusterv1.ConditionType = clusterv1.PausedV1Beta2Condition
 )
 
 var (
diff --git a/api/v1alpha1/metalstackmachine_types.go b/api/v1alpha1/metalstackmachine_types.go
index aeece1d..afe68c1 100644
--- a/api/v1alpha1/metalstackmachine_types.go
+++ b/api/v1alpha1/metalstackmachine_types.go
@@ -26,11 +26,12 @@ const (
 	// MachineFinalizer allows to clean up resources associated with before removing it from the apiserver.
 	MachineFinalizer = "metal-stack.infrastructure.cluster.x-k8s.io/machine"
 
-	TagInfraMachineID = "metal-stack.infrastructure.cluster.x-k8s.io/machine-id"
+	TagInfraMachineResource = "metal-stack.infrastructure.cluster.x-k8s.io/machine-resource"
 
 	ProviderMachineCreated clusterv1.ConditionType = "MachineCreated"
 	ProviderMachineReady   clusterv1.ConditionType = "MachineReady"
 	ProviderMachineHealthy clusterv1.ConditionType = "MachineHealthy"
+	ProviderMachinePaused  clusterv1.ConditionType = clusterv1.PausedV1Beta2Condition
 )
 
 // MetalStackMachineSpec defines the desired state of MetalStackMachine.
diff --git a/internal/controller/metalstackcluster_controller.go b/internal/controller/metalstackcluster_controller.go
index 3e70edb..aacee22 100644
--- a/internal/controller/metalstackcluster_controller.go
+++ b/internal/controller/metalstackcluster_controller.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"net/http"
 
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -31,6 +32,7 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
@@ -46,11 +48,6 @@ import (
 	metalgo "github.com/metal-stack/metal-go"
 )
 
-var (
-	errProviderIPNotFound     = errors.New("provider ip not found")
-	errProviderIPTooManyFound = errors.New("multiple provider ips found")
-)
-
 // MetalStackClusterReconciler reconciles a MetalStackCluster object
 type MetalStackClusterReconciler struct {
 	MetalClient metalgo.Client
@@ -111,12 +108,21 @@ func (r *MetalStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re
 		return ctrl.Result{}, err
 	}
 
-	if !infraCluster.DeletionTimestamp.IsZero() {
+	if annotations.IsPaused(cluster, infraCluster) {
+		conditions.MarkTrue(infraCluster, v1alpha1.ClusterPaused)
+	} else {
+		conditions.MarkFalse(infraCluster, v1alpha1.ClusterPaused, clusterv1.PausedV1Beta2Reason, clusterv1.ConditionSeverityInfo, "")
+	}
+
+	switch {
+	case annotations.IsPaused(cluster, infraCluster):
+		log.Info("reconciliation is paused")
+	case !infraCluster.DeletionTimestamp.IsZero():
 		err = reconciler.delete()
-	} else if !controllerutil.ContainsFinalizer(infraCluster, v1alpha1.ClusterFinalizer) {
+	case !controllerutil.ContainsFinalizer(infraCluster, v1alpha1.ClusterFinalizer):
 		log.Info("adding finalizer")
 		controllerutil.AddFinalizer(infraCluster, v1alpha1.ClusterFinalizer)
-	} else {
+	default:
 		log.Info("reconciling cluster")
 		err = reconciler.reconcile()
 	}
@@ -226,13 +232,22 @@ func (r *clusterReconciler) ensureControlPlaneIP() (string, error) {
 }
 
 func (r *clusterReconciler) deleteControlPlaneIP() error {
-	ip, err := r.findControlPlaneIP()
-	if err != nil && errors.Is(err, errProviderIPNotFound) {
+	if r.infraCluster.Spec.ControlPlaneIP == nil {
 		return nil
 	}
+
+	resp, err := r.metalClient.IP().FindIP(ipmodels.NewFindIPParams().WithID(*r.infraCluster.Spec.ControlPlaneIP).WithContext(r.ctx), nil)
 	if err != nil {
-		return fmt.Errorf("unable to delete control plane ip: %w", err)
+		var errResp *ipmodels.FindIPDefault
+		if errors.As(err, &errResp) && errResp.Code() == http.StatusNotFound {
+			return nil
+		}
+
+		return err
 	}
+
+	ip := resp.Payload
+
 	if ip.Type != nil && *ip.Type == models.V1IPBaseTypeStatic {
 		r.log.Info("skip deletion of static control plane ip")
 		return nil
@@ -242,46 +257,12 @@ func (r *clusterReconciler) deleteControlPlaneIP() error {
 		return fmt.Errorf("control plane ip address not set")
 	}
 
-	if ip.Type != nil && *ip.Type == models.V1IPAllocateRequestTypeStatic {
-		r.log.Info("skipping deletion of static control plane ip", "ip", *ip.Ipaddress)
-		return nil
-	}
-
 	_, err = r.metalClient.IP().FreeIP(ipmodels.NewFreeIPParams().WithID(*ip.Ipaddress).WithContext(r.ctx), nil)
 	if err != nil {
 		return err
 	}
 
+	r.log.Info("deleted control plane ip", "address", *ip.Ipaddress)
+
 	return nil
 }
-
-func (r *clusterReconciler) findControlPlaneIP() (*models.V1IPResponse, error) {
-	if r.infraCluster.Spec.ControlPlaneIP != nil {
-		resp, err := r.metalClient.IP().FindIP(ipmodels.NewFindIPParams().WithID(*r.infraCluster.Spec.ControlPlaneIP).WithContext(r.ctx), nil)
-		if err != nil {
-			return nil, err
-		}
-
-		return resp.Payload, nil
-	}
-
-	resp, err := r.metalClient.IP().FindIPs(ipmodels.NewFindIPsParams().WithBody(&models.V1IPFindRequest{
-		Projectid: r.infraCluster.Spec.ProjectID,
-		Tags: []string{
-			tag.New(tag.ClusterID, string(r.infraCluster.GetUID())),
-			v1alpha1.TagControlPlanePurpose,
-		},
-	}).WithContext(r.ctx), nil)
-	if err != nil {
-		return nil, err
-	}
-
-	switch len(resp.Payload) {
-	case 0:
-		return nil, errProviderIPNotFound
-	case 1:
-		return resp.Payload[0], nil
-	default:
-		return nil, errProviderIPTooManyFound
-	}
-}
diff --git a/internal/controller/metalstackcluster_controller_test.go b/internal/controller/metalstackcluster_controller_test.go
index 180f143..d1997b1 100644
--- a/internal/controller/metalstackcluster_controller_test.go
+++ b/internal/controller/metalstackcluster_controller_test.go
@@ -102,6 +102,128 @@ var _ = Describe("MetalStackCluster Controller", func() {
 		})
 	})
 
+	Context("when reconcile is paused", func() {
+		BeforeEach(func() {
+			resource.Spec = v1alpha1.MetalStackClusterSpec{
+				ControlPlaneEndpoint: v1alpha1.APIEndpoint{},
+				ProjectID:            "test-project",
+				NodeNetworkID:        "node-network-id",
+				ControlPlaneIP:       nil,
+				Partition:            "test-partition",
+			}
+		})
+
+		It("should skip reconciles due to cluster.spec.paused", func() {
+			Expect(k8sClient.Create(ctx, resource)).To(Succeed())
+
+			By("creating the cluster resource and setting the owner reference")
+			owner := &clusterv1beta1.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					GenerateName: "owner-",
+					Namespace:    "default",
+				},
+				Spec: clusterv1beta1.ClusterSpec{
+					Paused: true,
+				},
+			}
+			Expect(k8sClient.Create(ctx, owner)).To(Succeed())
+
+			resource.OwnerReferences = []metav1.OwnerReference{
+				*metav1.NewControllerRef(owner,
+					clusterv1beta1.GroupVersion.WithKind("Cluster")),
+			}
+			Expect(k8sClient.Update(ctx, resource)).To(Succeed())
+
+			typeNamespacedName := types.NamespacedName{
+				Name:      resource.Name,
+				Namespace: "default",
+			}
+			const firstGen = int64(1)
+
+			_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
+				NamespacedName: typeNamespacedName,
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			Expect(k8sClient.Get(ctx, typeNamespacedName, resource)).To(Succeed())
+			Expect(resource.Generation).To(Equal(firstGen))
+
+			Expect(resource.Status.Conditions).To(ContainElement(MatchFields(IgnoreExtras, Fields{
+				"Type":   Equal(v1alpha1.ClusterPaused),
+				"Status": Equal(corev1.ConditionTrue),
+			})))
+
+			By("idempotence", func() {
+				_, err = controllerReconciler.Reconcile(ctx, reconcile.Request{
+					NamespacedName: typeNamespacedName,
+				})
+				Expect(err).NotTo(HaveOccurred())
+
+				Expect(k8sClient.Get(ctx, typeNamespacedName, resource)).To(Succeed())
+				Expect(resource.Generation).To(Equal(firstGen))
+
+				Expect(resource.Status.Conditions).To(ContainElement(MatchFields(IgnoreExtras, Fields{
+					"Type":   Equal(v1alpha1.ClusterPaused),
+					"Status": Equal(corev1.ConditionTrue),
+				})))
+			})
+		})
+
+		It("should skip reconciles due to infra annotation", func() {
+			resource.Annotations = map[string]string{
+				clusterv1beta1.PausedAnnotation: "true",
+			}
+			Expect(k8sClient.Create(ctx, resource)).To(Succeed())
+
+			By("creating the cluster resource and setting the owner reference")
+			owner := &clusterv1beta1.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					GenerateName: "owner-",
+					Namespace:    "default",
+				},
+			}
+			Expect(k8sClient.Create(ctx, owner)).To(Succeed())
+
+			resource.OwnerReferences = []metav1.OwnerReference{
+				*metav1.NewControllerRef(owner, clusterv1beta1.GroupVersion.WithKind("Cluster")),
+			}
+			Expect(k8sClient.Update(ctx, resource)).To(Succeed())
+
+			typeNamespacedName := types.NamespacedName{
+				Name:      resource.Name,
+				Namespace: "default",
+			}
+			const firstGen = int64(1)
+
+			_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
+				NamespacedName: typeNamespacedName,
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			Expect(k8sClient.Get(ctx, typeNamespacedName, resource)).To(Succeed())
+			Expect(resource.Generation).To(Equal(firstGen))
+
+			Expect(resource.Status.Conditions).To(ContainElement(MatchFields(IgnoreExtras, Fields{
+				"Type":   Equal(v1alpha1.ClusterPaused),
+				"Status": Equal(corev1.ConditionTrue),
+			})))
+
+			By("idempotence", func() {
+				_, err = controllerReconciler.Reconcile(ctx, reconcile.Request{
+					NamespacedName: typeNamespacedName,
+				})
+				Expect(err).NotTo(HaveOccurred())
+
+				Expect(k8sClient.Get(ctx, typeNamespacedName, resource)).To(Succeed())
+				Expect(resource.Generation).To(Equal(firstGen))
+
+				Expect(resource.Status.Conditions).To(ContainElement(MatchFields(IgnoreExtras, Fields{
+					"Type":   Equal(v1alpha1.ClusterPaused),
+					"Status": Equal(corev1.ConditionTrue),
+				})))
+			})
+		})
+	})
+
 	Context("reconciliation with auto-acquiring dependent resources", func() {
 		BeforeEach(func() {
 			resource.Spec = v1alpha1.MetalStackClusterSpec{
@@ -113,6 +235,14 @@ var _ = Describe("MetalStackCluster Controller", func() {
 			}
 		})
 
+		AfterEach(func() {
+			Expect(resource.Status.Conditions).To(ContainElement(MatchFields(IgnoreExtras, Fields{
+				"Type":   Equal(v1alpha1.ClusterPaused),
+				"Status": Equal(corev1.ConditionFalse),
+				"Reason": Equal(clusterv1beta1.PausedV1Beta2Reason),
+			})))
+		})
+
 		It("should successfully reconcile", func() {
 			Expect(k8sClient.Create(ctx, resource)).To(Succeed())
 
@@ -206,6 +336,7 @@ var _ = Describe("MetalStackCluster Controller", func() {
Describe("MetalStackCluster Controller", func() { })) }) }) + Context("reconciliation when external resources are provided", func() { var ( nodeNetworkID string @@ -226,6 +357,14 @@ var _ = Describe("MetalStackCluster Controller", func() { } }) + AfterEach(func() { + Expect(resource.Status.Conditions).To(ContainElement(MatchFields(IgnoreExtras, Fields{ + "Type": Equal(v1alpha1.ClusterPaused), + "Status": Equal(corev1.ConditionFalse), + "Reason": Equal(clusterv1beta1.PausedV1Beta2Reason), + }))) + }) + When("creating a resource and setting an ownership", func() { It("should successfully reconcile", func() { Expect(k8sClient.Create(ctx, resource)).To(Succeed()) diff --git a/internal/controller/metalstackmachine_controller.go b/internal/controller/metalstackmachine_controller.go index be2f305..ecce64b 100644 --- a/internal/controller/metalstackmachine_controller.go +++ b/internal/controller/metalstackmachine_controller.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "net/http" "strings" "time" @@ -28,7 +29,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + capierrors "sigs.k8s.io/cluster-api/errors" //nolint:staticcheck "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" ctrl "sigs.k8s.io/controller-runtime" @@ -48,10 +51,7 @@ import ( const defaultProviderMachineRequeueTime = time.Second * 30 -var ( - errProviderMachineNotFound = errors.New("provider machine not found") - errProviderMachineTooManyFound = errors.New("multiple provider machines found") -) +var errProviderMachineNotFound = errors.New("provider machine not found") // MetalStackMachineReconciler reconciles a MetalStackMachine object type MetalStackMachineReconciler struct { @@ -147,12 +147,21 @@ func (r *MetalStackMachineReconciler) Reconcile(ctx context.Context, req ctrl.Re } var result ctrl.Result - if !infraMachine.DeletionTimestamp.IsZero() { + if annotations.IsPaused(cluster, infraMachine) { + conditions.MarkTrue(infraMachine, v1alpha1.ProviderMachinePaused) + } else { + conditions.MarkFalse(infraMachine, v1alpha1.ProviderMachinePaused, clusterv1.PausedV1Beta2Reason, clusterv1.ConditionSeverityInfo, "") + } + + switch { + case annotations.IsPaused(cluster, infraMachine): + log.Info("reconciliation is paused") + case !infraMachine.DeletionTimestamp.IsZero(): err = reconciler.delete() - } else if !controllerutil.ContainsFinalizer(infraMachine, v1alpha1.MachineFinalizer) { + case !controllerutil.ContainsFinalizer(infraMachine, v1alpha1.MachineFinalizer): log.Info("adding finalizer") controllerutil.AddFinalizer(infraMachine, v1alpha1.MachineFinalizer) - } else { + default: result, err = reconciler.reconcile() } @@ -183,19 +192,30 @@ func (r *machineReconciler) reconcile() (ctrl.Result, error) { r.log.Info("reconciling machine") - m, err := r.findProviderMachine() - if err != nil && !errors.Is(err, errProviderMachineNotFound) { - conditions.MarkFalse(r.infraMachine, v1alpha1.ProviderMachineCreated, "InternalError", clusterv1.ConditionSeverityError, "%s", err.Error()) - return ctrl.Result{}, err - } + var ( + m *models.V1MachineResponse + err error + ) - if errors.Is(err, errProviderMachineNotFound) { + if r.infraMachine.Spec.ProviderID != "" { + m, err = r.findProviderMachine() + if errors.Is(err, errProviderMachineNotFound) { + r.infraMachine.Status.FailureReason = pointer.Pointer(capierrors.UpdateMachineError) + 
+			r.infraMachine.Status.FailureMessage = pointer.Pointer("machine has been deleted externally")
+			return ctrl.Result{}, errors.New("machine has been deleted externally")
+		}
+		if err != nil {
+			conditions.MarkFalse(r.infraMachine, v1alpha1.ProviderMachineCreated, "InternalError", clusterv1.ConditionSeverityError, "%s", err.Error())
+			return ctrl.Result{}, err
+		}
+	} else {
 		m, err = r.create()
 		if err != nil {
 			conditions.MarkFalse(r.infraMachine, v1alpha1.ProviderMachineCreated, "InternalError", clusterv1.ConditionSeverityError, "%s", err.Error())
 			return ctrl.Result{}, fmt.Errorf("unable to create machine at provider: %w", err)
 		}
 	}
+
 	conditions.MarkTrue(r.infraMachine, v1alpha1.ProviderMachineCreated)
 
 	if m.ID == nil {
@@ -372,26 +392,22 @@ func (r *machineReconciler) getMachineAddresses(m *models.V1MachineResponse) clu
 }
 
 func (r *machineReconciler) findProviderMachine() (*models.V1MachineResponse, error) {
-	mfr := &models.V1MachineFindRequest{
-		ID:                decodeProviderID(r.infraMachine.Spec.ProviderID),
-		AllocationProject: r.infraCluster.Spec.ProjectID,
-		Tags:              r.machineTags(),
-	}
+	resp, err := r.metalClient.Machine().FindMachine(metalmachine.NewFindMachineParams().WithContext(r.ctx).WithID(decodeProviderID(r.infraMachine.Spec.ProviderID)), nil)
 
-	resp, err := r.metalClient.Machine().FindMachines(metalmachine.NewFindMachinesParamsWithContext(r.ctx).WithBody(mfr), nil)
+	var errResp *metalmachine.FindMachineDefault
+	if errors.As(err, &errResp) && errResp.Code() == http.StatusNotFound {
+		return nil, errProviderMachineNotFound
+	}
 	if err != nil {
+		conditions.MarkFalse(r.infraMachine, v1alpha1.ProviderMachineCreated, "InternalError", clusterv1.ConditionSeverityError, "%s", err.Error())
 		return nil, err
 	}
 
-	switch len(resp.Payload) {
-	case 0:
-		// metal-stack machine already freed
+	if resp.Payload.Allocation == nil || resp.Payload.Allocation.Project == nil || *resp.Payload.Allocation.Project != r.infraCluster.Spec.ProjectID {
 		return nil, errProviderMachineNotFound
-	case 1:
-		return resp.Payload[0], nil
-	default:
-		return nil, errProviderMachineTooManyFound
 	}
+
+	return resp.Payload, nil
 }
 
 func (r *machineReconciler) patchMachineLabels(m *models.V1MachineResponse) {
@@ -421,8 +437,8 @@ func (r *machineReconciler) patchMachineLabels(m *models.V1MachineResponse) {
 
 func (r *machineReconciler) machineTags() []string {
 	tags := []string{
-		tag.New(tag.ClusterID, string(r.infraCluster.GetUID())),
-		tag.New(v1alpha1.TagInfraMachineID, string(r.infraMachine.GetUID())),
+		tag.New(v1alpha1.TagInfraClusterResource, fmt.Sprintf("%s/%s", r.infraCluster.Namespace, r.infraCluster.Name)),
+		tag.New(v1alpha1.TagInfraMachineResource, fmt.Sprintf("%s/%s", r.infraMachine.Namespace, r.infraMachine.Name)),
 	}
 
 	if util.IsControlPlaneMachine(r.clusterMachine) {
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index 619c6e7..5b0a67a 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -71,7 +71,7 @@ var _ = BeforeSuite(func() {
 	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to run make manifests")
 
 	By("building the manager(Operator) image")
-	cmd = exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectImage))
+	cmd = exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectImage)) //nolint
 	_, err = utils.Run(cmd)
 	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to build the manager(Operator) image")
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 8496c44..9445280 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -59,7 +59,7 @@ var _ = Describe("Manager", Ordered, func() {
 		Expect(err).NotTo(HaveOccurred(), "Failed to install CRDs")
 
 		By("deploying the controller-manager")
-		cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectImage))
+		cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectImage)) //nolint
 		_, err = utils.Run(cmd)
 		Expect(err).NotTo(HaveOccurred(), "Failed to deploy the controller-manager")
 	})
@@ -94,7 +94,7 @@ var _ = Describe("Manager", Ordered, func() {
 			specReport := CurrentSpecReport()
 			if specReport.Failed() {
 				By("Fetching controller manager pod logs")
-				cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace)
+				cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace) //nolint
 				controllerLogs, err := utils.Run(cmd)
 				if err == nil {
 					_, _ = fmt.Fprintf(GinkgoWriter, "Controller logs:\n %s", controllerLogs)
@@ -121,7 +121,7 @@ var _ = Describe("Manager", Ordered, func() {
 			}
 
 			By("Fetching controller manager pod description")
-			cmd = exec.Command("kubectl", "describe", "pod", controllerPodName, "-n", namespace)
+			cmd = exec.Command("kubectl", "describe", "pod", controllerPodName, "-n", namespace) //nolint
 			podDescription, err := utils.Run(cmd)
 			if err == nil {
 				fmt.Println("Pod description:\n", podDescription)
@@ -156,7 +156,7 @@ var _ = Describe("Manager", Ordered, func() {
 			g.Expect(controllerPodName).To(ContainSubstring("controller-manager"))
 
 			// Validate the pod's status
-			cmd = exec.Command("kubectl", "get",
+			cmd = exec.Command("kubectl", "get", //nolint
 				"pods", controllerPodName, "-o", "jsonpath={.status.phase}",
 				"-n", namespace,
 			)
@@ -169,7 +169,7 @@ var _ = Describe("Manager", Ordered, func() {
 
 		It("should ensure the metrics endpoint is serving metrics", func() {
 			By("creating a ClusterRoleBinding for the service account to allow access to metrics")
-			cmd := exec.Command("kubectl", "create", "clusterrolebinding", metricsRoleBindingName,
+			cmd := exec.Command("kubectl", "create", "clusterrolebinding", metricsRoleBindingName, //nolint
 				"--clusterrole=capms-metrics-reader",
 				fmt.Sprintf("--serviceaccount=%s:%s", namespace, serviceAccountName),
 			)
@@ -202,7 +202,7 @@ var _ = Describe("Manager", Ordered, func() {
 
 			By("verifying that the controller manager is serving the metrics server")
 			verifyMetricsServerStarted := func(g Gomega) {
-				cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace)
+				cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace) //nolint
 				output, err := utils.Run(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
 				g.Expect(output).To(ContainSubstring("controller-runtime.metrics\tServing metrics server"),
@@ -211,7 +211,7 @@ var _ = Describe("Manager", Ordered, func() {
 			Eventually(verifyMetricsServerStarted).Should(Succeed())
 
 			By("creating the curl-metrics pod to access the metrics endpoint")
-			cmd = exec.Command("kubectl", "run", "curl-metrics", "--restart=Never",
+			cmd = exec.Command("kubectl", "run", "curl-metrics", "--restart=Never", //nolint
 				"--namespace", "kube-system", "--image=curlimages/curl:7.78.0", "--",
 				"/bin/sh", "-c", fmt.Sprintf(
@@ -255,10 +255,7 @@ var _ = Describe("Manager", Ordered, func() {
 // It uses the Kubernetes TokenRequest API to generate a token by directly sending a request
 // and parsing the resulting token from the API response.
 func serviceAccountToken() (string, error) {
-	const tokenRequestRawString = `{
-		"apiVersion": "authentication.k8s.io/v1",
-		"kind": "TokenRequest"
-	}`
+	const tokenRequestRawString = `{"apiVersion": "authentication.k8s.io/v1", "kind": "TokenRequest"}` //nolint
 
 	// Temporary file to store the token request
 	secretName := fmt.Sprintf("%s-token-request", serviceAccountName)
@@ -271,7 +268,7 @@ func serviceAccountToken() (string, error) {
 	var out string
 	verifyTokenCreation := func(g Gomega) {
 		// Execute kubectl command to create the token
-		cmd := exec.Command("kubectl", "create", "--raw", fmt.Sprintf(
+		cmd := exec.Command("kubectl", "create", "--raw", fmt.Sprintf( //nolint
 			"/api/v1/namespaces/%s/serviceaccounts/%s/token",
 			namespace,
 			serviceAccountName,
diff --git a/test/utils/utils.go b/test/utils/utils.go
index 8319bc4..4a50cd0 100644
--- a/test/utils/utils.go
+++ b/test/utils/utils.go
@@ -54,7 +54,7 @@ func Run(cmd *exec.Cmd) (string, error) {
 	_, _ = fmt.Fprintf(GinkgoWriter, "running: %s\n", command)
 	output, err := cmd.CombinedOutput()
 	if err != nil {
-		return string(output), fmt.Errorf("%s failed with error: (%v) %s", command, err, string(output))
+		return string(output), fmt.Errorf("%s failed with error: (%w) %s", command, err, string(output))
 	}
 
 	return string(output), nil
@@ -63,7 +63,7 @@ func Run(cmd *exec.Cmd) (string, error) {
 // InstallPrometheusOperator installs the prometheus Operator to be used to export the enabled metrics.
 func InstallPrometheusOperator() error {
 	url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion)
-	cmd := exec.Command("kubectl", "create", "-f", url)
+	cmd := exec.Command("kubectl", "create", "-f", url) //nolint
 	_, err := Run(cmd)
 	return err
 }
@@ -71,7 +71,7 @@ func InstallPrometheusOperator() error {
 // UninstallPrometheusOperator uninstalls the prometheus
 func UninstallPrometheusOperator() {
 	url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion)
-	cmd := exec.Command("kubectl", "delete", "-f", url)
+	cmd := exec.Command("kubectl", "delete", "-f", url) //nolint
 	if _, err := Run(cmd); err != nil {
 		warnError(err)
 	}
@@ -107,7 +107,7 @@ func IsPrometheusCRDsInstalled() bool {
 // UninstallCertManager uninstalls the cert manager
 func UninstallCertManager() {
 	url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion)
-	cmd := exec.Command("kubectl", "delete", "-f", url)
+	cmd := exec.Command("kubectl", "delete", "-f", url) //nolint
 	if _, err := Run(cmd); err != nil {
 		warnError(err)
 	}
@@ -116,7 +116,7 @@ func UninstallCertManager() {
 // InstallCertManager installs the cert manager bundle.
 func InstallCertManager() error {
 	url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion)
-	cmd := exec.Command("kubectl", "apply", "-f", url)
+	cmd := exec.Command("kubectl", "apply", "-f", url) //nolint
 	if _, err := Run(cmd); err != nil {
 		return err
 	}
@@ -172,7 +172,7 @@ func LoadImageToKindClusterWithName(name string) error {
 		cluster = v
 	}
 	kindOptions := []string{"load", "docker-image", name, "--name", cluster}
-	cmd := exec.Command("kind", kindOptions...)
+	cmd := exec.Command("kind", kindOptions...) //nolint
 	_, err := Run(cmd)
 	return err
}
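
Both reconcilers apply the same two-step pause pattern: record the paused state as a condition, then short-circuit the reconcile switch before any delete, finalizer, or reconcile work. Below is a minimal, self-contained sketch of that pattern and of the 404 check used in findProviderMachine. gateOnPause and machineNotFound are illustrative helper names, not code from this PR, and the import paths are assumed from the repository's dependencies.

package controller

import (
	"errors"
	"net/http"

	metalmachine "github.com/metal-stack/metal-go/api/client/machine"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/annotations"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// gateOnPause marks the given paused condition and reports whether
// reconciliation should be skipped. annotations.IsPaused returns true when
// the owning Cluster has spec.paused set or the infra object carries the
// cluster.x-k8s.io/paused annotation; the condition is updated in both
// branches so consumers can always observe the current paused state.
func gateOnPause(cluster *clusterv1.Cluster, obj conditions.Setter, cond clusterv1.ConditionType) bool {
	if annotations.IsPaused(cluster, obj) {
		conditions.MarkTrue(obj, cond)
		return true
	}

	conditions.MarkFalse(obj, cond, clusterv1.PausedV1Beta2Reason, clusterv1.ConditionSeverityInfo, "")
	return false
}

// machineNotFound mirrors the errors.As check in findProviderMachine: the
// go-swagger generated client wraps non-2xx responses in the operation's
// Default response type, so a 404 surfaces as *FindMachineDefault with
// Code() == http.StatusNotFound rather than as a sentinel error value.
func machineNotFound(err error) bool {
	var errResp *metalmachine.FindMachineDefault
	return errors.As(err, &errResp) && errResp.Code() == http.StatusNotFound
}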