diff --git a/e2e/common.go b/e2e/common.go
index c143ec29..dfc88fc8 100644
--- a/e2e/common.go
+++ b/e2e/common.go
@@ -5,6 +5,7 @@ import (
 	"io"
 	"os/exec"
 	"strings"
+	"sync"
 	"syscall"
 	"time"
 
@@ -13,7 +14,7 @@ import (
 )
 
 const (
-	StartCommandWait  = 30 * time.Second
+	StartCommandWait  = 15 * time.Second
 	RunCommandTimeout = 60 * time.Second
 )
 
@@ -32,39 +33,56 @@ func StartCommand(log *logrus.Entry, commandName string, arg ...string) (string,
 	outPipe, _ := cmd.StdoutPipe()
 	errPipe, _ := cmd.StderrPipe()
 
-	var sb strings.Builder
+	var sbOut strings.Builder
+	var sbErr strings.Builder
+
 	go func(_ io.ReadCloser) {
 		reader := bufio.NewReader(errPipe)
-		line, err := reader.ReadString('\n')
-		for err == nil {
-			sb.WriteString(line)
-			line, err = reader.ReadString('\n')
+		for {
+			line, err := reader.ReadString('\n')
+			// Write the line even if there's an error, as long as we got data
+			if len(line) > 0 {
+				sbErr.WriteString(line)
+			}
+			if err != nil {
+				break
+			}
 		}
 	}(errPipe)
 
 	go func(_ io.ReadCloser) {
 		reader := bufio.NewReader(outPipe)
-		line, err := reader.ReadString('\n')
-		for err == nil {
-			sb.WriteString(line)
-			line, err = reader.ReadString('\n')
+		for {
+			line, err := reader.ReadString('\n')
+			// Write the line even if there's an error, as long as we got data
+			if len(line) > 0 {
+				sbOut.WriteString(line)
+			}
+			if err != nil {
+				break
+			}
 		}
 	}(outPipe)
 
 	// start async
 	go func() {
 		log.Debug("Starting async ...")
-		_, err := pty.Start(cmd)
+		ptmx, err := pty.Start(cmd)
 		if err != nil {
 			log.Errorf("Start returned error: %v", err)
+			return
 		}
+		// The PTY is deliberately left open: the command keeps running after
+		// StartCommand returns, and closing the PTY would send it SIGHUP.
+		_ = ptmx
 	}()
 
 	log.Debugf("Waiting %v ...", StartCommandWait)
 	time.Sleep(StartCommandWait)
 	log.Debug("Returning result while command still running")
-	return sb.String(), nil
+	// Combine stderr first (errors more visible), then stdout
+	return sbErr.String() + sbOut.String(), nil
 }
 
 // run command with tty support and wait for stop
@@ -77,31 +95,48 @@ func RunCommand(log *logrus.Entry, commandName string, arg ...string) (string, e
 	outPipe, _ := cmd.StdoutPipe()
 	errPipe, _ := cmd.StderrPipe()
 
-	var sb strings.Builder
+	var sbOut strings.Builder
+	var sbErr strings.Builder
+	var wg sync.WaitGroup
+
+	wg.Add(2)
 	go func(_ io.ReadCloser) {
+		defer wg.Done()
 		reader := bufio.NewReader(errPipe)
-		line, err := reader.ReadString('\n')
-		for err == nil {
-			sb.WriteString(line)
-			line, err = reader.ReadString('\n')
+		for {
+			line, err := reader.ReadString('\n')
+			// Write the line even if there's an error, as long as we got data
+			if len(line) > 0 {
+				sbErr.WriteString(line)
+			}
+			if err != nil {
+				break
+			}
 		}
 	}(errPipe)
 
 	go func(_ io.ReadCloser) {
+		defer wg.Done()
 		reader := bufio.NewReader(outPipe)
-		line, err := reader.ReadString('\n')
-		for err == nil {
-			sb.WriteString(line)
-			line, err = reader.ReadString('\n')
+		for {
+			line, err := reader.ReadString('\n')
+			// Write the line even if there's an error, as long as we got data
+			if len(line) > 0 {
+				sbOut.WriteString(line)
+			}
+			if err != nil {
+				break
+			}
 		}
 	}(outPipe)
 
 	log.Debug("Starting ...")
-	_, err := pty.Start(cmd)
+	ptmx, err := pty.Start(cmd)
 	if err != nil {
 		log.Errorf("Start returned error: %v", err)
 		return "", err
 	}
+	defer ptmx.Close() // Ensure PTY is closed after command finishes
 
 	log.Debug("Waiting ...")
 	err = cmd.Wait()
@@ -109,12 +144,16 @@ func RunCommand(log *logrus.Entry, commandName string, arg ...string) (string, e
 		log.Errorf("Wait 
returned error: %v", err) } + log.Debug("Waiting for output goroutines to finish...") + wg.Wait() + // TODO: find why this returns -1. That may be related to pty implementation /*if cmd.ProcessState.ExitCode() != 0 { - return sb.String(), fmt.Errorf("Cmd returned code %d", cmd.ProcessState.ExitCode()) + return sbErr.String() + sbOut.String(), fmt.Errorf("Cmd returned code %d", cmd.ProcessState.ExitCode()) }*/ - return sb.String(), nil + // Combine stderr first (errors more visible), then stdout + return sbErr.String() + sbOut.String(), nil } // run command with tty support and terminate it after timeout @@ -137,22 +176,38 @@ func RunCommandAndTerminate(log *logrus.Entry, commandName string, arg ...string }) defer timer.Stop() - var sb strings.Builder + var sbOut strings.Builder + var sbErr strings.Builder + var wg sync.WaitGroup + + wg.Add(2) go func(_ io.ReadCloser) { + defer wg.Done() reader := bufio.NewReader(errPipe) - line, err := reader.ReadString('\n') - for err == nil { - sb.WriteString(line) - line, err = reader.ReadString('\n') + for { + line, err := reader.ReadString('\n') + // Write line even if there's an error, as long as we got data + if len(line) > 0 { + sbErr.WriteString(line) + } + if err != nil { + break + } } }(errPipe) go func(_ io.ReadCloser) { + defer wg.Done() reader := bufio.NewReader(outPipe) - line, err := reader.ReadString('\n') - for err == nil { - sb.WriteString(line) - line, err = reader.ReadString('\n') + for { + line, err := reader.ReadString('\n') + // Write line even if there's an error, as long as we got data + if len(line) > 0 { + sbOut.WriteString(line) + } + if err != nil { + break + } } }(outPipe) @@ -178,10 +233,14 @@ func RunCommandAndTerminate(log *logrus.Entry, commandName string, arg ...string log.Errorf("Wait returned error: %v", err) } + log.Debug("Waiting for output goroutines to finish...") + wg.Wait() + // TODO: find why this returns -1. 
That may be related to pty implementation /*if cmd.ProcessState.ExitCode() != 0 { - return sb.String(), fmt.Errorf("Cmd returned code %d", cmd.ProcessState.ExitCode()) + return sbErr.String() + sbOut.String(), fmt.Errorf("Cmd returned code %d", cmd.ProcessState.ExitCode()) }*/ - return sb.String(), nil + // Combine stderr first (errors more visible), then stdout + return sbErr.String() + sbOut.String(), nil } diff --git a/e2e/integration-tests/cli.go b/e2e/integration-tests/cli.go index 5db8dcf7..10b81685 100644 --- a/e2e/integration-tests/cli.go +++ b/e2e/integration-tests/cli.go @@ -6,6 +6,7 @@ import ( "context" "io/fs" "os" + "path/filepath" "time" "k8s.io/apimachinery/pkg/api/errors" @@ -19,6 +20,7 @@ import ( const ( PollInterval = 5 * time.Second PollTimeout = 10 * time.Minute + outputDir = "./output/flow" ) var ( @@ -26,8 +28,8 @@ var ( ) func isNamespace(clientset *kubernetes.Clientset, cliNS string, exists bool) (bool, error) { - err := wait.PollUntilContextTimeout(context.Background(), PollInterval, PollTimeout, true, func(context.Context) (done bool, err error) { - namespace, err := getNamespace(clientset, cliNS) + err := wait.PollUntilContextTimeout(context.Background(), PollInterval, PollTimeout, true, func(ctx context.Context) (done bool, err error) { + namespace, err := getNamespace(ctx, clientset, cliNS) if exists { if err != nil { return false, err @@ -36,7 +38,7 @@ func isNamespace(clientset *kubernetes.Clientset, cliNS string, exists bool) (bo } else if errors.IsNotFound(err) { return true, nil } - return false, err + return false, nil }) if err != nil { return false, err @@ -45,8 +47,8 @@ func isNamespace(clientset *kubernetes.Clientset, cliNS string, exists bool) (bo } func isCollector(clientset *kubernetes.Clientset, cliNS string, ready bool) (bool, error) { - err := wait.PollUntilContextTimeout(context.Background(), PollInterval, PollTimeout, true, func(context.Context) (done bool, err error) { - collectorPod, err := getNamespacePods(clientset, cliNS, &metav1.ListOptions{FieldSelector: "status.phase=Running", LabelSelector: "run=collector"}) + err := wait.PollUntilContextTimeout(context.Background(), PollInterval, PollTimeout, true, func(ctx context.Context) (done bool, err error) { + collectorPod, err := getNamespacePods(ctx, clientset, cliNS, &metav1.ListOptions{FieldSelector: "status.phase=Running", LabelSelector: "run=collector"}) if err != nil { return false, err } @@ -62,9 +64,13 @@ func isCollector(clientset *kubernetes.Clientset, cliNS string, ready bool) (boo } func isDaemonsetReady(clientset *kubernetes.Clientset, daemonsetName string, cliNS string) (bool, error) { - err := wait.PollUntilContextTimeout(context.Background(), PollInterval, PollTimeout, true, func(context.Context) (done bool, err error) { - cliDaemonset, err := getDaemonSet(clientset, daemonsetName, cliNS) + err := wait.PollUntilContextTimeout(context.Background(), PollInterval, PollTimeout, true, func(ctx context.Context) (done bool, err error) { + cliDaemonset, err := getDaemonSet(ctx, clientset, daemonsetName, cliNS) if err != nil { + if errors.IsNotFound(err) { + clog.Infof("daemonset not found %v", err) + return false, nil + } return false, err } return cliDaemonset.Status.DesiredNumberScheduled == cliDaemonset.Status.NumberReady, nil @@ -110,8 +116,8 @@ func isCLIDone(clientset *kubernetes.Clientset, cliNS string) (bool, error) { func getFlowsJSONFile() (string, error) { // var files []fs.DirEntry var files []string - outputDir := "./output/flow/" dirFS := os.DirFS(outputDir) + files, 
err := fs.Glob(dirFS, "*.json") if err != nil { return "", err @@ -119,7 +125,7 @@ func getFlowsJSONFile() (string, error) { // this could be problematic if two tests are running in parallel with --copy=true var mostRecentFile fs.FileInfo for _, file := range files { - fileInfo, err := os.Stat(outputDir + file) + fileInfo, err := os.Stat(filepath.Join(outputDir, file)) if err != nil { return "", nil } @@ -127,5 +133,9 @@ func getFlowsJSONFile() (string, error) { mostRecentFile = fileInfo } } - return outputDir + mostRecentFile.Name(), nil + absPath, err := filepath.Abs(filepath.Join(outputDir, mostRecentFile.Name())) + if err != nil { + return "", err + } + return absPath, nil } diff --git a/e2e/integration-tests/cluster.go b/e2e/integration-tests/cluster.go index 79813ae5..1af55961 100644 --- a/e2e/integration-tests/cluster.go +++ b/e2e/integration-tests/cluster.go @@ -39,7 +39,7 @@ func getNewClient() (*kubernetes.Clientset, error) { } func getClusterNodes(clientset *kubernetes.Clientset, options *metav1.ListOptions) ([]string, error) { - nodes, err := clientset.CoreV1().Nodes().List(context.TODO(), *options) + nodes, err := clientset.CoreV1().Nodes().List(context.Background(), *options) var allNodes []string if err != nil { return allNodes, err @@ -50,8 +50,8 @@ func getClusterNodes(clientset *kubernetes.Clientset, options *metav1.ListOption return allNodes, nil } -func getDaemonSet(clientset *kubernetes.Clientset, daemonset string, ns string) (*appsv1.DaemonSet, error) { - ds, err := clientset.AppsV1().DaemonSets(ns).Get(context.TODO(), daemonset, metav1.GetOptions{}) +func getDaemonSet(ctx context.Context, clientset *kubernetes.Clientset, daemonset string, ns string) (*appsv1.DaemonSet, error) { + ds, err := clientset.AppsV1().DaemonSets(ns).Get(ctx, daemonset, metav1.GetOptions{}) if err != nil { return nil, err @@ -60,16 +60,16 @@ func getDaemonSet(clientset *kubernetes.Clientset, daemonset string, ns string) return ds, nil } -func getNamespace(clientset *kubernetes.Clientset, name string) (*corev1.Namespace, error) { - namespace, err := clientset.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{}) +func getNamespace(ctx context.Context, clientset *kubernetes.Clientset, name string) (*corev1.Namespace, error) { + namespace, err := clientset.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{}) if err != nil { return nil, err } return namespace, nil } -func getNamespacePods(clientset *kubernetes.Clientset, namespace string, options *metav1.ListOptions) ([]string, error) { - pods, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), *options) +func getNamespacePods(ctx context.Context, clientset *kubernetes.Clientset, namespace string, options *metav1.ListOptions) ([]string, error) { + pods, err := clientset.CoreV1().Pods(namespace).List(ctx, *options) var allPods []string if err != nil { return allPods, err @@ -80,8 +80,8 @@ func getNamespacePods(clientset *kubernetes.Clientset, namespace string, options return allPods, nil } -func getConfigMap(clientset *kubernetes.Clientset, name string, namespace string) (*corev1.ConfigMap, error) { - cm, err := clientset.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +func getConfigMap(ctx context.Context, clientset *kubernetes.Clientset, name string, namespace string) (*corev1.ConfigMap, error) { + cm, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -111,7 +111,7 @@ func queryPrometheusMetric(clientset 
*kubernetes.Clientset, query string) (float
 	// Get the Prometheus route from openshift-monitoring namespace
 	var routeGVR = schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"}
-	unstructuredRoute, err := dynclient.Resource(routeGVR).Namespace("openshift-monitoring").Get(context.TODO(), "prometheus-k8s", metav1.GetOptions{})
+	unstructuredRoute, err := dynclient.Resource(routeGVR).Namespace("openshift-monitoring").Get(context.Background(), "prometheus-k8s", metav1.GetOptions{})
 	if err != nil {
 		return 0.0, fmt.Errorf("failed to get prometheus route: %w", err)
 	}
@@ -159,7 +159,7 @@ func queryPrometheusMetric(clientset *kubernetes.Clientset, query string) (float
 	var finalResult float64
 
 	// Poll for 5 minutes at 20-second intervals
-	err = wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 5*time.Minute, false, func(context.Context) (done bool, err error) {
+	err = wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 5*time.Minute, false, func(_ context.Context) (done bool, err error) {
 		// Execute the request
 		resp, err := httpClient.Do(req)
 		if err != nil {
@@ -235,7 +235,7 @@ func createServiceAccountToken(clientset *kubernetes.Clientset, serviceAccountNa
 	}
 
 	token, err := clientset.CoreV1().ServiceAccounts(namespace).CreateToken(
-		context.TODO(),
+		context.Background(),
 		serviceAccountName,
 		tokenRequest,
 		metav1.CreateOptions{},
diff --git a/e2e/integration-tests/integration_test.go b/e2e/integration-tests/integration_test.go
index 308a2141..a3a55cc5 100644
--- a/e2e/integration-tests/integration_test.go
+++ b/e2e/integration-tests/integration_test.go
@@ -3,12 +3,14 @@ package integrationtests
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"os"
 	"path"
+	"regexp"
+	"strconv"
 	"strings"
-	"time"
 
 	"github.com/netobserv/network-observability-cli/e2e"
 	g "github.com/onsi/ginkgo/v2"
@@ -23,34 +25,37 @@ import (
 var (
 	cliNS = "netobserv-cli"
-	clientset    *kubernetes.Clientset
-	StartupDate  = time.Now().Format("20060102-150405")
-	lastFileName string
-	ilog         = logrus.WithField("component", "integration_test")
+	clientset  *kubernetes.Clientset
+	filePrefix string
+	ilog       = logrus.WithField("component", "integration_test")
+	re         *regexp.Regexp
 )
 
 func writeOutput(filename string, out string) {
 	ilog.Debugf("Writing %s...", filename)
-	// keep last filename written to be able to name the associated cleanup accordingly
-	lastFileName = filename
 	err := os.WriteFile(path.Join(os.Getenv("ARTIFACT_DIR"), filename), []byte(out), 0666)
+	ilog.Infof("Wrote file to path %s", path.Join(os.Getenv("ARTIFACT_DIR"), filename))
 	o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Error writing command output %v", err))
 }
 
 func cleanup() {
 	ilog.Info("Cleaning up...")
+	// remove the flow output dir so stale captures don't leak into the next test
+	ilog.Debugf("Removing %s", outputDir)
+	err := os.RemoveAll(outputDir)
+	o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Couldn't remove %s: %v", outputDir, err))
+
+	// run cli to cleanup namespace
 	cliArgs := []string{"cleanup"}
 	out, err := e2e.RunCommand(ilog, ocNetObservBinPath, cliArgs...)
-	writeOutput(strings.Replace(lastFileName, "Output", "cleanupOutput", 1), out)
+	writeOutput(filePrefix+"-cleanupOutput", out)
 	o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Error during cleanup %v", err))
 
-	// ensure namespace is fully removed before next lunch to avoid error
-	deleted, err := isNamespace(clientset, cliNS, false)
-	o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Can't check if namespace was deleted %v", err))
-	o.Expect(deleted).To(o.BeTrue())
+	// ensure namespace is fully removed before next test to avoid error
+	_, err = isNamespace(clientset, cliNS, false)
+	o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Namespace wasn't deleted %v", err))
 
 	ilog.Debug("Cleaned up !")
 }
@@ -63,22 +68,31 @@ var _ = g.Describe("NetObserv CLI e2e integration test suite", g.Ordered, func()
 		o.Expect(err).NotTo(o.HaveOccurred())
 		clientset = c
 	})
+	g.BeforeEach(func(ctx g.SpecContext) {
+		re = regexp.MustCompile(`OCP-\d+`)
+		var filePrefixParts []string
+		filePrefixParts = append(filePrefixParts, re.FindString(ctx.SpecReport().FullText()))
+		if len(ctx.SpecReport().Labels()) > 0 {
+			filePrefixParts = append(filePrefixParts, ctx.SpecReport().Labels()[0])
+		}
+		filePrefix = strings.Join(filePrefixParts, "-")
+	})
 
 	g.It("OCP-73458: Verify all CLI pods are deployed", g.Label("Sanity"), func() {
 		g.DeferCleanup(func() {
 			cleanup()
 		})
-		cliArgs := []string{"flows", "--copy=false"}
+		cliArgs := []string{"flows", "--copy=false", "--max-time=1m"}
 		out, err := e2e.StartCommand(ilog, ocNetObservBinPath, cliArgs...)
-		writeOutput(StartupDate+"-flowOutput", out)
+		writeOutput(filePrefix+"-flowOutput", out)
 		o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Error starting command %v", err))
 
 		nodes, err := getClusterNodes(clientset, &metav1.ListOptions{})
 		o.Expect(err).NotTo(o.HaveOccurred())
 		_, err = isCLIRuning(clientset, cliNS)
 		o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("CLI didn't come ready %v", err))
-		allPods, err := getNamespacePods(clientset, cliNS, &metav1.ListOptions{})
+		allPods, err := getNamespacePods(context.Background(), clientset, cliNS, &metav1.ListOptions{})
 		o.Expect(err).NotTo(o.HaveOccurred())
 		// agent pods + collector pods
 		totalExpectedPods := len(nodes) + 1
@@ -94,7 +108,7 @@ var _ = g.Describe("NetObserv CLI e2e integration test suite", g.Ordered, func()
 		nsfilter := "openshift-monitoring"
 		cliArgs := []string{"flows", fmt.Sprintf("--query=SrcK8S_Namespace=~\"%s\"", nsfilter), "--copy=true", "--max-bytes=500000", "--max-time=1m"}
 		out, err := e2e.RunCommand(ilog, ocNetObservBinPath, cliArgs...)
-		writeOutput(StartupDate+"-flowQueryOutput", out)
+		writeOutput(filePrefix+"-flowOutput", out)
 		o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Error running command %v", err))
 
 		_, err = isCLIDone(clientset, cliNS)
@@ -126,6 +140,7 @@ var _ = g.Describe("NetObserv CLI e2e integration test suite", g.Ordered, func()
 	})
 
 	g.It("OCP-82597: Verify sampling value of 1 is applied in captured flows", g.Label("Sampling"), func() {
+
 		g.DeferCleanup(func() {
 			cleanup()
 		})
@@ -133,7 +148,7 @@ var _ = g.Describe("NetObserv CLI e2e integration test suite", g.Ordered, func()
 		// capture upto 500KB with sampling=1
 		cliArgs := []string{"flows", "--sampling=1", "--copy=true", "--max-bytes=500000", "--max-time=1m"}
 		out, err := e2e.RunCommand(ilog, ocNetObservBinPath, cliArgs...)
- writeOutput(StartupDate+"-flowSamplingOutput", out) + writeOutput(filePrefix+"-flowOutput", out) o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Error running command %v", err)) _, err = isCLIDone(clientset, cliNS) @@ -171,7 +186,7 @@ var _ = g.Describe("NetObserv CLI e2e integration test suite", g.Ordered, func() // capture upto 500KB with exclude_interfaces=genev_sys_6081 cliArgs := []string{"flows", "--exclude_interfaces=genev_sys_6081", "--copy=true", "--max-bytes=500000", "--max-time=1m"} out, err := e2e.RunCommand(ilog, ocNetObservBinPath, cliArgs...) - writeOutput(StartupDate+"-flowInterfacesOutput", out) + writeOutput(filePrefix+"-flowOutput", out) o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Error running command %v", err)) _, err = isCLIDone(clientset, cliNS) @@ -208,7 +223,7 @@ var _ = g.Describe("NetObserv CLI e2e integration test suite", g.Ordered, func() // Run metrics command cliArgs := []string{"metrics", "--background"} out, err := e2e.StartCommand(ilog, ocNetObservBinPath, cliArgs...) - writeOutput(StartupDate+"-metricsOutput", out) + writeOutput(filePrefix+"-flowOutput", out) o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Error starting command %v", err)) // Wait for CLI to be ready @@ -217,7 +232,7 @@ var _ = g.Describe("NetObserv CLI e2e integration test suite", g.Ordered, func() o.Expect(daemonsetReady).To(o.BeTrue(), "agent daemonset didn't come ready") // Check that dashboard configmap is created - dashboardCM, err := getConfigMap(clientset, "netobserv-cli", "openshift-config-managed") + dashboardCM, err := getConfigMap(context.Background(), clientset, "netobserv-cli", "openshift-config-managed") o.Expect(err).NotTo(o.HaveOccurred(), "Dashboard configmap should be created in openshift-config-managed namespace") o.Expect(dashboardCM.Name).To(o.Equal("netobserv-cli"), "Dashboard configmap should be named netobserv-cli") @@ -239,33 +254,33 @@ var _ = g.Describe("NetObserv CLI e2e integration test suite", g.Ordered, func() { when: "Executing `oc netobserv flows`", it: "does not run as privileged", - cliArgs: []string{"flows"}, + cliArgs: []string{"flows", "--copy=false", "--max-time=1m"}, matcher: o.BeFalse(), }, { when: "Executing `oc netobserv flows --privileged=true`", it: "runs as privileged", - cliArgs: []string{"flows", "--privileged=true"}, + cliArgs: []string{"flows", "--privileged=true", "--copy=false", "--max-time=1m"}, matcher: o.BeTrue(), }, { when: "Executing `oc netobserv flows --drops`", it: "runs as privileged", - cliArgs: []string{"flows", "--drops"}, + cliArgs: []string{"flows", "--drops", "--copy=false", "--max-time=1m"}, matcher: o.BeTrue(), }, } - - for _, t := range tests { + for i, t := range tests { g.When(t.when, func() { g.It(t.it, func() { + filePrefix = filePrefix + "-" + strconv.Itoa(i) g.DeferCleanup(func() { cleanup() }) // run command async until done out, err := e2e.StartCommand(ilog, ocNetObservBinPath, t.cliArgs...) 
- writeOutput(StartupDate+"-flowOutput", out) + writeOutput(filePrefix+"-flowOutput", out) o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Error starting command %v", err)) // Wait for CLI to be ready @@ -274,7 +289,7 @@ var _ = g.Describe("NetObserv CLI e2e integration test suite", g.Ordered, func() o.Expect(daemonsetReady).To(o.BeTrue(), "agent daemonset didn't come ready") // Verify correct privilege setting - ds, err := getDaemonSet(clientset, "netobserv-cli", cliNS) + ds, err := getDaemonSet(context.Background(), clientset, "netobserv-cli", cliNS) o.Expect(err).NotTo(o.HaveOccurred(), "DeamonSet should be created in CLI namespace") containers := ds.Spec.Template.Spec.Containers o.Expect(len(containers)).To(o.Equal(1), "The number of containers specified in the template is != 1") diff --git a/e2e/integration-tests/integration_tests_suite_test.go b/e2e/integration-tests/integration_tests_suite_test.go index e7463a2b..c5b1aecf 100644 --- a/e2e/integration-tests/integration_tests_suite_test.go +++ b/e2e/integration-tests/integration_tests_suite_test.go @@ -31,11 +31,21 @@ var _ = g.BeforeSuite(func() { } // Set ARTIFACT_DIR env var to output directory if not set - if artifactDir := os.Getenv("ARTIFACT_DIR"); artifactDir == "" { - os.Setenv("ARTIFACT_DIR", "output") + artifactDir := os.Getenv("ARTIFACT_DIR") + if artifactDir == "" { + artifactDir = "output" + os.Setenv("ARTIFACT_DIR", artifactDir) + // Check if directory exists and delete it + if _, err := os.Stat(artifactDir); err == nil { + itlog.Debugf("Artifact directory already exists, removing: %s", artifactDir) + err = os.RemoveAll(artifactDir) + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to remove existing artifact directory") + } + + itlog.Debugf("Creating artifact directory: %s", artifactDir) + err := os.MkdirAll(artifactDir, 0755) + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create artifact directory") } - err := os.MkdirAll(os.Getenv("ARTIFACT_DIR"), 0700) - o.Expect(err).NotTo(o.HaveOccurred()) cmd := exec.Command("which", "oc-netobserv") out, err := cmd.Output() diff --git a/scripts/functions.sh b/scripts/functions.sh index f83a4c04..aea7d182 100755 --- a/scripts/functions.sh +++ b/scripts/functions.sh @@ -442,6 +442,9 @@ function deleteNamespace() { } function cleanup() { + # Trap SIGHUP to prevent premature termination when PTY closes + trap '' HUP + if [[ "$runBackground" == "true" || "$skipCleanup" == "true" || "$outputYAML" == "true" ]]; then return fi