diff --git a/tools/flakeguard/cmd/run.go b/tools/flakeguard/cmd/run.go index fa243f095..1eebb5604 100644 --- a/tools/flakeguard/cmd/run.go +++ b/tools/flakeguard/cmd/run.go @@ -6,9 +6,9 @@ import ( "fmt" "os" "os/exec" - "time" + "path/filepath" + "strings" - "github.com/briandowns/spinner" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" @@ -21,8 +21,18 @@ import ( const ( FlakyTestsExitCode = 1 ErrorExitCode = 2 + RawOutputDir = "./flakeguard_raw_output" ) +// runState holds the configuration and results throughout the run process. +type runState struct { + cfg *runConfig + goProject string + testRunner *runner.Runner + mainResults []reports.TestResult + mainReport *reports.TestReport +} + type runConfig struct { ProjectPath string CodeownersPath string @@ -40,112 +50,309 @@ type runConfig struct { SelectTests []string UseShuffle bool ShuffleSeed string - OmitOutputsOnSuccess bool IgnoreParentFailuresOnSubtests bool FailFast bool GoTestTimeout string GoTestCount *int } -type summaryAndExit struct { +// outputManager manages the final output buffer and exit code. +type outputManager struct { buffer bytes.Buffer code int } -func (s *summaryAndExit) flush() { - fmt.Print(s.buffer.String()) - os.Exit(s.code) +// flush prints the buffered output and exits with the stored code. +func (o *outputManager) flush() { + fmt.Print(o.buffer.String()) + os.Exit(o.code) } -func (s *summaryAndExit) logErrorAndExit(err error, msg string, fields ...map[string]interface{}) { +// logErrorAndExit logs an error, sets the exit code to ErrorExitCode, and flushes. +func (o *outputManager) logErrorAndExit(err error, msg string, fields ...map[string]interface{}) { l := log.Error().Err(err) if len(fields) > 0 { l = l.Fields(fields[0]) } l.Msg(msg) - s.code = ErrorExitCode - s.flush() + fmt.Fprintf(&o.buffer, "[ERROR] %s: %v\n", msg, err) + o.code = ErrorExitCode + o.flush() } -func (s *summaryAndExit) logMsgAndExit(level zerolog.Level, msg string, code int, fields ...map[string]interface{}) { +// logMsgAndExit logs a message at a specific level, sets the exit code, and flushes. +func (o *outputManager) logMsgAndExit(level zerolog.Level, msg string, code int, fields ...map[string]interface{}) { l := log.WithLevel(level) if len(fields) > 0 { l = l.Fields(fields[0]) } l.Msg(msg) - s.code = code - s.flush() + fmt.Fprintf(&o.buffer, "[%s] %s\n", level.String(), msg) + o.code = code + o.flush() +} + +// info logs an informational message to zerolog and the output buffer. +func (o *outputManager) info(step, totalSteps int, msg string) { + stepMsg := fmt.Sprintf("(%d/%d) %s", step, totalSteps, msg) + log.Info().Msg(stepMsg) + fmt.Fprintf(&o.buffer, "\n[INFO] %s\n", stepMsg) + fmt.Fprintf(&o.buffer, "%s\n", strings.Repeat("-", len(stepMsg)+7)) +} + +// detail adds a detail line under the current step in the buffer. +func (o *outputManager) detail(msg string, args ...interface{}) { + formattedMsg := fmt.Sprintf(msg, args...) + fmt.Fprintf(&o.buffer, " %s\n", formattedMsg) + log.Debug().Msg(formattedMsg) +} + +// finalStatus adds final status messages (ERROR, WARNING, FAIL) to the buffer. 
+func (o *outputManager) finalStatus(level zerolog.Level, msg string) { + log.WithLevel(level).Msg(msg) + levelStr := strings.ToUpper(level.String()) + if level == zerolog.WarnLevel { + levelStr = "WARN" + } + fmt.Fprintf(&o.buffer, "[%s] %s\n", levelStr, msg) } var RunTestsCmd = &cobra.Command{ Use: "run", - Short: "Run tests to check if they are flaky", + Short: "Run tests potentially multiple times, check for flakiness, and report results.", + Long: `Runs tests using 'go test -json'. +Can run tests multiple times and rerun failed tests to detect flakiness. +Provides a structured summary of the execution flow and final results, +followed by detailed logs for all executed tests. + +Exit Codes: + 0: Success (all tests passed stability requirements) + 1: Flaky tests found or tests failed persistently after reruns + 2: Error during execution (e.g., setup failure, command error)`, Run: func(cmd *cobra.Command, args []string) { - exitHandler := &summaryAndExit{} + outputMgr := &outputManager{code: 0} // Default success + state := &runState{} + var err error - cfg, err := parseAndValidateFlags(cmd) + // Configuration & Setup + state.cfg, err = parseAndValidateFlags(cmd) if err != nil { - exitHandler.logErrorAndExit(err, "Failed to parse or validate flags") + outputMgr.logErrorAndExit(err, "Failed to parse or validate flags") } - goProject, err := utils.GetGoProjectName(cfg.ProjectPath) + state.goProject, err = utils.GetGoProjectName(state.cfg.ProjectPath) if err != nil { - log.Warn().Err(err).Str("projectPath", cfg.ProjectPath).Msg("Failed to get pretty project path") + log.Warn().Err(err).Str("projectPath", state.cfg.ProjectPath).Msg("Failed to get pretty project path for report metadata") } - if err := checkDependencies(cfg.ProjectPath); err != nil { - exitHandler.logErrorAndExit(err, "Error checking project dependencies") - } + state.testRunner = initializeRunner(state.cfg) - testPackages, err := determineTestPackages(cfg) - if err != nil { - exitHandler.logErrorAndExit(err, "Failed to determine test packages") + totalSteps := 3 // Prep, Initial Run, Final Summary + if state.cfg.RerunFailedCount > 0 { + totalSteps++ // Add Retry step } - testRunner := initializeRunner(cfg) + // Preparation + outputMgr.info(1, totalSteps, "Preparing environment...") + if err := checkDependencies(state.cfg.ProjectPath); err != nil { + outputMgr.detail("Warning: Dependency check ('go mod tidy') failed: %v", err) + } else { + outputMgr.detail("Dependency check ('go mod tidy'): OK") + } + outputMgr.detail("Preparation complete.") - s := spinner.New(spinner.CharSets[14], 100*time.Millisecond) - s.Suffix = " Running tests..." - s.Start() + // Initial Test Run + outputMgr.info(2, totalSteps, "Running initial tests...") - var mainResults []reports.TestResult var runErr error - if len(cfg.TestCmds) > 0 { - s.Suffix = " Running custom test command..." - mainResults, runErr = testRunner.RunTestCmd(cfg.TestCmds) + testPackages, determineErr := determineTestPackages(state.cfg) + if determineErr != nil { + outputMgr.logErrorAndExit(determineErr, "Failed to determine test packages") + } + + if len(state.cfg.TestCmds) > 0 { + outputMgr.detail("Using custom test command(s)...") + state.mainResults, runErr = state.testRunner.RunTestCmd(state.cfg.TestCmds) } else { - s.Suffix = " Running test packages..." 
- mainResults, runErr = testRunner.RunTestPackages(testPackages) + outputMgr.detail("Running test packages: %s", strings.Join(testPackages, ", ")) + state.mainResults, runErr = state.testRunner.RunTestPackages(testPackages) } - s.Stop() if runErr != nil { - exitHandler.logErrorAndExit(runErr, "Error running tests") + outputMgr.logErrorAndExit(runErr, "Error running initial tests") } - if len(mainResults) == 0 { - exitHandler.logMsgAndExit(zerolog.ErrorLevel, "No tests were run.", ErrorExitCode) + if len(state.mainResults) == 0 { + outputMgr.logMsgAndExit(zerolog.ErrorLevel, "No tests were run.", ErrorExitCode) } - mainReport, err := generateMainReport(mainResults, cfg, goProject) + state.mainReport, err = generateInitialReport(state.mainResults, state.cfg, state.goProject) if err != nil { - exitHandler.logErrorAndExit(err, "Error creating main test report") + outputMgr.logErrorAndExit(err, "Error creating initial test report") } - if cfg.MainResultsPath != "" { - if err := reports.SaveTestResultsToFile(mainResults, cfg.MainResultsPath); err != nil { - log.Error().Err(err).Str("path", cfg.MainResultsPath).Msg("Error saving main test results to file") + + if state.cfg.MainResultsPath != "" { + if err := reports.SaveTestResultsToFile(state.mainResults, state.cfg.MainResultsPath); err != nil { + log.Error().Err(err).Str("path", state.cfg.MainResultsPath).Msg("Error saving main test results to file") + outputMgr.detail("Warning: Failed to save initial results to %s", state.cfg.MainResultsPath) } else { - log.Info().Str("path", cfg.MainResultsPath).Msg("Main test report saved") + log.Info().Str("path", state.cfg.MainResultsPath).Msg("Main test report saved") + outputMgr.detail("Initial results saved to: %s", state.cfg.MainResultsPath) } } - if cfg.RerunFailedCount > 0 { - handleReruns(exitHandler, testRunner, mainReport, cfg, goProject) + initialPassed, initialFailed, initialSkipped := countResults(state.mainResults) + totalInitial := len(state.mainResults) - initialSkipped + outputMgr.detail("Initial run completed:") + outputMgr.detail(" - %d total tests run (excluding skipped)", totalInitial) + outputMgr.detail(" - %d passed", initialPassed) + outputMgr.detail(" - %d failed", initialFailed) + if initialSkipped > 0 { + outputMgr.detail(" - %d skipped", initialSkipped) + } + + initialFailedTests := reports.FilterTests(state.mainResults, func(tr reports.TestResult) bool { + return !tr.Skipped && tr.Failures > 0 + }) + + // Retry Failed Tests + persistentlyFailingTests := initialFailedTests + flakyTests := []reports.TestResult{} + var rerunReport *reports.TestReport + + if state.cfg.RerunFailedCount > 0 && len(initialFailedTests) > 0 { + outputMgr.info(3, totalSteps, "Retrying failed tests...") + + if handleCmdLineArgsEdgeCase(outputMgr, initialFailedTests, state.cfg) { + persistentlyFailingTests = initialFailedTests + } else { + suffix := fmt.Sprintf(" Rerunning %d failed test(s) up to %d times...", len(initialFailedTests), state.cfg.RerunFailedCount) + log.Info().Msg(suffix) + + rerunResults, rerunJSONPaths, rerunErr := state.testRunner.RerunFailedTests(initialFailedTests, state.cfg.RerunFailedCount) + + if rerunErr != nil { + outputMgr.logErrorAndExit(rerunErr, "Error rerunning failed tests") + } + + rerunReportVal, err := reports.NewTestReport(rerunResults, + reports.WithGoProject(state.goProject), + reports.WithCodeOwnersPath(state.cfg.CodeownersPath), + reports.WithMaxPassRatio(1), + reports.WithExcludedTests(state.cfg.SkipTests), + reports.WithSelectedTests(state.cfg.SelectTests), + 
reports.WithJSONOutputPaths(rerunJSONPaths), + ) + if err != nil { + outputMgr.logErrorAndExit(err, "Error creating rerun test report") + } + rerunReport = &rerunReportVal + + if state.cfg.RerunResultsPath != "" && len(rerunResults) > 0 { + if err := reports.SaveTestResultsToFile(rerunResults, state.cfg.RerunResultsPath); err != nil { + log.Error().Err(err).Str("path", state.cfg.RerunResultsPath).Msg("Error saving rerun test results to file") + outputMgr.detail("Warning: Failed to save rerun results to %s", state.cfg.RerunResultsPath) + } else { + log.Info().Str("path", state.cfg.RerunResultsPath).Msg("Rerun test report saved") + outputMgr.detail("Rerun results saved to: %s", state.cfg.RerunResultsPath) + } + } + + persistentlyFailingTests = []reports.TestResult{} + outputMgr.detail("Retry results:") + for _, result := range rerunResults { + if !result.Skipped && result.Successes == 0 { + persistentlyFailingTests = append(persistentlyFailingTests, result) + outputMgr.detail(" - %s: still FAIL", result.TestName) + } else if !result.Skipped && result.Successes > 0 && result.Runs > result.Successes { + flakyTests = append(flakyTests, result) + outputMgr.detail(" - %s: now PASS (flaky)", result.TestName) + } else if !result.Skipped { + outputMgr.detail(" - %s: now PASS", result.TestName) + } + } + } + } else if len(initialFailedTests) > 0 { + outputMgr.detail("No reruns configured or no initial failures to retry.") + if state.cfg.MinPassRatio < 1.0 { + for _, test := range initialFailedTests { + if test.PassRatio >= state.cfg.MinPassRatio { + flakyTests = append(flakyTests, test) + persistentlyFailingTests = reports.FilterTests(persistentlyFailingTests, func(pt reports.TestResult) bool { + return !(pt.TestPackage == test.TestPackage && pt.TestName == test.TestName) + }) + } + } + } + } + + // Final Summary + finalStepNum := 3 + if state.cfg.RerunFailedCount > 0 { + finalStepNum = 4 + } + outputMgr.info(finalStepNum, totalSteps, "Final summary") + + finalFailCount := len(persistentlyFailingTests) + finalFlakyCount := len(flakyTests) + finalPassCount := totalInitial - finalFailCount - finalFlakyCount + + outputMgr.detail("Total tests run: %d", totalInitial) + outputMgr.detail(" - Final PASS: %d", finalPassCount) + outputMgr.detail(" - Final FAIL: %d", finalFailCount) + outputMgr.detail(" - FLAKY: %d", finalFlakyCount) + + fmt.Fprintln(&outputMgr.buffer) + + if finalFailCount > 0 { + outputMgr.finalStatus(zerolog.ErrorLevel, fmt.Sprintf("%d stable failing test(s) found", finalFailCount)) + outputMgr.code = FlakyTestsExitCode + } + if finalFlakyCount > 0 { + outputMgr.finalStatus(zerolog.WarnLevel, fmt.Sprintf("%d flaky test(s) found", finalFlakyCount)) + if outputMgr.code == 0 { + outputMgr.code = FlakyTestsExitCode + } + } + + if outputMgr.code == 0 { + outputMgr.finalStatus(zerolog.InfoLevel, "All tests passed stability requirements.") + } + + if outputMgr.code == FlakyTestsExitCode { + outputMgr.finalStatus(zerolog.ErrorLevel, fmt.Sprintf("Exit code = %d (failures or flaky tests detected)", outputMgr.code)) + } + + // Detailed Logs + + fmt.Fprintf(&outputMgr.buffer, "\n%s\n", strings.Repeat("=", 60)) + initialRunHeader := fmt.Sprintf("=== DETAILED LOGS FOR INITIAL RUN (Initial run count: %d) ===", state.cfg.RunCount) + fmt.Fprintf(&outputMgr.buffer, "%s\n", initialRunHeader) + fmt.Fprintf(&outputMgr.buffer, "%s\n\n", strings.Repeat("=", len(initialRunHeader))) + + reportToLog := state.mainReport + if reportToLog != nil && len(reportToLog.Results) > 0 { + err = 
reportToLog.PrintGotestsumOutput(&outputMgr.buffer, "testname") + if err != nil { + log.Error().Err(err).Msg("Error printing initial run gotestsum output") + fmt.Fprintf(&outputMgr.buffer, "\n[ERROR] Failed to print detailed initial run logs: %v\n", err) + } } else { - handleNoReruns(exitHandler, mainReport, cfg) + fmt.Fprintf(&outputMgr.buffer, "No test execution data available for initial run logs.\n") } - exitHandler.code = 0 - exitHandler.flush() + if rerunReport != nil && len(rerunReport.Results) > 0 { + retryHeader := fmt.Sprintf("=== DETAILED LOGS FOR RETRY ATTEMPTS (%d retries per test) ===", state.cfg.RerunFailedCount) + fmt.Fprintf(&outputMgr.buffer, "\n%s\n", strings.Repeat("=", len(retryHeader))) + fmt.Fprintf(&outputMgr.buffer, "%s\n", retryHeader) + fmt.Fprintf(&outputMgr.buffer, "%s\n\n", strings.Repeat("=", len(retryHeader))) + err = rerunReport.PrintGotestsumOutput(&outputMgr.buffer, "testname") + if err != nil { + log.Error().Err(err).Msg("Error printing retry gotestsum output") + fmt.Fprintf(&outputMgr.buffer, "\n[ERROR] Failed to print detailed retry logs: %v\n", err) + } + } + + outputMgr.flush() }, } @@ -169,7 +376,6 @@ func parseAndValidateFlags(cmd *cobra.Command) (*runConfig, error) { cfg.SelectTests, _ = cmd.Flags().GetStringSlice("select-tests") cfg.UseShuffle, _ = cmd.Flags().GetBool("shuffle") cfg.ShuffleSeed, _ = cmd.Flags().GetString("shuffle-seed") - cfg.OmitOutputsOnSuccess, _ = cmd.Flags().GetBool("omit-test-outputs-on-success") cfg.IgnoreParentFailuresOnSubtests, _ = cmd.Flags().GetBool("ignore-parent-failures-on-subtests") cfg.FailFast, _ = cmd.Flags().GetBool("fail-fast") cfg.GoTestTimeout, _ = cmd.Flags().GetString("go-test-timeout") @@ -219,7 +425,7 @@ func parseAndValidateFlags(cmd *cobra.Command) (*runConfig, error) { // determineTestPackages decides which test packages to run based on the config. func determineTestPackages(cfg *runConfig) ([]string, error) { if len(cfg.TestCmds) > 0 { - return nil, nil // Not needed if running custom commands + return nil, nil } var testPackages []string @@ -237,6 +443,8 @@ func determineTestPackages(cfg *runConfig) ([]string, error) { // initializeRunner creates and configures a new test runner. func initializeRunner(cfg *runConfig) *runner.Runner { + // Force OmitOutputsOnSuccess to false because we are printing all logs at the end + omitOutputs := false return runner.NewRunner( cfg.ProjectPath, true, @@ -251,14 +459,21 @@ func initializeRunner(cfg *runConfig) *runner.Runner { cfg.SkipTests, cfg.SelectTests, cfg.IgnoreParentFailuresOnSubtests, - cfg.OmitOutputsOnSuccess, + omitOutputs, + RawOutputDir, nil, // exec nil, // parser ) } -// generateMainReport creates the initial test report from the main run results. -func generateMainReport(results []reports.TestResult, cfg *runConfig, goProject string) (*reports.TestReport, error) { +// generateInitialReport creates the initial test report from the main run results. 
+func generateInitialReport(results []reports.TestResult, cfg *runConfig, goProject string) (*reports.TestReport, error) { + // Get the JSON output paths from the raw output directory + jsonOutputPaths, err := getJSONOutputPaths(RawOutputDir) + if err != nil { + log.Warn().Err(err).Msg("Failed to get JSON output paths for initial report") + } + reportVal, err := reports.NewTestReport(results, reports.WithGoProject(goProject), reports.WithCodeOwnersPath(cfg.CodeownersPath), @@ -266,133 +481,75 @@ func generateMainReport(results []reports.TestResult, cfg *runConfig, goProject reports.WithGoRaceDetection(cfg.UseRace), reports.WithExcludedTests(cfg.SkipTests), reports.WithSelectedTests(cfg.SelectTests), + reports.WithJSONOutputPaths(jsonOutputPaths), ) if err != nil { - return nil, err + return nil, fmt.Errorf("error creating main test report: %w", err) } return &reportVal, nil } -// handleReruns manages the process of rerunning failed tests and reporting results. -func handleReruns(exitHandler *summaryAndExit, testRunner *runner.Runner, mainReport *reports.TestReport, cfg *runConfig, goProject string) { - failedTests := reports.FilterTests(mainReport.Results, func(tr reports.TestResult) bool { - return !tr.Skipped && tr.PassRatio < 1.0 // Rerun only tests that failed completely or partially in the main run - }) - - if len(failedTests) == 0 { - log.Info().Msg("All tests passed the initial run. No tests to rerun.") - fmt.Fprint(&exitHandler.buffer, "\nFlakeguard Summary\n") - reports.RenderTestReport(&exitHandler.buffer, *mainReport, false, false) - exitHandler.code = 0 - exitHandler.flush() - return +// getJSONOutputPaths returns a list of absolute paths for JSON output files from the given directory. +func getJSONOutputPaths(dir string) ([]string, error) { + files, err := os.ReadDir(dir) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, fmt.Errorf("failed to read directory %s: %w", dir, err) } - if len(cfg.TestCmds) > 0 { - foundCommandLineArgs := false - for _, test := range failedTests { - if test.TestPackage == "command-line-arguments" { - foundCommandLineArgs = true - break + var paths []string + for _, file := range files { + if !file.IsDir() && strings.HasSuffix(file.Name(), ".json") { + absPath, err := filepath.Abs(filepath.Join(dir, file.Name())) + if err != nil { + log.Warn().Err(err).Str("file", file.Name()).Msg("Failed to get absolute path for JSON output file") + continue } + paths = append(paths, absPath) } - - if foundCommandLineArgs { - warningMsg := "WARNING: Skipping all reruns because 'go test <file>.go' was detected within --test-cmd. " + - "Flakeguard cannot reliably rerun these tests as it loses the original directory context. " + - "Results are based on the initial run only. To enable reruns, use 'go test . -run TestPattern' instead of 'go test <file>.go' within your --test-cmd."
- log.Warn().Msg(warningMsg) - fmt.Fprint(&exitHandler.buffer, "\nFailed Tests On The First Run:\n\n") - reports.PrintTestResultsTable(&exitHandler.buffer, failedTests, false, false, true, false, false, false) - fmt.Fprintf(&exitHandler.buffer, "\n\n%s\n", warningMsg) - handleNoReruns(exitHandler, mainReport, cfg) - return - } - } - - fmt.Fprint(&exitHandler.buffer, "\nFailed Tests On The First Run:\n\n") - reports.PrintTestResultsTable(&exitHandler.buffer, failedTests, false, false, true, false, false, false) - fmt.Fprintln(&exitHandler.buffer) - - log.Info().Int("count", len(failedTests)).Int("rerun_count", cfg.RerunFailedCount).Msg("Rerunning failed tests...") - - s := spinner.New(spinner.CharSets[14], 100*time.Millisecond) - s.Suffix = " Rerunning failed tests..." - s.Start() - rerunResults, rerunJsonOutputPaths, err := testRunner.RerunFailedTests(failedTests, cfg.RerunFailedCount) - s.Stop() - - if err != nil { - exitHandler.logErrorAndExit(err, "Error rerunning failed tests") - } - - rerunReport, err := reports.NewTestReport(rerunResults, - reports.WithGoProject(goProject), - reports.WithCodeOwnersPath(cfg.CodeownersPath), - reports.WithMaxPassRatio(1), - reports.WithExcludedTests(cfg.SkipTests), - reports.WithSelectedTests(cfg.SelectTests), - reports.WithJSONOutputPaths(rerunJsonOutputPaths), - ) - if err != nil { - exitHandler.logErrorAndExit(err, "Error creating rerun test report") } + return paths, nil +} - fmt.Fprint(&exitHandler.buffer, "\nTests After Rerun:\n\n") - reports.PrintTestResultsTable(&exitHandler.buffer, rerunResults, false, false, true, true, true, true) - fmt.Fprintln(&exitHandler.buffer) - - // Save the rerun test report to file - if cfg.RerunResultsPath != "" && len(rerunResults) > 0 { - if err := reports.SaveTestResultsToFile(rerunResults, cfg.RerunResultsPath); err != nil { - log.Error().Err(err).Str("path", cfg.RerunResultsPath).Msg("Error saving rerun test results to file") - } else { - log.Info().Str("path", cfg.RerunResultsPath).Msg("Rerun test report saved") +// countResults counts the number of passed, failed, and skipped tests. +func countResults(results []reports.TestResult) (passed, failed, skipped int) { + for _, r := range results { + if r.Skipped { + skipped++ + } else if r.Failures == 0 && r.Runs > 0 { + passed++ + } else if r.Failures > 0 { + failed++ } } + return +} - // Filter tests that still failed after reruns (0 successes) - failedAfterRerun := reports.FilterTests(rerunResults, func(tr reports.TestResult) bool { - return !tr.Skipped && tr.Successes == 0 - }) - - if len(failedAfterRerun) > 0 { - fmt.Fprint(&exitHandler.buffer, "\nPersistently Failing Test Logs:\n\n") - err := rerunReport.PrintGotestsumOutput(&exitHandler.buffer, "pkgname") - if err != nil { - log.Error().Err(err).Msg("Error printing gotestsum output for persistently failing tests") +// handleCmdLineArgsEdgeCase checks for and handles the 'go test file.go' edge case. +// Returns true if the edge case was detected and handled, false otherwise. 
+func handleCmdLineArgsEdgeCase(outputMgr *outputManager, failedTests []reports.TestResult, cfg *runConfig) bool { + foundCommandLineArgs := false + if len(cfg.TestCmds) > 0 { + for _, test := range failedTests { + if test.TestPackage == "command-line-arguments" { + foundCommandLineArgs = true + break + } } - exitHandler.logMsgAndExit(zerolog.ErrorLevel, "Some tests are still failing after multiple reruns with no successful attempts.", ErrorExitCode, map[string]interface{}{ - "persistently_failing_count": len(failedAfterRerun), - "rerun_attempts": cfg.RerunFailedCount, - }) - } else { - log.Info().Msg("All initially failing tests passed at least once after reruns.") - exitHandler.code = 0 - exitHandler.flush() - } -} -// handleNoReruns determines the outcome when reruns are disabled. -func handleNoReruns(exitHandler *summaryAndExit, mainReport *reports.TestReport, cfg *runConfig) { - flakyTests := reports.FilterTests(mainReport.Results, func(tr reports.TestResult) bool { - return !tr.Skipped && tr.PassRatio < cfg.MinPassRatio - }) - - fmt.Fprint(&exitHandler.buffer, "\nFlakeguard Summary\n") - reports.RenderTestReport(&exitHandler.buffer, *mainReport, false, false) - - if len(flakyTests) > 0 { - exitHandler.logMsgAndExit(zerolog.InfoLevel, "Found flaky tests.", FlakyTestsExitCode, map[string]interface{}{ - "flaky_count": len(flakyTests), - "stability_threshold": fmt.Sprintf("%.0f%%", cfg.MinPassRatio*100), - }) - } else { - log.Info().Msg("All tests passed stability requirements.") - exitHandler.code = 0 - exitHandler.flush() + if foundCommandLineArgs { + warningMsg := "WARNING: Skipping reruns because 'go test <file>.go' was detected within --test-cmd. " + + "Flakeguard cannot reliably rerun these tests. " + + "Final results will be based on the initial run only. " + + "To enable reruns, use 'go test . -run TestPattern' instead of 'go test <file>.go' within your --test-cmd." + log.Warn().Msg(warningMsg) + outputMgr.detail("%s", warningMsg) + return true } + return false } // init sets up the cobra command flags. @@ -402,12 +559,14 @@ func init() { RunTestsCmd.Flags().String("test-packages-json", "", "JSON-encoded string of test packages") RunTestsCmd.Flags().StringSlice("test-packages", nil, "Comma-separated list of test packages to run") RunTestsCmd.Flags().StringArray("test-cmd", nil, - "Optional custom test command(s) (e.g. 'go test -json ./... -v'), which must produce 'go test -json' output. Can be specified multiple times.", + "Optional custom test command(s) (e.g. 'go test -json ./... -v'), which must produce 'go test -json' output. "+ + "Avoid 'go test <file>.go' syntax as it prevents reliable reruns. Use 'go test . -run TestName' instead. 
"+ + "Can be specified multiple times.", ) RunTestsCmd.Flags().StringSlice("skip-tests", nil, "Comma-separated list of test names (regex supported by `go test -skip`) to skip") RunTestsCmd.Flags().StringSlice("select-tests", nil, "Comma-separated list of test names (regex supported by `go test -run`) to specifically run") RunTestsCmd.Flags().IntP("run-count", "c", 1, "Number of times to run the tests (for main run)") - RunTestsCmd.Flags().Int("rerun-failed-count", 0, "Number of times to rerun tests that did not achieve 100% pass rate in the main run (0 disables reruns)") + RunTestsCmd.Flags().Int("rerun-failed-count", 0, "Number of times to rerun tests that failed the main run (0 disables reruns)") RunTestsCmd.Flags().StringArray("tags", nil, "Passed on to the 'go test' command as the -tags flag") RunTestsCmd.Flags().String("go-test-timeout", "", "Passed on to the 'go test' command as the -timeout flag (e.g., '30m')") RunTestsCmd.Flags().Int("go-test-count", -1, "Passes the '-count' flag directly to 'go test'. Default (-1) omits the flag.") @@ -417,9 +576,10 @@ func init() { RunTestsCmd.Flags().Bool("fail-fast", false, "Stop test execution on the first failure (-failfast flag for 'go test')") RunTestsCmd.Flags().String("main-results-path", "", "Path to save the main test results (JSON format)") RunTestsCmd.Flags().String("rerun-results-path", "", "Path to save the rerun test results (JSON format)") - RunTestsCmd.Flags().Bool("omit-test-outputs-on-success", true, "Omit test outputs and package outputs for tests that pass all runs") + RunTestsCmd.Flags().Bool("omit-test-outputs-on-success", true, "DEPRECATED: No longer used, as all logs are shown at the end.") + _ = RunTestsCmd.Flags().MarkDeprecated("omit-test-outputs-on-success", "no longer used, as all logs are shown at the end.") RunTestsCmd.Flags().Bool("ignore-parent-failures-on-subtests", false, "Ignore failures in parent tests when only subtests fail (affects parsing)") - RunTestsCmd.Flags().Float64("min-pass-ratio", 1.0, "The minimum pass ratio (0.0-1.0) required for a test in the main run to be considered stable.") + RunTestsCmd.Flags().Float64("min-pass-ratio", 1.0, "The minimum pass ratio (0.0-1.0) required for a test in the main run to be considered stable (relevant only if reruns are disabled).") RunTestsCmd.Flags().Float64("max-pass-ratio", 1.0, "DEPRECATED: Use --min-pass-ratio instead. This flag will be removed in a future version.") _ = RunTestsCmd.Flags().MarkDeprecated("max-pass-ratio", "use --min-pass-ratio instead") } @@ -432,11 +592,13 @@ func checkDependencies(projectPath string) error { var out bytes.Buffer cmd.Stdout = &out - cmd.Stderr = &out // Capture stderr as well + cmd.Stderr = &out if err := cmd.Run(); err != nil { - return fmt.Errorf("dependency check ('go mod tidy') failed: %w\n%s", err, out.String()) + log.Warn().Err(err).Str("output", out.String()).Msg("Dependency check ('go mod tidy') failed. 
Continuing execution, but dependencies might be inconsistent.") + return fmt.Errorf("dependency check ('go mod tidy') failed: %w - %s", err, out.String()) + } else { + log.Debug().Msg("'go mod tidy' completed successfully.") } - log.Debug().Msg("'go mod tidy' completed successfully.") return nil } diff --git a/tools/flakeguard/main.go b/tools/flakeguard/main.go index 9c15b8fbd..460c37654 100644 --- a/tools/flakeguard/main.go +++ b/tools/flakeguard/main.go @@ -1,6 +1,7 @@ package main import ( + "io" "os" "time" @@ -30,7 +31,7 @@ func Execute() { func init() { zerolog.TimeFieldFormat = time.RFC3339Nano log.Logger = log.Output(zerolog.ConsoleWriter{ - Out: os.Stderr, + Out: io.Discard, TimeFormat: "15:04:05.00", // hh:mm:ss.ss format }) zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack diff --git a/tools/flakeguard/runner/runner.go b/tools/flakeguard/runner/runner.go index 196a93b3e..1f7e0f41d 100644 --- a/tools/flakeguard/runner/runner.go +++ b/tools/flakeguard/runner/runner.go @@ -13,10 +13,6 @@ import ( "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/parser" ) -const ( - RawOutputDir = "./flakeguard_raw_output" -) - // Runner describes the test run parameters and manages test execution and result parsing. // It delegates command execution to an Executor and result parsing to a Parser. type Runner struct { @@ -33,8 +29,8 @@ type Runner struct { FailFast bool SkipTests []string SelectTests []string + RawOutputDir string - // Configuration passed down to the parser IgnoreParentFailuresOnSubtests bool OmitOutputsOnSuccess bool @@ -48,7 +44,6 @@ type Runner struct { func NewRunner( projectPath string, verbose bool, - // Runner specific config runCount int, goTestCountFlag *int, goTestRaceFlag bool, @@ -59,12 +54,11 @@ func NewRunner( failFast bool, skipTests []string, selectTests []string, - // Parser specific config (passed during initialization) ignoreParentFailuresOnSubtests bool, omitOutputsOnSuccess bool, - // Dependencies (allow injection for testing) + rawOutputDir string, exec executor.Executor, - p parser.Parser, // Use interface type directly + p parser.Parser, ) *Runner { if exec == nil { exec = executor.NewCommandExecutor() @@ -87,6 +81,7 @@ func NewRunner( SelectTests: selectTests, IgnoreParentFailuresOnSubtests: ignoreParentFailuresOnSubtests, OmitOutputsOnSuccess: omitOutputsOnSuccess, + RawOutputDir: rawOutputDir, exec: exec, parser: p, } @@ -105,7 +100,7 @@ func (r *Runner) getExecutorConfig() executor.Config { ShuffleSeed: r.ShuffleSeed, SkipTests: r.SkipTests, SelectTests: r.SelectTests, - RawOutputDir: RawOutputDir, // Use the constant defined in this package + RawOutputDir: r.RawOutputDir, } } diff --git a/tools/flakeguard/runner/runner_integration_test.go b/tools/flakeguard/runner/runner_integration_test.go index 5971655f4..3c1da858d 100644 --- a/tools/flakeguard/runner/runner_integration_test.go +++ b/tools/flakeguard/runner/runner_integration_test.go @@ -176,6 +176,9 @@ func TestRunIntegration(t *testing.T) { absProjectPath, err := filepath.Abs(tc.cfg.ProjectPath) require.NoError(t, err) + tempDir, err := os.MkdirTemp("", "flakeguard-test") + require.NoError(t, err) + testRunner := runner.NewRunner( absProjectPath, false, @@ -191,6 +194,7 @@ func TestRunIntegration(t *testing.T) { tc.cfg.SelectTests, tc.cfg.IgnoreSubtestErr, tc.cfg.OmitOutputs, + tempDir, nil, // Use default executor nil, // Use default parser ) diff --git a/tools/flakeguard/runner/runner_test.go b/tools/flakeguard/runner/runner_test.go index e14c25260..f5c189b4d 
100644 --- a/tools/flakeguard/runner/runner_test.go +++ b/tools/flakeguard/runner/runner_test.go @@ -186,6 +186,7 @@ func TestRunner_RunTestPackages(t *testing.T) { nil, // selectTests tc.expectedParseArgs.cfg.IgnoreParentFailuresOnSubtests, tc.expectedParseArgs.cfg.OmitOutputsOnSuccess, + "", mockExec, mockParse, ) @@ -310,6 +311,7 @@ func TestRunner_RunTestCmd(t *testing.T) { tc.runCount, nil, false, "", nil, false, "", tc.failFast, nil, nil, tc.expectedParseArgs.cfg.IgnoreParentFailuresOnSubtests, tc.expectedParseArgs.cfg.OmitOutputsOnSuccess, + "", mockExec, mockParse, ) @@ -433,7 +435,9 @@ func TestRunner_RerunFailedTests(t *testing.T) { } mockParse := &mockParser{} - r := runner.NewRunner(".", false, 0, nil, false, "", nil, false, "", false, nil, nil, false, false, mockExec, mockParse) + r := runner.NewRunner(".", false, 0, nil, false, "", nil, false, "", false, nil, nil, false, false, + "", + mockExec, mockParse) actualResults, _, err := r.RerunFailedTests(tc.initialFailedTests, tc.rerunCount)
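Usage note (not part of the patch itself): a minimal sketch of how the reworked command is intended to be invoked, assuming the built binary is run as 'flakeguard run'; the package pattern and result paths below are placeholders, the flags are the ones registered in init() above, and the exit codes follow the command's Long help (0 = all tests stable, 1 = flaky or persistently failing tests, 2 = execution error).

    flakeguard run \
        --test-packages ./... \
        --run-count 3 \
        --rerun-failed-count 2 \
        --main-results-path main_results.json \
        --rerun-results-path rerun_results.json
    echo $?    # 0, 1, or 2 per the exit codes documented in the command's Long description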