diff --git a/tools/flakeguard/Makefile b/tools/flakeguard/Makefile index 12c86ff31..b6e42f25f 100644 --- a/tools/flakeguard/Makefile +++ b/tools/flakeguard/Makefile @@ -1,12 +1,12 @@ .PHONY: test_unit test_unit: - go list ./... | grep -v 'example_test_package' | xargs go test -timeout 5m -json -cover -covermode=count -coverprofile=unit-test-coverage.out 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci + go test ./... -timeout 5m -json -cover -covermode=count -coverprofile=unit-test-coverage.out 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci .PHONY: test test: go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest set -euo pipefail - go list ./... | grep -v 'example_test_package' | xargs go test -json -cover -coverprofile unit-test-coverage.out -v 2>&1 | tee /tmp/gotest.log | gotestfmt + go test ./... -json -cover -coverprofile unit-test-coverage.out -v 2>&1 | tee /tmp/gotest.log | gotestfmt .PHONY: test-package test-package: @@ -16,7 +16,7 @@ test-package: .PHONY: test-race test-race: - go list ./... | grep -v 'example_test_package' | xargs go test -count=1 -race + go test ./... -count=1 -race .PHONY: bench bench: diff --git a/tools/flakeguard/cmd/run.go b/tools/flakeguard/cmd/run.go index a2d821099..fa243f095 100644 --- a/tools/flakeguard/cmd/run.go +++ b/tools/flakeguard/cmd/run.go @@ -9,337 +9,434 @@ import ( "time" "github.com/briandowns/spinner" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner" "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/utils" - "github.com/spf13/cobra" ) const ( - // FlakyTestsExitCode indicates that Flakeguard ran correctly and was able to identify flaky tests FlakyTestsExitCode = 1 - // ErrorExitCode indicates that Flakeguard ran into an error and was not able to complete operation - ErrorExitCode = 2 + ErrorExitCode = 2 ) +type runConfig struct { + ProjectPath string + CodeownersPath string + TestPackagesJson string + TestPackages []string + TestCmds []string + RunCount int + RerunFailedCount int + Tags []string + UseRace bool + MainResultsPath string + RerunResultsPath string + MinPassRatio float64 + SkipTests []string + SelectTests []string + UseShuffle bool + ShuffleSeed string + OmitOutputsOnSuccess bool + IgnoreParentFailuresOnSubtests bool + FailFast bool + GoTestTimeout string + GoTestCount *int +} + +type summaryAndExit struct { + buffer bytes.Buffer + code int +} + +func (s *summaryAndExit) flush() { + fmt.Print(s.buffer.String()) + os.Exit(s.code) +} + +func (s *summaryAndExit) logErrorAndExit(err error, msg string, fields ...map[string]interface{}) { + l := log.Error().Err(err) + if len(fields) > 0 { + l = l.Fields(fields[0]) + } + l.Msg(msg) + s.code = ErrorExitCode + s.flush() +} + +func (s *summaryAndExit) logMsgAndExit(level zerolog.Level, msg string, code int, fields ...map[string]interface{}) { + l := log.WithLevel(level) + if len(fields) > 0 { + l = l.Fields(fields[0]) + } + l.Msg(msg) + s.code = code + s.flush() +} + var RunTestsCmd = &cobra.Command{ Use: "run", Short: "Run tests to check if they are flaky", Run: func(cmd *cobra.Command, args []string) { - // Create a buffer to accumulate all summary output. - var summaryBuffer bytes.Buffer + exitHandler := &summaryAndExit{} - // Helper function to flush the summary buffer and exit. 
- flushSummaryAndExit := func(code int) { - fmt.Print(summaryBuffer.String()) - os.Exit(code) - } - - // Retrieve flags - projectPath, _ := cmd.Flags().GetString("project-path") - codeownersPath, _ := cmd.Flags().GetString("codeowners-path") - testPackagesJson, _ := cmd.Flags().GetString("test-packages-json") - testPackagesArg, _ := cmd.Flags().GetStringSlice("test-packages") - testCmdStrings, _ := cmd.Flags().GetStringArray("test-cmd") - runCount, _ := cmd.Flags().GetInt("run-count") - rerunFailedCount, _ := cmd.Flags().GetInt("rerun-failed-count") - tags, _ := cmd.Flags().GetStringArray("tags") - useRace, _ := cmd.Flags().GetBool("race") - mainResultsPath, _ := cmd.Flags().GetString("main-results-path") - rerunResultsPath, _ := cmd.Flags().GetString("rerun-results-path") - minPassRatio, _ := cmd.Flags().GetFloat64("min-pass-ratio") - // For backward compatibility, check if max-pass-ratio was used - maxPassRatio, _ := cmd.Flags().GetFloat64("max-pass-ratio") - maxPassRatioSpecified := cmd.Flags().Changed("max-pass-ratio") - skipTests, _ := cmd.Flags().GetStringSlice("skip-tests") - selectTests, _ := cmd.Flags().GetStringSlice("select-tests") - useShuffle, _ := cmd.Flags().GetBool("shuffle") - shuffleSeed, _ := cmd.Flags().GetString("shuffle-seed") - omitOutputsOnSuccess, _ := cmd.Flags().GetBool("omit-test-outputs-on-success") - ignoreParentFailuresOnSubtests, _ := cmd.Flags().GetBool("ignore-parent-failures-on-subtests") - failFast, _ := cmd.Flags().GetBool("fail-fast") - goTestTimeoutFlag, _ := cmd.Flags().GetString("go-test-timeout") - - goProject, err := utils.GetGoProjectName(projectPath) + cfg, err := parseAndValidateFlags(cmd) if err != nil { - log.Warn().Err(err).Str("projectPath", goProject).Msg("Failed to get pretty project path") + exitHandler.logErrorAndExit(err, "Failed to parse or validate flags") } - projectPath, err = utils.ResolveFullPath(projectPath) + goProject, err := utils.GetGoProjectName(cfg.ProjectPath) if err != nil { - log.Error().Err(err).Str("projectPath", projectPath).Msg("Failed to resolve full path for project path") - flushSummaryAndExit(ErrorExitCode) + log.Warn().Err(err).Str("projectPath", cfg.ProjectPath).Msg("Failed to get pretty project path") } - if mainResultsPath != "" { - mainResultsPath, err = utils.ResolveFullPath(mainResultsPath) - if err != nil { - log.Error().Err(err).Str("mainResultsPath", mainResultsPath).Msg("Failed to resolve full path for main results path") - flushSummaryAndExit(ErrorExitCode) - } + if err := checkDependencies(cfg.ProjectPath); err != nil { + exitHandler.logErrorAndExit(err, "Error checking project dependencies") } - if rerunResultsPath != "" { - rerunResultsPath, err = utils.ResolveFullPath(rerunResultsPath) - if err != nil { - log.Error().Err(err).Str("rerunResultsPath", rerunResultsPath).Msg("Failed to resolve full path for rerun results path") - flushSummaryAndExit(ErrorExitCode) - } + testPackages, err := determineTestPackages(cfg) + if err != nil { + exitHandler.logErrorAndExit(err, "Failed to determine test packages") } - // Retrieve go-test-count flag as a pointer if explicitly provided. 
- var goTestCountFlag *int - if cmd.Flags().Changed("go-test-count") { - v, err := cmd.Flags().GetInt("go-test-count") - if err != nil { - log.Error().Err(err).Msg("Error retrieving flag go-test-count") - flushSummaryAndExit(ErrorExitCode) - } - goTestCountFlag = &v - } + testRunner := initializeRunner(cfg) - // Handle the compatibility between min/max pass ratio - passRatioThreshold := minPassRatio - if maxPassRatioSpecified && maxPassRatio != 1.0 { - // If max-pass-ratio was explicitly set, use it (convert to min-pass-ratio) - log.Warn().Msg("--max-pass-ratio is deprecated, please use --min-pass-ratio instead") - passRatioThreshold = maxPassRatio - } + s := spinner.New(spinner.CharSets[14], 100*time.Millisecond) + s.Suffix = " Running tests..." + s.Start() - // Validate pass ratio - if passRatioThreshold < 0 || passRatioThreshold > 1 { - log.Error().Float64("pass ratio", passRatioThreshold).Msg("Error: pass ratio must be between 0 and 1") - flushSummaryAndExit(ErrorExitCode) + var mainResults []reports.TestResult + var runErr error + if len(cfg.TestCmds) > 0 { + s.Suffix = " Running custom test command..." + mainResults, runErr = testRunner.RunTestCmd(cfg.TestCmds) + } else { + s.Suffix = " Running test packages..." + mainResults, runErr = testRunner.RunTestPackages(testPackages) } + s.Stop() - // Check if project dependencies are correctly set up - if err := checkDependencies(projectPath); err != nil { - log.Error().Err(err).Msg("Error checking project dependencies") - flushSummaryAndExit(ErrorExitCode) + if runErr != nil { + exitHandler.logErrorAndExit(runErr, "Error running tests") + } + if len(mainResults) == 0 { + exitHandler.logMsgAndExit(zerolog.ErrorLevel, "No tests were run.", ErrorExitCode) } - // Determine test packages - var testPackages []string - if len(testCmdStrings) == 0 { - if testPackagesJson != "" { - if err := json.Unmarshal([]byte(testPackagesJson), &testPackages); err != nil { - log.Error().Err(err).Msg("Error decoding test packages JSON") - flushSummaryAndExit(ErrorExitCode) - } - } else if len(testPackagesArg) > 0 { - testPackages = testPackagesArg + mainReport, err := generateMainReport(mainResults, cfg, goProject) + if err != nil { + exitHandler.logErrorAndExit(err, "Error creating main test report") + } + if cfg.MainResultsPath != "" { + if err := reports.SaveTestResultsToFile(mainResults, cfg.MainResultsPath); err != nil { + log.Error().Err(err).Str("path", cfg.MainResultsPath).Msg("Error saving main test results to file") } else { - log.Error().Msg("Error: must specify either --test-packages-json or --test-packages") - flushSummaryAndExit(ErrorExitCode) + log.Info().Str("path", cfg.MainResultsPath).Msg("Main test report saved") } } - // Initialize the runner - testRunner := runner.Runner{ - ProjectPath: projectPath, - Verbose: true, - RunCount: runCount, - GoTestTimeoutFlag: goTestTimeoutFlag, - Tags: tags, - GoTestCountFlag: goTestCountFlag, - GoTestRaceFlag: useRace, - SkipTests: skipTests, - SelectTests: selectTests, - UseShuffle: useShuffle, - ShuffleSeed: shuffleSeed, - OmitOutputsOnSuccess: omitOutputsOnSuccess, - MaxPassRatio: passRatioThreshold, // Use the calculated threshold - IgnoreParentFailuresOnSubtests: ignoreParentFailuresOnSubtests, - FailFast: failFast, + if cfg.RerunFailedCount > 0 { + handleReruns(exitHandler, testRunner, mainReport, cfg, goProject) + } else { + handleNoReruns(exitHandler, mainReport, cfg) } - // Run the tests - var mainResults []reports.TestResult - if len(testCmdStrings) > 0 { - s := spinner.New(spinner.CharSets[14], 
100*time.Millisecond) - s.Suffix = " Running custom test command..." - s.Start() - mainResults, err = testRunner.RunTestCmd(testCmdStrings) - if err != nil { - log.Fatal().Err(err).Msg("Error running custom test command") - flushSummaryAndExit(ErrorExitCode) - } - s.Stop() - } else { - mainResults, err = testRunner.RunTestPackages(testPackages) - if err != nil { - log.Fatal().Err(err).Msg("Error running test packages") - flushSummaryAndExit(ErrorExitCode) - } + exitHandler.code = 0 + exitHandler.flush() + }, +} + +// parseAndValidateFlags parses flags from the cobra command, validates them, and returns a runConfig. +func parseAndValidateFlags(cmd *cobra.Command) (*runConfig, error) { + cfg := &runConfig{} + var err error + + cfg.ProjectPath, _ = cmd.Flags().GetString("project-path") + cfg.CodeownersPath, _ = cmd.Flags().GetString("codeowners-path") + cfg.TestPackagesJson, _ = cmd.Flags().GetString("test-packages-json") + cfg.TestPackages, _ = cmd.Flags().GetStringSlice("test-packages") + cfg.TestCmds, _ = cmd.Flags().GetStringArray("test-cmd") + cfg.RunCount, _ = cmd.Flags().GetInt("run-count") + cfg.RerunFailedCount, _ = cmd.Flags().GetInt("rerun-failed-count") + cfg.Tags, _ = cmd.Flags().GetStringArray("tags") + cfg.UseRace, _ = cmd.Flags().GetBool("race") + cfg.MainResultsPath, _ = cmd.Flags().GetString("main-results-path") + cfg.RerunResultsPath, _ = cmd.Flags().GetString("rerun-results-path") + cfg.SkipTests, _ = cmd.Flags().GetStringSlice("skip-tests") + cfg.SelectTests, _ = cmd.Flags().GetStringSlice("select-tests") + cfg.UseShuffle, _ = cmd.Flags().GetBool("shuffle") + cfg.ShuffleSeed, _ = cmd.Flags().GetString("shuffle-seed") + cfg.OmitOutputsOnSuccess, _ = cmd.Flags().GetBool("omit-test-outputs-on-success") + cfg.IgnoreParentFailuresOnSubtests, _ = cmd.Flags().GetBool("ignore-parent-failures-on-subtests") + cfg.FailFast, _ = cmd.Flags().GetBool("fail-fast") + cfg.GoTestTimeout, _ = cmd.Flags().GetString("go-test-timeout") + + cfg.ProjectPath, err = utils.ResolveFullPath(cfg.ProjectPath) + if err != nil { + return nil, fmt.Errorf("failed to resolve full path for project path '%s': %w", cfg.ProjectPath, err) + } + if cfg.MainResultsPath != "" { + cfg.MainResultsPath, err = utils.ResolveFullPath(cfg.MainResultsPath) + if err != nil { + return nil, fmt.Errorf("failed to resolve full path for main results path '%s': %w", cfg.MainResultsPath, err) } + } + if cfg.RerunResultsPath != "" { + cfg.RerunResultsPath, err = utils.ResolveFullPath(cfg.RerunResultsPath) + if err != nil { + return nil, fmt.Errorf("failed to resolve full path for rerun results path '%s': %w", cfg.RerunResultsPath, err) + } + } - if len(mainResults) == 0 { - log.Error().Msg("No tests were run.") - flushSummaryAndExit(ErrorExitCode) + if cmd.Flags().Changed("go-test-count") { + v, err := cmd.Flags().GetInt("go-test-count") + if err != nil { + return nil, fmt.Errorf("error retrieving flag go-test-count: %w", err) } + cfg.GoTestCount = &v + } + + minPassRatio, _ := cmd.Flags().GetFloat64("min-pass-ratio") + maxPassRatio, _ := cmd.Flags().GetFloat64("max-pass-ratio") + maxPassRatioSpecified := cmd.Flags().Changed("max-pass-ratio") + + cfg.MinPassRatio = minPassRatio + if maxPassRatioSpecified && maxPassRatio != 1.0 { + log.Warn().Msg("--max-pass-ratio is deprecated, please use --min-pass-ratio instead. 
Using max-pass-ratio value for now.") + cfg.MinPassRatio = maxPassRatio // Use the deprecated value if specified + } + + if cfg.MinPassRatio < 0 || cfg.MinPassRatio > 1 { + return nil, fmt.Errorf("pass ratio must be between 0 and 1, got: %.2f", cfg.MinPassRatio) + } + + return cfg, nil +} + +// determineTestPackages decides which test packages to run based on the config. +func determineTestPackages(cfg *runConfig) ([]string, error) { + if len(cfg.TestCmds) > 0 { + return nil, nil // Not needed if running custom commands + } + + var testPackages []string + if cfg.TestPackagesJson != "" { + if err := json.Unmarshal([]byte(cfg.TestPackagesJson), &testPackages); err != nil { + return nil, fmt.Errorf("error decoding test packages JSON: %w", err) + } + } else if len(cfg.TestPackages) > 0 { + testPackages = cfg.TestPackages + } else { + return nil, fmt.Errorf("must specify either --test-packages-json, --test-packages, or --test-cmd") + } + return testPackages, nil +} + +// initializeRunner creates and configures a new test runner. +func initializeRunner(cfg *runConfig) *runner.Runner { + return runner.NewRunner( + cfg.ProjectPath, + true, + cfg.RunCount, + cfg.GoTestCount, + cfg.UseRace, + cfg.GoTestTimeout, + cfg.Tags, + cfg.UseShuffle, + cfg.ShuffleSeed, + cfg.FailFast, + cfg.SkipTests, + cfg.SelectTests, + cfg.IgnoreParentFailuresOnSubtests, + cfg.OmitOutputsOnSuccess, + nil, // exec + nil, // parser + ) +} + +// generateMainReport creates the initial test report from the main run results. +func generateMainReport(results []reports.TestResult, cfg *runConfig, goProject string) (*reports.TestReport, error) { + reportVal, err := reports.NewTestReport(results, + reports.WithGoProject(goProject), + reports.WithCodeOwnersPath(cfg.CodeownersPath), + reports.WithMaxPassRatio(cfg.MinPassRatio), + reports.WithGoRaceDetection(cfg.UseRace), + reports.WithExcludedTests(cfg.SkipTests), + reports.WithSelectedTests(cfg.SelectTests), + ) + if err != nil { + return nil, err + } + return &reportVal, nil +} - // Save the main test results to file - if mainResultsPath != "" && len(mainResults) > 0 { - if err := reports.SaveTestResultsToFile(mainResults, mainResultsPath); err != nil { - log.Error().Err(err).Msg("Error saving test results to file") - flushSummaryAndExit(ErrorExitCode) +// handleReruns manages the process of rerunning failed tests and reporting results. +func handleReruns(exitHandler *summaryAndExit, testRunner *runner.Runner, mainReport *reports.TestReport, cfg *runConfig, goProject string) { + failedTests := reports.FilterTests(mainReport.Results, func(tr reports.TestResult) bool { + return !tr.Skipped && tr.PassRatio < 1.0 // Rerun only tests that failed completely or partially in the main run + }) + + if len(failedTests) == 0 { + log.Info().Msg("All tests passed the initial run. 
No tests to rerun.") + fmt.Fprint(&exitHandler.buffer, "\nFlakeguard Summary\n") + reports.RenderTestReport(&exitHandler.buffer, *mainReport, false, false) + exitHandler.code = 0 + exitHandler.flush() + return + } + + if len(cfg.TestCmds) > 0 { + foundCommandLineArgs := false + for _, test := range failedTests { + if test.TestPackage == "command-line-arguments" { + foundCommandLineArgs = true + break } - log.Info().Str("path", mainResultsPath).Msg("Main test report saved") } - mainReport, err := reports.NewTestReport(mainResults, - reports.WithGoProject(goProject), - reports.WithCodeOwnersPath(codeownersPath), - reports.WithMaxPassRatio(passRatioThreshold), - reports.WithGoRaceDetection(useRace), - reports.WithExcludedTests(skipTests), - reports.WithSelectedTests(selectTests), - ) - if err != nil { - log.Error().Err(err).Msg("Error creating main test report") - flushSummaryAndExit(ErrorExitCode) + if foundCommandLineArgs { + warningMsg := "WARNING: Skipping all reruns because 'go test ' was detected within --test-cmd. " + + "Flakeguard cannot reliably rerun these tests as it loses the original directory context. " + + "Results are based on the initial run only. To enable reruns, use 'go test . -run TestPattern' instead of 'go test ' within your --test-cmd." + log.Warn().Msg(warningMsg) + fmt.Fprint(&exitHandler.buffer, "\nFailed Tests On The First Run:\n\n") + reports.PrintTestResultsTable(&exitHandler.buffer, failedTests, false, false, true, false, false, false) + fmt.Fprintf(&exitHandler.buffer, "\n\n%s\n", warningMsg) + handleNoReruns(exitHandler, mainReport, cfg) + return } + } - // Rerun failed tests - if rerunFailedCount > 0 { - failedTests := reports.FilterTests(mainReport.Results, func(tr reports.TestResult) bool { - return !tr.Skipped && tr.PassRatio < 1.0 - }) + fmt.Fprint(&exitHandler.buffer, "\nFailed Tests On The First Run:\n\n") + reports.PrintTestResultsTable(&exitHandler.buffer, failedTests, false, false, true, false, false, false) + fmt.Fprintln(&exitHandler.buffer) - if len(failedTests) == 0 { - log.Info().Msg("All tests passed. No tests to rerun.") - flushSummaryAndExit(0) - } + log.Info().Int("count", len(failedTests)).Int("rerun_count", cfg.RerunFailedCount).Msg("Rerunning failed tests...") - fmt.Fprint(&summaryBuffer, "\nFailed Tests On The First Run:\n\n") - reports.PrintTestResultsTable(&summaryBuffer, failedTests, false, false, true, false, false, false) - fmt.Fprintln(&summaryBuffer) + s := spinner.New(spinner.CharSets[14], 100*time.Millisecond) + s.Suffix = " Rerunning failed tests..." 
+ s.Start() + rerunResults, rerunJsonOutputPaths, err := testRunner.RerunFailedTests(failedTests, cfg.RerunFailedCount) + s.Stop() - rerunResults, rerunJsonOutputPaths, err := testRunner.RerunFailedTests(failedTests, rerunFailedCount) - if err != nil { - log.Fatal().Err(err).Msg("Error rerunning failed tests") - flushSummaryAndExit(ErrorExitCode) - } + if err != nil { + exitHandler.logErrorAndExit(err, "Error rerunning failed tests") + } - rerunReport, err := reports.NewTestReport(rerunResults, - reports.WithGoProject(goProject), - reports.WithCodeOwnersPath(codeownersPath), - reports.WithMaxPassRatio(1), - reports.WithExcludedTests(skipTests), - reports.WithSelectedTests(selectTests), - reports.WithJSONOutputPaths(rerunJsonOutputPaths), - ) - if err != nil { - log.Error().Err(err).Msg("Error creating rerun test report") - flushSummaryAndExit(ErrorExitCode) - } + rerunReport, err := reports.NewTestReport(rerunResults, + reports.WithGoProject(goProject), + reports.WithCodeOwnersPath(cfg.CodeownersPath), + reports.WithMaxPassRatio(1), + reports.WithExcludedTests(cfg.SkipTests), + reports.WithSelectedTests(cfg.SelectTests), + reports.WithJSONOutputPaths(rerunJsonOutputPaths), + ) + if err != nil { + exitHandler.logErrorAndExit(err, "Error creating rerun test report") + } - fmt.Fprint(&summaryBuffer, "\nTests After Rerun:\n\n") - reports.PrintTestResultsTable(&summaryBuffer, rerunResults, false, false, true, true, true, true) - fmt.Fprintln(&summaryBuffer) - - // Save the rerun test report to file - if rerunResultsPath != "" && len(rerunResults) > 0 { - if err := reports.SaveTestResultsToFile(rerunResults, rerunResultsPath); err != nil { - log.Error().Err(err).Msg("Error saving test results to file") - flushSummaryAndExit(ErrorExitCode) - } - log.Info().Str("path", rerunResultsPath).Msg("Rerun test report saved") - } + fmt.Fprint(&exitHandler.buffer, "\nTests After Rerun:\n\n") + reports.PrintTestResultsTable(&exitHandler.buffer, rerunResults, false, false, true, true, true, true) + fmt.Fprintln(&exitHandler.buffer) - // Filter tests that failed after reruns - failedAfterRerun := reports.FilterTests(rerunResults, func(tr reports.TestResult) bool { - return !tr.Skipped && tr.Successes == 0 - }) - - if len(failedAfterRerun) > 0 { - fmt.Fprint(&summaryBuffer, "\nLogs:\n\n") - err := rerunReport.PrintGotestsumOutput(&summaryBuffer, "pkgname") - if err != nil { - log.Error().Err(err).Msg("Error printing gotestsum output") - } - - log.Error(). - Int("noSuccessTests", len(failedAfterRerun)). - Int("reruns", rerunFailedCount). - Msg("Some tests are still failing after multiple reruns with no successful attempts.") - flushSummaryAndExit(ErrorExitCode) - } else { - log.Info().Msg("All tests passed at least once after reruns") - flushSummaryAndExit(0) - } + // Save the rerun test report to file + if cfg.RerunResultsPath != "" && len(rerunResults) > 0 { + if err := reports.SaveTestResultsToFile(rerunResults, cfg.RerunResultsPath); err != nil { + log.Error().Err(err).Str("path", cfg.RerunResultsPath).Msg("Error saving rerun test results to file") } else { - // Filter flaky tests using FilterTests - flakyTests := reports.FilterTests(mainReport.Results, func(tr reports.TestResult) bool { - return !tr.Skipped && tr.PassRatio < passRatioThreshold - }) - - if len(flakyTests) > 0 { - log.Info(). - Int("count", len(flakyTests)). - Str("stability threshold", fmt.Sprintf("%.0f%%", passRatioThreshold*100)). 
- Msg("Found flaky tests") - - fmt.Fprint(&summaryBuffer, "\nFlakeguard Summary\n") - reports.RenderTestReport(&summaryBuffer, mainReport, false, false) - flushSummaryAndExit(FlakyTestsExitCode) - } else { - log.Info().Msg("All tests passed stability requirements") - } + log.Info().Str("path", cfg.RerunResultsPath).Msg("Rerun test report saved") } + } - flushSummaryAndExit(0) - }, + // Filter tests that still failed after reruns (0 successes) + failedAfterRerun := reports.FilterTests(rerunResults, func(tr reports.TestResult) bool { + return !tr.Skipped && tr.Successes == 0 + }) + + if len(failedAfterRerun) > 0 { + fmt.Fprint(&exitHandler.buffer, "\nPersistently Failing Test Logs:\n\n") + err := rerunReport.PrintGotestsumOutput(&exitHandler.buffer, "pkgname") + if err != nil { + log.Error().Err(err).Msg("Error printing gotestsum output for persistently failing tests") + } + + exitHandler.logMsgAndExit(zerolog.ErrorLevel, "Some tests are still failing after multiple reruns with no successful attempts.", ErrorExitCode, map[string]interface{}{ + "persistently_failing_count": len(failedAfterRerun), + "rerun_attempts": cfg.RerunFailedCount, + }) + } else { + log.Info().Msg("All initially failing tests passed at least once after reruns.") + exitHandler.code = 0 + exitHandler.flush() + } } +// handleNoReruns determines the outcome when reruns are disabled. +func handleNoReruns(exitHandler *summaryAndExit, mainReport *reports.TestReport, cfg *runConfig) { + flakyTests := reports.FilterTests(mainReport.Results, func(tr reports.TestResult) bool { + return !tr.Skipped && tr.PassRatio < cfg.MinPassRatio + }) + + fmt.Fprint(&exitHandler.buffer, "\nFlakeguard Summary\n") + reports.RenderTestReport(&exitHandler.buffer, *mainReport, false, false) + + if len(flakyTests) > 0 { + exitHandler.logMsgAndExit(zerolog.InfoLevel, "Found flaky tests.", FlakyTestsExitCode, map[string]interface{}{ + "flaky_count": len(flakyTests), + "stability_threshold": fmt.Sprintf("%.0f%%", cfg.MinPassRatio*100), + }) + } else { + log.Info().Msg("All tests passed stability requirements.") + exitHandler.code = 0 + exitHandler.flush() + } +} + +// init sets up the cobra command flags. func init() { RunTestsCmd.Flags().StringP("project-path", "r", ".", "The path to the Go project. Default is the current directory. Useful for subprojects") RunTestsCmd.Flags().StringP("codeowners-path", "", "", "Path to the CODEOWNERS file") RunTestsCmd.Flags().String("test-packages-json", "", "JSON-encoded string of test packages") RunTestsCmd.Flags().StringSlice("test-packages", nil, "Comma-separated list of test packages to run") RunTestsCmd.Flags().StringArray("test-cmd", nil, - "Optional custom test command (e.g. 'go test -json github.com/smartcontractkit/chainlink/integration-tests/smoke -v -run TestForwarderOCR2Basic'), which must produce go test -json output.", + "Optional custom test command(s) (e.g. 'go test -json ./... -v'), which must produce 'go test -json' output. Can be specified multiple times.", ) - RunTestsCmd.Flags().Bool("run-all-packages", false, "Run all test packages in the project. 
This flag overrides --test-packages and --test-packages-json") - RunTestsCmd.Flags().IntP("run-count", "c", 1, "Number of times to run the tests") + RunTestsCmd.Flags().StringSlice("skip-tests", nil, "Comma-separated list of test names (regex supported by `go test -skip`) to skip") + RunTestsCmd.Flags().StringSlice("select-tests", nil, "Comma-separated list of test names (regex supported by `go test -run`) to specifically run") + RunTestsCmd.Flags().IntP("run-count", "c", 1, "Number of times to run the tests (for main run)") + RunTestsCmd.Flags().Int("rerun-failed-count", 0, "Number of times to rerun tests that did not achieve 100% pass rate in the main run (0 disables reruns)") RunTestsCmd.Flags().StringArray("tags", nil, "Passed on to the 'go test' command as the -tags flag") - RunTestsCmd.Flags().String("go-test-timeout", "", "Passed on to the 'go test' command as the -timeout flag") - RunTestsCmd.Flags().Int("go-test-count", -1, "go test -count flag value. By default -count flag is not passed to go test") - RunTestsCmd.Flags().Bool("race", false, "Enable the race detector") - RunTestsCmd.Flags().Bool("shuffle", false, "Enable test shuffling") - RunTestsCmd.Flags().String("shuffle-seed", "", "Set seed for test shuffling. Must be used with --shuffle") - RunTestsCmd.Flags().Bool("fail-fast", false, "Stop on the first test failure") - RunTestsCmd.Flags().String("main-results-path", "", "Path to the main test results in JSON format") - RunTestsCmd.Flags().String("rerun-results-path", "", "Path to the rerun test results in JSON format") - RunTestsCmd.Flags().StringSlice("skip-tests", nil, "Comma-separated list of test names to skip from running") - RunTestsCmd.Flags().StringSlice("select-tests", nil, "Comma-separated list of test names to specifically run") - - // Add the min-pass-ratio flag (new recommended approach) - RunTestsCmd.Flags().Float64("min-pass-ratio", 1.0, "The minimum pass ratio required for a test to be considered stable (0.0-1.0)") - - // Keep max-pass-ratio for backward compatibility but mark as deprecated - RunTestsCmd.Flags().Float64("max-pass-ratio", 1.0, "DEPRECATED: Use min-pass-ratio instead") - RunTestsCmd.Flags().MarkDeprecated("max-pass-ratio", "use min-pass-ratio instead") - - RunTestsCmd.Flags().Bool("omit-test-outputs-on-success", true, "Omit test outputs and package outputs for tests that pass") - RunTestsCmd.Flags().Bool("ignore-parent-failures-on-subtests", false, "Ignore failures in parent tests when only subtests fail") - - // Add rerun failed tests flag - RunTestsCmd.Flags().Int("rerun-failed-count", 0, "Number of times to rerun tests that did not get 100 percent pass ratio (0 disables reruns)") + RunTestsCmd.Flags().String("go-test-timeout", "", "Passed on to the 'go test' command as the -timeout flag (e.g., '30m')") + RunTestsCmd.Flags().Int("go-test-count", -1, "Passes the '-count' flag directly to 'go test'. Default (-1) omits the flag.") + RunTestsCmd.Flags().Bool("race", false, "Enable the race detector (-race flag for 'go test')") + RunTestsCmd.Flags().Bool("shuffle", false, "Enable test shuffling ('go test -shuffle=on')") + RunTestsCmd.Flags().String("shuffle-seed", "", "Set seed for test shuffling. Requires --shuffle. 
('go test -shuffle=on -shuffle.seed=...')") + RunTestsCmd.Flags().Bool("fail-fast", false, "Stop test execution on the first failure (-failfast flag for 'go test')") + RunTestsCmd.Flags().String("main-results-path", "", "Path to save the main test results (JSON format)") + RunTestsCmd.Flags().String("rerun-results-path", "", "Path to save the rerun test results (JSON format)") + RunTestsCmd.Flags().Bool("omit-test-outputs-on-success", true, "Omit test outputs and package outputs for tests that pass all runs") + RunTestsCmd.Flags().Bool("ignore-parent-failures-on-subtests", false, "Ignore failures in parent tests when only subtests fail (affects parsing)") + RunTestsCmd.Flags().Float64("min-pass-ratio", 1.0, "The minimum pass ratio (0.0-1.0) required for a test in the main run to be considered stable.") + RunTestsCmd.Flags().Float64("max-pass-ratio", 1.0, "DEPRECATED: Use --min-pass-ratio instead. This flag will be removed in a future version.") + _ = RunTestsCmd.Flags().MarkDeprecated("max-pass-ratio", "use --min-pass-ratio instead") } +// checkDependencies runs 'go mod tidy' to ensure dependencies are correct. func checkDependencies(projectPath string) error { + log.Debug().Str("path", projectPath).Msg("Running 'go mod tidy' to check dependencies...") cmd := exec.Command("go", "mod", "tidy") cmd.Dir = projectPath var out bytes.Buffer cmd.Stdout = &out - cmd.Stderr = &out + cmd.Stderr = &out // Capture stderr as well if err := cmd.Run(); err != nil { - return fmt.Errorf("dependency check failed: %v\n%s\nPlease run 'go mod tidy' to fix missing or unused dependencies", err, out.String()) + return fmt.Errorf("dependency check ('go mod tidy') failed: %w\n%s", err, out.String()) } - + log.Debug().Msg("'go mod tidy' completed successfully.") return nil } diff --git a/tools/flakeguard/runner/example_test_package/example_tests_test.go b/tools/flakeguard/runner/example_test_package/example_tests_test.go index 3a4b6edb5..be1590158 100644 --- a/tools/flakeguard/runner/example_test_package/example_tests_test.go +++ b/tools/flakeguard/runner/example_test_package/example_tests_test.go @@ -1,3 +1,6 @@ +//go:build example_package_tests +// +build example_package_tests + package exampletestpackage import ( diff --git a/tools/flakeguard/runner/executor/executor.go b/tools/flakeguard/runner/executor/executor.go new file mode 100644 index 000000000..af7269cff --- /dev/null +++ b/tools/flakeguard/runner/executor/executor.go @@ -0,0 +1,189 @@ +package executor + +import ( + "bytes" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/rs/zerolog/log" +) + +type Config struct { + ProjectPath string + Verbose bool + GoTestCountFlag *int + GoTestRaceFlag bool + GoTestTimeoutFlag string + Tags []string + UseShuffle bool + ShuffleSeed string + SkipTests []string + SelectTests []string + RawOutputDir string +} + +type Executor interface { + RunTestPackage(cfg Config, packageName string, runIndex int) (outputFilePath string, passed bool, err error) + RunCmd(cfg Config, testCmd []string, runIndex int) (outputFilePath string, passed bool, err error) +} + +type exitCoder interface { + ExitCode() int +} + +type commandExecutor struct{} + +func NewCommandExecutor() Executor { + return &commandExecutor{} +} + +// RunTestPackage runs the tests for a given package and returns the path to the output file. 
+func (e *commandExecutor) RunTestPackage(cfg Config, packageName string, runIndex int) (string, bool, error) { + args := []string{"test", packageName, "-json"} + if cfg.GoTestCountFlag != nil { + args = append(args, fmt.Sprintf("-count=%d", *cfg.GoTestCountFlag)) + } + if cfg.GoTestRaceFlag { + args = append(args, "-race") + } + if cfg.GoTestTimeoutFlag != "" { + args = append(args, fmt.Sprintf("-timeout=%s", cfg.GoTestTimeoutFlag)) + } + if len(cfg.Tags) > 0 { + args = append(args, fmt.Sprintf("-tags=%s", strings.Join(cfg.Tags, ","))) + } + if cfg.UseShuffle { + if cfg.ShuffleSeed != "" { + args = append(args, fmt.Sprintf("-shuffle=%s", cfg.ShuffleSeed)) + } else { + args = append(args, "-shuffle=on") + } + } + if len(cfg.SkipTests) > 0 { + skipPattern := strings.Join(cfg.SkipTests, "|") + args = append(args, fmt.Sprintf("-skip=%s", skipPattern)) + } + if len(cfg.SelectTests) > 0 { + selectPattern := strings.Join(cfg.SelectTests, "$|^") + args = append(args, fmt.Sprintf("-run=^%s$", selectPattern)) + } + + err := os.MkdirAll(cfg.RawOutputDir, 0o755) + if err != nil { + return "", false, fmt.Errorf("failed to create raw output directory: %w", err) + } + + saniPackageName := filepath.Base(packageName) + tmpFile, err := os.CreateTemp(cfg.RawOutputDir, fmt.Sprintf("test-output-%s-%d-*.json", saniPackageName, runIndex)) + if err != nil { + return "", false, fmt.Errorf("failed to create temp file: %w", err) + } + tempFilePath := tmpFile.Name() + if err := tmpFile.Close(); err != nil { + log.Warn().Err(err).Str("file", tempFilePath).Msg("Failed to close temporary file handle after creation") + } + + absPath, absErr := filepath.Abs(tempFilePath) + if absErr != nil { + log.Warn().Err(absErr).Str("relative_path", tempFilePath).Msg("Failed to get absolute path for log message, using relative path") + absPath = tempFilePath + } + + if cfg.Verbose { + log.Info().Str("raw_output_file", absPath).Str("command", fmt.Sprintf("go %s", strings.Join(args, " "))).Msg("Running command") + } + + cmd := exec.Command("go", args...) 
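+	// Run from the project directory. Stdout (the 'go test -json' event stream) is
+	// captured in memory and written to the temp file below even when the run fails;
+	// stderr is buffered separately so it can be logged on error.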
+ cmd.Dir = cfg.ProjectPath + var stderrBuf bytes.Buffer + cmd.Stderr = &stderrBuf + + stdoutBytes, err := cmd.Output() + + writeErr := os.WriteFile(tempFilePath, stdoutBytes, 0644) + if writeErr != nil { + log.Error().Err(writeErr).Str("file", tempFilePath).Msg("Failed to write captured stdout to temp file") + _ = os.Remove(tempFilePath) + return "", false, fmt.Errorf("failed to write command output to %s: %w", tempFilePath, writeErr) + } + + if err != nil { + if stderrStr := stderrBuf.String(); stderrStr != "" { + log.Error().Str("package", packageName).Str("stderr", stderrStr).Msg("Command failed with error and stderr output") + } + + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { + return tempFilePath, false, nil + } + _ = os.Remove(tempFilePath) + return "", false, fmt.Errorf("test command execution failed for package %s: %w", packageName, err) + } + + return tempFilePath, true, nil +} + +// RunCmd runs the user-supplied command once, captures its JSON output +func (e *commandExecutor) RunCmd(cfg Config, testCmd []string, runIndex int) (tempFilePath string, passed bool, err error) { + if len(testCmd) == 0 { + return "", false, errors.New("test command cannot be empty") + } + + err = os.MkdirAll(cfg.RawOutputDir, 0o755) + if err != nil { + return "", false, fmt.Errorf("failed to create raw output directory: %w", err) + } + tmpFile, err := os.CreateTemp(cfg.RawOutputDir, fmt.Sprintf("test-output-cmd-run%d-*.json", runIndex+1)) + if err != nil { + return "", false, fmt.Errorf("failed to create temp file for command run: %w", err) + } + tempFilePath = tmpFile.Name() + if err := tmpFile.Close(); err != nil { + log.Warn().Err(err).Str("file", tempFilePath).Msg("Failed to close temporary file handle after creation (cmd run)") + } + + absPath, absErr := filepath.Abs(tempFilePath) + if absErr != nil { + log.Warn().Err(absErr).Str("relative_path", tempFilePath).Msg("Failed to get absolute path for log message (cmd run), using relative path") + absPath = tempFilePath + } + + if cfg.Verbose { + log.Info().Str("raw_output_file", absPath).Str("command", strings.Join(testCmd, " ")).Msg("Running custom command") + } + + cmd := exec.Command(testCmd[0], testCmd[1:]...) 
//nolint:gosec + cmd.Dir = cfg.ProjectPath + var stderrBuf bytes.Buffer + cmd.Stderr = &stderrBuf + + stdoutBytes, err := cmd.Output() + + writeErr := os.WriteFile(tempFilePath, stdoutBytes, 0644) + if writeErr != nil { + log.Error().Err(writeErr).Str("file", tempFilePath).Msg("Failed to write captured stdout to temp file (cmd run)") + _ = os.Remove(tempFilePath) + return "", false, fmt.Errorf("failed to write command output to %s: %w", tempFilePath, writeErr) + } + + if err != nil { + if stderrStr := stderrBuf.String(); stderrStr != "" { + log.Error().Str("command", strings.Join(testCmd, " ")).Str("stderr", stderrStr).Msg("Custom command failed with error and stderr output") + } + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { + passed = false + return tempFilePath, passed, nil + } else { + _ = os.Remove(tempFilePath) + return "", false, fmt.Errorf("error running test command %v: %w", testCmd, err) + } + } + + passed = true + return tempFilePath, passed, nil +} diff --git a/tools/flakeguard/runner/parser/attribution.go b/tools/flakeguard/runner/parser/attribution.go new file mode 100644 index 000000000..45ce19787 --- /dev/null +++ b/tools/flakeguard/runner/parser/attribution.go @@ -0,0 +1,126 @@ +package parser + +import ( + "errors" + "fmt" + "regexp" + "strings" + "time" + + "github.com/rs/zerolog/log" +) + +var ( + nestedTestNameRe = regexp.MustCompile(`\.(Test[^\s]+?)(?:\.[^(]+)?\s*\(`) + testLogAfterTestRe = regexp.MustCompile(`^panic: Log in goroutine after (Test[^\s]+) has completed:`) + didTestTimeoutRe = regexp.MustCompile(`^panic: test timed out after ([^\s]+)`) + timedOutTestNameRe = regexp.MustCompile(`^\s*(Test[^\s]+)\s+\((.*)\)`) + + ErrFailedToAttributePanicToTest = errors.New("failed to attribute panic to test") + ErrFailedToAttributeRaceToTest = errors.New("failed to attribute race to test") + ErrFailedToParseTimeoutDuration = errors.New("failed to parse timeout duration") + ErrFailedToExtractTimeoutDuration = errors.New("failed to extract timeout duration") + ErrDetectedLogAfterCompleteFailedAttribution = errors.New("detected a log after test has completed panic, but failed to properly attribute it") + ErrDetectedTimeoutFailedParse = errors.New("detected test timeout, but failed to parse the duration from the test") + ErrDetectedTimeoutFailedAttribution = errors.New("detected test timeout, but failed to attribute the timeout to a specific test") +) + +// AttributePanicToTest attributes panics to the test that caused them. 
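+// It scans the panic output and tries, in order: a direct "Log in goroutine
+// after TestX has completed" match; for timeout panics, the running test whose
+// listed duration reached the timeout; and, for other panics, the first TestXxx
+// function name found in the stack trace. The timeout result reports whether
+// the panic was a test-timeout panic.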
+func AttributePanicToTest(outputs []string) (test string, timeout bool, err error) { + var ( + timeoutDurationStr string + timeoutDuration time.Duration + foundTestName string + ) + + for _, output := range outputs { + output = strings.TrimSpace(output) + if output == "" { + continue + } + + if match := testLogAfterTestRe.FindStringSubmatch(output); len(match) > 1 { + testName := strings.TrimSpace(match[1]) + log.Debug().Str("test", testName).Str("line", output).Msg("Attributed panic via LogAfterTest pattern") + return testName, false, nil + } + + if match := didTestTimeoutRe.FindStringSubmatch(output); len(match) > 1 { + timeout = true + timeoutDurationStr = match[1] + var parseErr error + timeoutDuration, parseErr = time.ParseDuration(timeoutDurationStr) + if parseErr != nil { + log.Warn().Str("duration_str", timeoutDurationStr).Err(parseErr).Msg("Failed to parse timeout duration from initial panic line") + return "", true, fmt.Errorf("%w: %w using output line: %s", ErrFailedToParseTimeoutDuration, parseErr, output) + } + log.Debug().Dur("duration", timeoutDuration).Msg("Detected timeout panic") + continue + } + + if timeout { + if match := timedOutTestNameRe.FindStringSubmatch(output); len(match) > 2 { + testName := strings.TrimSpace(match[1]) + testDurationStr := strings.TrimSpace(match[2]) + testDuration, parseErr := time.ParseDuration(testDurationStr) + if parseErr != nil { + log.Warn().Str("test", testName).Str("duration_str", testDurationStr).Err(parseErr).Msg("Failed to parse duration from timed-out test line") + return "", true, fmt.Errorf("%w: test '%s' listed with unparseable duration '%s': %w", ErrDetectedTimeoutFailedParse, testName, testDurationStr, parseErr) + } else if testDuration >= timeoutDuration { + log.Debug().Str("test", testName).Dur("test_duration", testDuration).Dur("timeout_duration", timeoutDuration).Msg("Attributed timeout panic via duration match") + return testName, true, nil + } else { + log.Debug().Str("test", testName).Dur("test_duration", testDuration).Dur("timeout_duration", timeoutDuration).Msg("Ignoring test line, duration too short for timeout") + } + } + } + + matchNestedTestName := nestedTestNameRe.FindStringSubmatch(output) + if len(matchNestedTestName) > 1 { + testName := strings.TrimSpace(matchNestedTestName[1]) + if !strings.HasPrefix(testName, "Test") { + continue + } + if foundTestName == "" { + log.Debug().Str("test", testName).Str("line", output).Msg("Found potential test name in panic output") + foundTestName = testName + } + } + } + + if timeout { + var errMsg string + if foundTestName != "" { + errMsg = fmt.Sprintf("timeout duration %s detected, found candidate test '%s' but duration did not meet threshold or failed parsing earlier", timeoutDurationStr, foundTestName) + } else { + errMsg = fmt.Sprintf("timeout duration %s detected, but no matching test found in output", timeoutDurationStr) + } + return "", true, fmt.Errorf("%w: %s: %w", ErrDetectedTimeoutFailedAttribution, errMsg, errors.New(strings.Join(outputs, "\n"))) + } + + if foundTestName != "" { + log.Debug().Str("test", foundTestName).Msg("Attributed non-timeout panic via test name found in stack") + return foundTestName, false, nil + } + + return "", false, fmt.Errorf("%w: using output: %w", ErrFailedToAttributePanicToTest, errors.New(strings.Join(outputs, "\n"))) +} + +// AttributeRaceToTest attributes races to the test that caused them. 
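+// It returns the first TestXxx function name found in the race trace's stack
+// frames (via nestedTestNameRe), or ErrFailedToAttributeRaceToTest when no
+// test frame is present in the output.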
+func AttributeRaceToTest(outputs []string) (string, error) { + for _, output := range outputs { + output = strings.TrimSpace(output) + if output == "" { + continue + } + match := nestedTestNameRe.FindStringSubmatch(output) + if len(match) > 1 { + testName := strings.TrimSpace(match[1]) + if strings.HasPrefix(testName, "Test") { + log.Debug().Str("test", testName).Str("line", output).Msg("Attributed race via test name match") + return testName, nil + } + } + } + return "", fmt.Errorf("%w: using output: %w", ErrFailedToAttributeRaceToTest, errors.New(strings.Join(outputs, "\n"))) +} diff --git a/tools/flakeguard/runner/parser/attribution_test.go b/tools/flakeguard/runner/parser/attribution_test.go new file mode 100644 index 000000000..45e26666d --- /dev/null +++ b/tools/flakeguard/runner/parser/attribution_test.go @@ -0,0 +1,226 @@ +package parser + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAttributePanicToTest(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + expectedTestName string + expectedTimeout bool + expectedError error + outputs []string + }{ + { + name: "properly attributed panic", + expectedTestName: "TestPanic", + expectedTimeout: false, + outputs: []string{ + "panic: This test intentionally panics [recovered]", + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package.TestPanic(0x140000b6ea0?)", + }, + }, + { + name: "improperly attributed panic (but still findable)", + expectedTestName: "TestPanic", + expectedTimeout: false, + outputs: []string{ + "panic: This test intentionally panics [recovered]", + "TestPanic(0x140000b6ea0?)", + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package.TestPanic(0x140000b6ea0?)", + }, + }, + { + name: "log after test complete panic", + expectedTestName: "Test_workflowRegisteredHandler/skips_fetch_if_secrets_url_is_missing", + expectedTimeout: false, + outputs: []string{ + "panic: Log in goroutine after Test_workflowRegisteredHandler/skips_fetch_if_secrets_url_is_missing has completed: ...", + }, + }, + { + name: "timeout panic with obvious culprit", + expectedTestName: "TestTimedOut", + expectedTimeout: true, + outputs: []string{ + "panic: test timed out after 10m0s", + "running tests", + "\tTestNoTimeout (9m59s)", + "\tTestTimedOut (10m0s)", + }, + }, + { + name: "subtest panic", + expectedTestName: "TestSubTestsSomePanic", + expectedTimeout: false, + outputs: []string{ + "panic: This subtest always panics", + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package.TestSubTestsSomePanic.func2(0x140000c81a0?)", + }, + }, + { + name: "memory_test panic extraction", + expectedTestName: "TestJobClientJobAPI", + expectedTimeout: false, + outputs: []string{ + "panic: freeport: cannot allocate port block", + "github.com/smartcontractkit/chainlink/deployment/environment/memory_test.TestJobClientJobAPI(0xc000683dc0)", + }, + }, + { + name: "changeset_test panic extraction", + expectedTestName: "TestDeployBalanceReader", + expectedTimeout: false, + outputs: []string{ + "panic: freeport: cannot allocate port block", + "github.com/smartcontractkit/chainlink/deployment/keystone/changeset_test.TestDeployBalanceReader(0xc000583c00)", + }, + }, + { + name: "empty output", + expectedError: ErrFailedToAttributePanicToTest, + outputs: []string{}, + }, + { + name: "no test name in panic", + expectedError: ErrFailedToAttributePanicToTest, + 
outputs: []string{ + "panic: reflect: Elem of invalid type bool", + }, + }, + { + name: "fail to parse timeout duration", + expectedTimeout: true, + expectedError: ErrFailedToParseTimeoutDuration, + outputs: []string{ + "panic: test timed out after malformedDurationStr\n", + }, + }, + { + name: "timeout panic without obvious culprit", + expectedTimeout: true, + expectedError: ErrDetectedTimeoutFailedAttribution, + outputs: []string{ + "panic: test timed out after 10m0s\n", + "\trunning tests:\n", + "\t\tTestAlmostPanicTime (9m59s)\n", + }, + }, + { + name: "possible regex trip-up (no TestXxx)", + expectedError: ErrFailedToAttributePanicToTest, + outputs: []string{ + "panic: runtime error: invalid memory address or nil pointer dereference\n", + "github.com/smartcontractkit/chainlink/v2/core/services/workflows.newTestEngine.func4(0x0)", + }, + }, + { + name: "Panic with multiple Test names in stack", + expectedTestName: "TestInner", + expectedTimeout: false, + outputs: []string{ + "panic: Something went wrong in helper", + "main.helperFunction()", + "main.TestInner(0xc00...)", + "main.TestOuter(0xc00...)", + }, + }, + { + name: "Timeout with multiple matching durations", + expectedTestName: "TestA", + expectedTimeout: true, + outputs: []string{ + "panic: test timed out after 5m0s", + "running tests:", + "\tTestA (5m0s)", + "\tTestB (4m59s)", + "\tTestC (5m1s)", + "\tTestD (5m0s)", + }, + }, + { + name: "fail to parse test duration in timeout list", + expectedTimeout: true, + expectedError: ErrDetectedTimeoutFailedParse, + outputs: []string{ + "panic: test timed out after 10m0s\n", + "\trunning tests:\n", + "\t\tTestAddAndPromoteCandidatesForNewChain (malformedDurationStr)\n", + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + testName, timeout, err := AttributePanicToTest(tc.outputs) + + assert.Equal(t, tc.expectedTimeout, timeout, "Timeout flag mismatch") + if tc.expectedError != nil { + require.Error(t, err, "Expected an error but got none") + assert.ErrorIs(t, err, tc.expectedError, "Error mismatch") + assert.Empty(t, testName, "Test name should be empty on error") + } else { + require.NoError(t, err, "Expected no error but got one") + assert.Equal(t, tc.expectedTestName, testName, "Test name mismatch") + } + }) + } +} + +func TestAttributeRaceToTest(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + expectedTestName string + expectedError error + outputs []string + }{ + { + name: "properly attributed race", + expectedTestName: "TestRace", + outputs: []string{ + "WARNING: DATA RACE", + " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()", + }, + }, + { + name: "empty output", + expectedError: ErrFailedToAttributeRaceToTest, + outputs: []string{}, + }, + { + name: "no test name in race output", + expectedError: ErrFailedToAttributeRaceToTest, + outputs: []string{ + "WARNING: DATA RACE", + " main.main.func1()", + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + testName, err := AttributeRaceToTest(tc.outputs) + + if tc.expectedError != nil { + require.Error(t, err, "Expected an error but got none") + assert.ErrorIs(t, err, tc.expectedError, "Error mismatch") + assert.Empty(t, testName, "Test name should be empty on error") + } else { + require.NoError(t, err, "Expected no error but got one") + assert.Equal(t, tc.expectedTestName, testName, "Test name mismatch") + } + }) + } 
+} diff --git a/tools/flakeguard/runner/parser/parser.go b/tools/flakeguard/runner/parser/parser.go new file mode 100644 index 000000000..bfb8fab58 --- /dev/null +++ b/tools/flakeguard/runner/parser/parser.go @@ -0,0 +1,618 @@ +package parser + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "github.com/rs/zerolog/log" + + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/go-test-transform/pkg/transformer" + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" +) + +// Constants related to parsing and transformation outputs +const ( + // RawOutputTransformedDir defines the directory where transformed output files are stored. + RawOutputTransformedDir = "./flakeguard_raw_output_transformed" +) + +// Parser-specific errors +var ( + // ErrBuild indicates a failure during the test build phase. (Exported) + ErrBuild = errors.New("failed to build test code") + // errFailedToShowBuild indicates an error occurred while trying to read build failure details. (Internal) + errFailedToShowBuild = errors.New("flakeguard failed to show build errors") +) + +// Parser-specific regexes +var ( + startPanicRe = regexp.MustCompile(`^panic:`) + startRaceRe = regexp.MustCompile(`^WARNING: DATA RACE`) +) + +type entry struct { + Action string `json:"Action"` + Test string `json:"Test"` + Package string `json:"Package"` + Output string `json:"Output"` + Elapsed float64 `json:"Elapsed"` // Decimal value in seconds +} + +func (e entry) String() string { + return fmt.Sprintf("Action: %s, Test: %s, Package: %s, Output: %s, Elapsed: %f", e.Action, e.Test, e.Package, e.Output, e.Elapsed) +} + +type Parser interface { + // ParseFiles takes a list of raw output file paths, processes them (including potential transformation), + // and returns the aggregated test results and the list of file paths that were actually parsed. + ParseFiles(rawFilePaths []string, runPrefix string, expectedRuns int, cfg Config) ([]reports.TestResult, []string, error) +} + +type Config struct { + IgnoreParentFailuresOnSubtests bool + OmitOutputsOnSuccess bool +} + +type defaultParser struct { + transformedOutputFiles []string +} + +func NewParser() Parser { + return &defaultParser{ + transformedOutputFiles: make([]string, 0), + } +} + +// ParseFiles is the main entry point for the parser. +func (p *defaultParser) ParseFiles(rawFilePaths []string, runPrefix string, expectedRuns int, cfg Config) ([]reports.TestResult, []string, error) { + var parseFilePaths = rawFilePaths + + if cfg.IgnoreParentFailuresOnSubtests { + err := p.transformTestOutputFiles(rawFilePaths) + if err != nil { + return nil, nil, fmt.Errorf("failed during output transformation: %w", err) + } + parseFilePaths = p.transformedOutputFiles + } + + results, err := p.parseTestResults(parseFilePaths, runPrefix, expectedRuns, cfg) + if err != nil { + return nil, parseFilePaths, err // Return paths even on error? + } + + return results, parseFilePaths, nil +} + +// rawEventData stores the original event along with its run ID. +type rawEventData struct { + RunID string + Event entry +} + +// testProcessingState holds temporary state while processing events for a single test. 
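+// Output is buffered per run ID and only committed to PassedOutputs or
+// FailedOutputs once that run's terminal action (pass/fail/skip) is known;
+// panic/race output is tracked separately so it can precede the normal
+// output in the failure logs.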
+type testProcessingState struct { + result *reports.TestResult // Pointer to the result being built + processedRunIDs map[string]bool // runID -> true if terminal action processed + runOutcome map[string]string // runID -> "pass", "fail", "skip" + panicRaceOutputByRunID map[string][]string // runID -> []string of panic/race output + temporaryOutputsByRunID map[string][]string // runID -> []string of normal output + panicDetectionMode bool + raceDetectionMode bool + detectedEntries []entry // Raw entries collected during panic/race + key string // Test key (pkg/TestName) + filePath string // File path currently being processed (for logging) +} + +// parseTestResults orchestrates the multi-pass parsing approach. +func (p *defaultParser) parseTestResults(parseFilePaths []string, runPrefix string, totalExpectedRunsPerTest int, cfg Config) ([]reports.TestResult, error) { + eventsByTest, pkgOutputs, subTests, panickedPkgs, racedPkgs, err := p.collectAndGroupEvents(parseFilePaths, runPrefix) + if err != nil { + if errors.Is(err, ErrBuild) { + return nil, err + } + return nil, fmt.Errorf("error during event collection: %w", err) + } + + processedTestDetails, err := p.processEventsPerTest(eventsByTest, cfg) + if err != nil { + return nil, fmt.Errorf("error during event processing: %w", err) + } + + finalResults := p.aggregateAndFinalizeResults(processedTestDetails, subTests, panickedPkgs, racedPkgs, pkgOutputs, totalExpectedRunsPerTest, cfg) + + return finalResults, nil +} + +func (p *defaultParser) collectAndGroupEvents(parseFilePaths []string, runPrefix string) ( + eventsByTest map[string][]rawEventData, + packageLevelOutputs map[string][]string, + testsWithSubTests map[string][]string, + panickedPackages map[string]struct{}, + racePackages map[string]struct{}, + err error, +) { + eventsByTest = make(map[string][]rawEventData) + packageLevelOutputs = make(map[string][]string) + testsWithSubTests = make(map[string][]string) + panickedPackages = make(map[string]struct{}) + racePackages = make(map[string]struct{}) + + runNumber := 0 + for _, filePath := range parseFilePaths { + runNumber++ + runID := fmt.Sprintf("%s%d", runPrefix, runNumber) + file, fileErr := os.Open(filePath) + if fileErr != nil { + err = fmt.Errorf("failed to open test output file '%s': %w", filePath, fileErr) + return + } + + scanner := bufio.NewScanner(file) + parsingErrorOccurred := false + for scanner.Scan() { + lineBytes := scanner.Bytes() + var entryLine entry + if jsonErr := json.Unmarshal(lineBytes, &entryLine); jsonErr != nil { + if !parsingErrorOccurred { + log.Warn().Str("file", filePath).Err(jsonErr).Str("line_content", scanner.Text()).Msg("Failed to parse JSON line, skipping") + parsingErrorOccurred = true + } + continue + } + if entryLine.Action == "build-fail" { + _, seekErr := file.Seek(0, io.SeekStart) + if seekErr != nil { + log.Error().Str("file", filePath).Err(seekErr).Msg("Failed to seek to read build errors") + } + buildErrs, readErr := io.ReadAll(file) + if readErr != nil { + log.Error().Str("file", filePath).Err(readErr).Msg("Failed to read build errors") + } + fmt.Fprintf(os.Stderr, "--- Build Error in %s ---\n%s\n-------------------------\n", filePath, string(buildErrs)) + file.Close() + err = ErrBuild + return + } + if entryLine.Package != "" { + if startPanicRe.MatchString(entryLine.Output) { + panickedPackages[entryLine.Package] = struct{}{} + } + if startRaceRe.MatchString(entryLine.Output) { + racePackages[entryLine.Package] = struct{}{} + } + if entryLine.Test != "" { + key := fmt.Sprintf("%s/%s", 
entryLine.Package, entryLine.Test) + ev := rawEventData{RunID: runID, Event: entryLine} + eventsByTest[key] = append(eventsByTest[key], ev) + parentTestName, subTestName := parseSubTest(entryLine.Test) + if subTestName != "" { + parentTestKey := fmt.Sprintf("%s/%s", entryLine.Package, parentTestName) + if _, ok := testsWithSubTests[parentTestKey]; !ok { + testsWithSubTests[parentTestKey] = []string{} + } + found := false + for _, st := range testsWithSubTests[parentTestKey] { + if st == subTestName { + found = true + break + } + } + if !found { + testsWithSubTests[parentTestKey] = append(testsWithSubTests[parentTestKey], subTestName) + } + } + } else if entryLine.Output != "" { + if _, exists := packageLevelOutputs[entryLine.Package]; !exists { + packageLevelOutputs[entryLine.Package] = []string{} + } + packageLevelOutputs[entryLine.Package] = append(packageLevelOutputs[entryLine.Package], entryLine.Output) + } + } + } + if scanErr := scanner.Err(); scanErr != nil { + file.Close() + err = fmt.Errorf("scanner error reading file '%s': %w", filePath, scanErr) + return + } + file.Close() + } + return +} + +func (p *defaultParser) processEventsPerTest(eventsByTest map[string][]rawEventData, cfg Config) (map[string]*reports.TestResult, error) { + processedTestDetails := make(map[string]*reports.TestResult) + for key, rawEvents := range eventsByTest { + if len(rawEvents) == 0 { + continue + } + firstEvent := rawEvents[0].Event + result := &reports.TestResult{ + TestName: firstEvent.Test, + TestPackage: firstEvent.Package, + PassedOutputs: make(map[string][]string), + FailedOutputs: make(map[string][]string), + PackageOutputs: make([]string, 0), + Durations: make([]time.Duration, 0), + } + state := &testProcessingState{ + result: result, + key: key, + processedRunIDs: make(map[string]bool), + runOutcome: make(map[string]string), + panicRaceOutputByRunID: make(map[string][]string), + temporaryOutputsByRunID: make(map[string][]string), + } + + for _, rawEv := range rawEvents { + p.processEvent(state, rawEv) + } + + p.finalizeOutputs(state, cfg) + result.Runs = len(state.processedRunIDs) + processedTestDetails[key] = result + } + return processedTestDetails, nil +} + +// processEvent is the main dispatcher for processing a single event for a test. +func (p *defaultParser) processEvent(state *testProcessingState, rawEv rawEventData) { + runID := rawEv.RunID + event := rawEv.Event + + // 1. Handle Output / Panic/Race Start Detection + if event.Output != "" { + panicRaceStarted := p.handleOutputEvent(state, event, runID) + if panicRaceStarted || state.panicDetectionMode || state.raceDetectionMode { + if state.panicDetectionMode || state.raceDetectionMode { + state.detectedEntries = append(state.detectedEntries, event) + } + return + } + } + + // 2. Handle Panic/Race Termination + p.handlePanicRaceTermination(state, event, runID) + + // 3. Handle Terminal Actions (only if not already processed by panic/race) + terminalAction := event.Action == "pass" || event.Action == "fail" || event.Action == "skip" + if terminalAction && !state.processedRunIDs[runID] { + p.handleTerminalAction(state, event, runID) + } +} + +// handleOutputEvent handles output collection and panic/race start detection. +// Returns true if panic/race mode started. 
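+// Output lines matching startPanicRe or startRaceRe switch the state into
+// detection mode; otherwise the line is buffered in temporaryOutputsByRunID
+// until the run's terminal action decides where it belongs. While detection
+// mode is active, lines are collected into detectedEntries by the caller.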
+func (p *defaultParser) handleOutputEvent(state *testProcessingState, event entry, runID string) (panicRaceStarted bool) { + if state.panicDetectionMode || state.raceDetectionMode { + return false + } + + if startPanicRe.MatchString(event.Output) { + state.detectedEntries = append(state.detectedEntries, event) + state.panicDetectionMode = true + return true + } + if startRaceRe.MatchString(event.Output) { + state.detectedEntries = append(state.detectedEntries, event) + state.raceDetectionMode = true + return true + } + + if state.temporaryOutputsByRunID[runID] == nil { + state.temporaryOutputsByRunID[runID] = []string{} + } + state.temporaryOutputsByRunID[runID] = append(state.temporaryOutputsByRunID[runID], event.Output) + return false +} + +// handlePanicRaceTermination processes the end of a panic/race block. +func (p *defaultParser) handlePanicRaceTermination(state *testProcessingState, event entry, runID string) { + terminalAction := event.Action == "pass" || event.Action == "fail" || event.Action == "skip" + if !(state.panicDetectionMode || state.raceDetectionMode) || !terminalAction { + return + } + + var outputs []string + for _, de := range state.detectedEntries { + outputs = append(outputs, de.Output) + } + outputStr := strings.Join(outputs, "\n") + currentPackage := event.Package + if currentPackage == "" && len(state.detectedEntries) > 0 { + currentPackage = state.detectedEntries[0].Package + } + + attributedTestName := event.Test + var isTimeout bool + var attrErr error + + if currentPackage == "" { + log.Error().Str("file", state.filePath).Msg("Cannot attribute panic/race: Package context is missing.") + } else { + if state.panicDetectionMode { + attributedTestName, isTimeout, attrErr = AttributePanicToTest(outputs) + if attrErr != nil { + log.Error().Str("test", state.key).Err(attrErr).Str("output", outputStr).Msg("Panic attribution failed") + } + state.result.Panic = true + state.result.Timeout = isTimeout + if state.panicRaceOutputByRunID[runID] == nil { + state.panicRaceOutputByRunID[runID] = []string{} + } + state.panicRaceOutputByRunID[runID] = append(state.panicRaceOutputByRunID[runID], "--- PANIC DETECTED ---") + state.panicRaceOutputByRunID[runID] = append(state.panicRaceOutputByRunID[runID], outputs...) + state.panicRaceOutputByRunID[runID] = append(state.panicRaceOutputByRunID[runID], "--- END PANIC ---") + } else { // raceDetectionMode + attributedTestName, attrErr = AttributeRaceToTest(outputs) + if attrErr != nil { + log.Warn().Str("test", state.key).Err(attrErr).Str("output", outputStr).Msg("Race attribution failed") + } + state.result.Race = true + if state.panicRaceOutputByRunID[runID] == nil { + state.panicRaceOutputByRunID[runID] = []string{} + } + state.panicRaceOutputByRunID[runID] = append(state.panicRaceOutputByRunID[runID], "--- RACE DETECTED ---") + state.panicRaceOutputByRunID[runID] = append(state.panicRaceOutputByRunID[runID], outputs...) 
+ state.panicRaceOutputByRunID[runID] = append(state.panicRaceOutputByRunID[runID], "--- END RACE ---") + } + if attributedTestName != state.result.TestName { + log.Warn().Str("event_test", state.result.TestName).Str("attributed_test", attributedTestName).Msg("Panic/Race attribution mismatch") + } + + // Mark run as processed (failed) if not already done + if !state.processedRunIDs[runID] { + state.result.Failures++ + state.processedRunIDs[runID] = true + state.runOutcome[runID] = "fail" + } + } + + // Reset state + state.detectedEntries = []entry{} + state.panicDetectionMode = false + state.raceDetectionMode = false +} + +// handleTerminalAction processes pass/fail/skip actions. +func (p *defaultParser) handleTerminalAction(state *testProcessingState, event entry, runID string) { + switch event.Action { + case "pass": + state.result.Successes++ + state.runOutcome[runID] = "pass" + case "fail": + state.result.Failures++ + state.runOutcome[runID] = "fail" + case "skip": + state.result.Skips++ + state.result.Skipped = true + state.runOutcome[runID] = "skip" + delete(state.temporaryOutputsByRunID, runID) + } + state.processedRunIDs[runID] = true + + if event.Action == "pass" || event.Action == "fail" { + duration, parseErr := time.ParseDuration(strconv.FormatFloat(event.Elapsed, 'f', -1, 64) + "s") + if parseErr == nil { + state.result.Durations = append(state.result.Durations, duration) + } else { + log.Warn().Str("test", state.key).Float64("elapsed", event.Elapsed).Err(parseErr).Msg("Failed to parse duration") + } + } +} + +// finalizeOutputs moves collected temporary outputs to the correct final map based on run outcome. +func (p *defaultParser) finalizeOutputs(state *testProcessingState, cfg Config) { + for runID, outcome := range state.runOutcome { + normalOutputs := state.temporaryOutputsByRunID[runID] + panicOrRaceOutputs := state.panicRaceOutputByRunID[runID] + + if outcome == "pass" { + if !cfg.OmitOutputsOnSuccess { + if len(normalOutputs) > 0 { + if state.result.PassedOutputs[runID] == nil { + state.result.PassedOutputs[runID] = []string{} + } + state.result.PassedOutputs[runID] = append(state.result.PassedOutputs[runID], normalOutputs...) + } + } + } else if outcome == "fail" { + if len(panicOrRaceOutputs) > 0 || len(normalOutputs) > 0 { + if state.result.FailedOutputs[runID] == nil { + state.result.FailedOutputs[runID] = []string{} + } + } + if len(panicOrRaceOutputs) > 0 { + state.result.FailedOutputs[runID] = append(state.result.FailedOutputs[runID], panicOrRaceOutputs...) + } + if len(normalOutputs) > 0 { + state.result.FailedOutputs[runID] = append(state.result.FailedOutputs[runID], normalOutputs...) + } + if len(state.result.FailedOutputs[runID]) == 0 { + state.result.FailedOutputs[runID] = []string{"--- TEST FAILED (no specific output captured) ---"} + } + } + } +} + +// parseSubTest checks if a test name is a subtest and returns the parent and sub names. +func parseSubTest(testName string) (parentTestName, subTestName string) { + parts := strings.SplitN(testName, "/", 2) + if len(parts) == 1 { + return parts[0], "" + } + return parts[0], parts[1] +} + +// transformTestOutputFiles transforms the test output JSON files to ignore parent failures when only subtests fail. 
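+// Sketch of the intended effect when only a subtest fails (JSON simplified; the
+// real lines follow the 'go test -json' schema): the parent's failure markers
+// are rewritten so the parent run counts as passed:
+//
+//	in:  {"Action":"output","Test":"TestParent","Output":"--- FAIL: TestParent (0.2s)"}
+//	     {"Action":"fail","Test":"TestParent"}
+//	out: {"Action":"output","Test":"TestParent","Output":"--- PASS: TestParent (0.2s)"}
+//	     {"Action":"pass","Test":"TestParent"}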
+func (p *defaultParser) transformTestOutputFiles(filePaths []string) error { + p.transformedOutputFiles = make([]string, 0, len(filePaths)) + err := os.MkdirAll(RawOutputTransformedDir, 0o755) + if err != nil { + return fmt.Errorf("failed to create transformed output directory '%s': %w", RawOutputTransformedDir, err) + } + log.Info().Int("count", len(filePaths)).Msg("Starting transformation of output files") + for i, origPath := range filePaths { + inFile, err := os.Open(origPath) + if err != nil { + return fmt.Errorf("failed to open original file %s for transformation: %w", origPath, err) + } + + baseName := filepath.Base(origPath) + outBaseName := fmt.Sprintf("transformed-%d-%s", i, strings.TrimSuffix(baseName, filepath.Ext(baseName))) + outPath := filepath.Join(RawOutputTransformedDir, outBaseName+".json") + + outFile, err := os.Create(outPath) + if err != nil { + inFile.Close() + return fmt.Errorf("failed to create transformed file '%s': %w", outPath, err) + } + + transformErr := transformer.TransformJSON(inFile, outFile, transformer.NewOptions(true)) + + closeErrIn := inFile.Close() + closeErrOut := outFile.Close() + + if transformErr != nil { + if removeErr := os.Remove(outPath); removeErr != nil { + log.Warn().Str("file", outPath).Err(removeErr).Msg("Failed to remove incomplete transformed file after error") + } + return fmt.Errorf("failed to transform output file %s to %s: %w", origPath, outPath, transformErr) + } + if closeErrIn != nil { + log.Warn().Str("file", origPath).Err(closeErrIn).Msg("Error closing input file after transformation") + } + if closeErrOut != nil { + log.Warn().Str("file", outPath).Err(closeErrOut).Msg("Error closing output file after transformation") + } + + p.transformedOutputFiles = append(p.transformedOutputFiles, outPath) + } + log.Info().Int("count", len(p.transformedOutputFiles)).Msg("Finished transforming output files") + return nil +} + +func (p *defaultParser) aggregateAndFinalizeResults( + processedTestDetails map[string]*reports.TestResult, + testsWithSubTests map[string][]string, + panickedPackages map[string]struct{}, + racePackages map[string]struct{}, + packageLevelOutputs map[string][]string, + totalExpectedRunsPerTest int, + cfg Config, +) []reports.TestResult { + finalResults := make([]reports.TestResult, 0, len(processedTestDetails)) + + // Panic Inheritance + for parentTestKey, subTests := range testsWithSubTests { + if parentTestResult, exists := processedTestDetails[parentTestKey]; exists { + if parentTestResult.Panic { + for _, subTestName := range subTests { + subTestKey := fmt.Sprintf("%s/%s", parentTestKey, subTestName) + if subTestResult, subExists := processedTestDetails[subTestKey]; subExists { + if !subTestResult.Skipped { + subTestResult.Panic = true + if subTestResult.Failures == 0 && subTestResult.Successes > 0 { + log.Warn().Str("subtest", subTestKey).Msg("Marking subtest as failed due to parent panic.") + subTestResult.Failures += subTestResult.Successes + subTestResult.Successes = 0 + for runID, outputs := range subTestResult.PassedOutputs { + if subTestResult.FailedOutputs == nil { + subTestResult.FailedOutputs = make(map[string][]string) + } + subTestResult.FailedOutputs[runID] = append(subTestResult.FailedOutputs[runID], outputs...) 
+ } + subTestResult.PassedOutputs = make(map[string][]string) + } + } + } + } + } + } + } + + // Final Calculation, Correction, Filtering + for key, result := range processedTestDetails { + if !result.Skipped && result.Runs > totalExpectedRunsPerTest { + log.Warn().Str("test", key).Int("actualRuns", result.Runs).Int("expectedRuns", totalExpectedRunsPerTest).Msg("Correcting run count exceeding expected total runs") + targetRuns := totalExpectedRunsPerTest + if result.Panic || result.Race { + newFailures := result.Failures + if newFailures == 0 { + newFailures = 1 + } + if newFailures > targetRuns { + newFailures = targetRuns + } + newSuccesses := targetRuns - newFailures + if newSuccesses < 0 { + newSuccesses = 0 + } + result.Successes = newSuccesses + result.Failures = newFailures + } else { + if result.Runs > 0 { + newSuccesses := int(float64(result.Successes*targetRuns) / float64(result.Runs)) + newFailures := targetRuns - newSuccesses + if newFailures < 0 { + newFailures = 0 + newSuccesses = targetRuns + } + result.Successes = newSuccesses + result.Failures = newFailures + } else { + result.Successes = 0 + result.Failures = targetRuns + } + } + result.Runs = targetRuns + } + + if !result.Skipped { + if result.Runs > 0 { + result.PassRatio = float64(result.Successes) / float64(result.Runs) + } else { + result.PassRatio = 0.0 + } + } else { + result.PassRatio = 1.0 + if result.Runs != 0 { + result.Runs = 0 + } + } + + // Apply package-level flags/outputs + if _, panicked := panickedPackages[result.TestPackage]; panicked && !result.Skipped { + result.PackagePanic = true + } + if outputs, exists := packageLevelOutputs[result.TestPackage]; exists { + result.PackageOutputs = outputs + } + + // Filter out results with no runs and not skipped + if result.Runs > 0 || result.Skipped { + if cfg.OmitOutputsOnSuccess { + result.PassedOutputs = make(map[string][]string) + } + finalResults = append(finalResults, *result) + } + } + + sort.Slice(finalResults, func(i, j int) bool { + if finalResults[i].TestPackage != finalResults[j].TestPackage { + return finalResults[i].TestPackage < finalResults[j].TestPackage + } + return finalResults[i].TestName < finalResults[j].TestName + }) + + return finalResults +} diff --git a/tools/flakeguard/runner/parser/parser_test.go b/tools/flakeguard/runner/parser/parser_test.go new file mode 100644 index 000000000..6b79a0996 --- /dev/null +++ b/tools/flakeguard/runner/parser/parser_test.go @@ -0,0 +1,1077 @@ +package parser + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" +) + +func jsonLine(action, pkg, test, output string, elapsed float64) string { + fixedTime, _ := time.Parse(time.RFC3339Nano, "2024-01-01T10:00:00.000Z") + entry := struct { + Time time.Time + Action string + Package string + Test string `json:",omitempty"` + Output string `json:",omitempty"` + Elapsed float64 `json:",omitempty"` + }{ + Time: fixedTime, + Action: action, + Package: pkg, + Test: test, + Output: output, + } + if action == "pass" || action == "fail" { + entry.Elapsed = elapsed + } + + jsonBytes, err := json.Marshal(entry) + if err != nil { + panic(fmt.Sprintf("test setup error: failed to marshal jsonLine: %v", err)) + } + return string(jsonBytes) +} + +func buildOutput(lines ...string) string { + return strings.Join(lines, "\n") + "\n" +} + +func TestParseTestResults_Basic(t 
*testing.T) { + t.Parallel() + + pkg1 := "github.com/test/package1" + pkg2 := "github.com/test/package2" + + testCases := []struct { + name string + inputFiles map[string]string + cfg Config + expectedResults map[string]reports.TestResult + expectedErrorIs error + expectedErrorMsg string + }{ + { + name: "Single Test Pass", + inputFiles: map[string]string{ + "run1.json": buildOutput( + jsonLine("run", pkg1, "TestPass", "", 0), + jsonLine("output", pkg1, "TestPass", "output line 1\n", 0), + jsonLine("pass", pkg1, "TestPass", "", 1.23), + ), + }, + cfg: Config{OmitOutputsOnSuccess: false}, + expectedResults: map[string]reports.TestResult{ + fmt.Sprintf("%s/%s", pkg1, "TestPass"): { + TestName: "TestPass", + TestPackage: pkg1, + Runs: 1, + Successes: 1, + PassRatio: 1.0, + }, + }, + }, + { + name: "Single Test Fail", + inputFiles: map[string]string{ + "run1.json": buildOutput( + jsonLine("run", pkg1, "TestFail", "", 0), + jsonLine("output", pkg1, "TestFail", "fail output\n", 0), + jsonLine("fail", pkg1, "TestFail", "", 2.34), + ), + }, + cfg: Config{OmitOutputsOnSuccess: false}, + expectedResults: map[string]reports.TestResult{ + fmt.Sprintf("%s/%s", pkg1, "TestFail"): { + TestName: "TestFail", + TestPackage: pkg1, + Runs: 1, + Failures: 1, + PassRatio: 0.0, + }, + }, + }, + { + name: "Single Test Skip", + inputFiles: map[string]string{ + "run1.json": buildOutput( + jsonLine("run", pkg1, "TestSkip", "", 0), + jsonLine("output", pkg1, "TestSkip", "skip reason\n", 0), + jsonLine("skip", pkg1, "TestSkip", "", 0), + ), + }, + cfg: Config{OmitOutputsOnSuccess: false}, + expectedResults: map[string]reports.TestResult{ + fmt.Sprintf("%s/%s", pkg1, "TestSkip"): { + TestName: "TestSkip", + TestPackage: pkg1, + Runs: 0, + Skips: 1, + Skipped: true, + PassRatio: 1.0, + }, + }, + }, + { + name: "Mixed Pass Fail Skip Multiple Runs", + inputFiles: map[string]string{ + "run1.json": buildOutput( + jsonLine("run", pkg1, "TestA", "", 0), + jsonLine("pass", pkg1, "TestA", "", 1.0), + jsonLine("run", pkg1, "TestB", "", 0), + jsonLine("fail", pkg1, "TestB", "", 1.0), + jsonLine("run", pkg2, "TestC", "", 0), + jsonLine("skip", pkg2, "TestC", "", 0), + ), + "run2.json": buildOutput( + jsonLine("run", pkg1, "TestA", "", 0), + jsonLine("fail", pkg1, "TestA", "", 1.1), // TestA fails on run 2 + jsonLine("run", pkg1, "TestB", "", 0), + jsonLine("pass", pkg1, "TestB", "", 1.1), // TestB passes on run 2 + jsonLine("run", pkg2, "TestC", "", 0), + jsonLine("skip", pkg2, "TestC", "", 0), + ), + }, + cfg: Config{OmitOutputsOnSuccess: false}, + expectedResults: map[string]reports.TestResult{ + fmt.Sprintf("%s/%s", pkg1, "TestA"): {TestName: "TestA", TestPackage: pkg1, Runs: 2, Successes: 1, Failures: 1, PassRatio: 0.5}, + fmt.Sprintf("%s/%s", pkg1, "TestB"): {TestName: "TestB", TestPackage: pkg1, Runs: 2, Successes: 1, Failures: 1, PassRatio: 0.5}, + fmt.Sprintf("%s/%s", pkg2, "TestC"): {TestName: "TestC", TestPackage: pkg2, Runs: 0, Skips: 2, Skipped: true, PassRatio: 1.0}, + }, + }, + { + name: "Build Failure", + inputFiles: map[string]string{ + "run1.json": buildOutput( + jsonLine("build-fail", "", "", "compile error message", 0), + ), + }, + cfg: Config{}, + expectedResults: nil, + expectedErrorIs: ErrBuild, + }, + } + + for _, tc := range testCases { + tc := tc // Capture range variable + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + parser := NewParser().(*defaultParser) + + tempDir := t.TempDir() + filePaths := make([]string, 0, len(tc.inputFiles)) + for name, content := range tc.inputFiles { + fpath := 
filepath.Join(tempDir, name) + err := os.WriteFile(fpath, []byte(content), 0644) + require.NoError(t, err, "Failed to write temp file %s", name) + filePaths = append(filePaths, fpath) + } + + actualResults, err := parser.parseTestResults(filePaths, "run", len(filePaths), tc.cfg) + + if tc.expectedErrorIs != nil { + require.Error(t, err, "Expected an error but got none") + assert.ErrorIs(t, err, tc.expectedErrorIs, "Error mismatch") + if tc.expectedErrorMsg != "" { + assert.ErrorContains(t, err, tc.expectedErrorMsg, "Error message mismatch") + } + assert.Nil(t, actualResults, "Results should be nil on error") + } else { + require.NoError(t, err, "Expected no error but got: %v", err) + require.NotNil(t, actualResults, "Results should not be nil on success") + require.Equal(t, len(tc.expectedResults), len(actualResults), "Unexpected number of results") + + actualResultsMap := make(map[string]reports.TestResult) + for _, res := range actualResults { + key := fmt.Sprintf("%s/%s", res.TestPackage, res.TestName) + actualResultsMap[key] = res + } + + for key, expected := range tc.expectedResults { + actual, ok := actualResultsMap[key] + require.True(t, ok, "Expected result for key '%s' not found", key) + + // Compare relevant fields + assertResultBasic(t, key, expected, actual) + if strings.HasSuffix(key, "TestPass") { + assert.NotEmpty(t, actual.Durations, "TestPass should have duration") + if !tc.cfg.OmitOutputsOnSuccess { + assert.Contains(t, actual.PassedOutputs["run1"], "output line 1\n", "TestPass missing expected output") + } + } else if strings.HasSuffix(key, "TestFail") { + assert.NotEmpty(t, actual.Durations, "TestFail should have duration") + assert.Contains(t, actual.FailedOutputs["run1"], "fail output\n", "TestFail missing expected output") + } else if strings.HasSuffix(key, "TestSkip") { + assert.Empty(t, actual.Durations, "TestSkip should have no duration") + assert.Empty(t, actual.PassedOutputs, "TestSkip should have no passed output") + assert.Empty(t, actual.FailedOutputs, "TestSkip should have no failed output") + } + if expected.TestName == "TestA" || expected.TestName == "TestB" { + assert.Len(t, actual.Durations, 2, "%s should have 2 durations", expected.TestName) + } + if expected.TestName == "TestC" { + assert.Empty(t, actual.Durations, "TestC should have 0 durations") + } + } + } + }) + } +} + +func TestParseTestResults_OutputHandling(t *testing.T) { + t.Parallel() + + pkg1 := "github.com/test/outputpkg" + + testCases := []struct { + name string + inputFile string + cfg Config + expectedPassOut map[string][]string + expectedFailOut map[string][]string + expectedPkgOut []string + }{ + { + name: "OmitOutputsOnSuccess=true", + inputFile: buildOutput( + jsonLine("run", pkg1, "TestPass", "", 0), + jsonLine("output", pkg1, "TestPass", "pass output 1", 0), + jsonLine("pass", pkg1, "TestPass", "", 1.0), + jsonLine("run", pkg1, "TestFail", "", 0), + jsonLine("output", pkg1, "TestFail", "fail output 1", 0), + jsonLine("fail", pkg1, "TestFail", "", 1.0), + jsonLine("output", pkg1, "", "package output 1", 0), + ), + cfg: Config{OmitOutputsOnSuccess: true}, + expectedPassOut: map[string][]string{}, + expectedFailOut: map[string][]string{ + "run1": {"fail output 1"}, + }, + expectedPkgOut: []string{"package output 1"}, + }, + { + name: "OmitOutputsOnSuccess=false", + inputFile: buildOutput( + jsonLine("run", pkg1, "TestPass", "", 0), + jsonLine("output", pkg1, "TestPass", "pass output 1", 0), + jsonLine("pass", pkg1, "TestPass", "", 1.0), + jsonLine("run", pkg1, "TestFail", "", 0), + 
jsonLine("output", pkg1, "TestFail", "fail output 1", 0), + jsonLine("fail", pkg1, "TestFail", "", 1.0), + jsonLine("output", pkg1, "", "package output 1", 0), + jsonLine("output", pkg1, "", "package output 2", 0), + ), + cfg: Config{OmitOutputsOnSuccess: false}, + expectedPassOut: map[string][]string{ + "run1": {"pass output 1"}, + }, + expectedFailOut: map[string][]string{ + "run1": {"fail output 1"}, + }, + expectedPkgOut: []string{"package output 1", "package output 2"}, + }, + { + name: "No test-specific output", + inputFile: buildOutput( + jsonLine("run", pkg1, "TestPass", "", 0), + jsonLine("pass", pkg1, "TestPass", "", 1.0), + jsonLine("run", pkg1, "TestFail", "", 0), + jsonLine("fail", pkg1, "TestFail", "", 1.0), + jsonLine("output", pkg1, "", "package output only", 0), + ), + cfg: Config{OmitOutputsOnSuccess: false}, + expectedPassOut: map[string][]string{}, + expectedFailOut: map[string][]string{ + "run1": {"--- TEST FAILED (no specific output captured) ---"}, + }, + expectedPkgOut: []string{"package output only"}, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + parser := NewParser().(*defaultParser) + tempDir := t.TempDir() + fpath := filepath.Join(tempDir, "run1.json") + err := os.WriteFile(fpath, []byte(tc.inputFile), 0644) + require.NoError(t, err) + + actualResults, err := parser.parseTestResults([]string{fpath}, "run", 1, tc.cfg) + require.NoError(t, err) + require.NotEmpty(t, actualResults) + + passResult := findResult(t, actualResults, "TestPass") + failResult := findResult(t, actualResults, "TestFail") + + if passResult != nil { + assert.Equal(t, len(tc.expectedPassOut), len(passResult.PassedOutputs), "PassedOutputs length mismatch for TestPass") + if len(tc.expectedPassOut) > 0 { + assert.Equal(t, tc.expectedPassOut["run1"], passResult.PassedOutputs["run1"], "PassedOutputs content mismatch for TestPass") + } + assert.Empty(t, passResult.Outputs, "General Outputs map should be empty after processing TestPass") + assert.Equal(t, tc.expectedPkgOut, passResult.PackageOutputs, "PackageOutputs mismatch for TestPass") + } + + if failResult != nil { + assert.Equal(t, len(tc.expectedFailOut), len(failResult.FailedOutputs), "FailedOutputs length mismatch for TestFail") + if len(tc.expectedFailOut) > 0 { + assert.Equal(t, tc.expectedFailOut["run1"], failResult.FailedOutputs["run1"], "FailedOutputs content mismatch for TestFail") + } + assert.Empty(t, failResult.Outputs, "General Outputs map should be empty after processing TestFail") + assert.Equal(t, tc.expectedPkgOut, failResult.PackageOutputs, "PackageOutputs mismatch for TestFail") + } + }) + } +} + +func TestParseTestResults_Subtests(t *testing.T) { + t.Parallel() + + pkg := "github.com/test/subtestpkg" + + testCases := []struct { + name string + inputFile string + cfg Config + expectedResults map[string]reports.TestResult + }{ + { + name: "Parent and Subtest Pass", + inputFile: buildOutput( + jsonLine("run", pkg, "TestParent", "", 0), + jsonLine("run", pkg, "TestParent/SubPass", "", 0), + jsonLine("output", pkg, "TestParent/SubPass", "sub output", 0), + jsonLine("pass", pkg, "TestParent/SubPass", "", 0.5), + jsonLine("output", pkg, "TestParent", "parent output after sub", 0), + jsonLine("pass", pkg, "TestParent", "", 1.0), + ), + cfg: Config{OmitOutputsOnSuccess: false}, + expectedResults: map[string]reports.TestResult{ + fmt.Sprintf("%s/TestParent", pkg): {TestName: "TestParent", TestPackage: pkg, Runs: 1, Successes: 1, PassRatio: 1.0}, + 
fmt.Sprintf("%s/TestParent/SubPass", pkg): {TestName: "TestParent/SubPass", TestPackage: pkg, Runs: 1, Successes: 1, PassRatio: 1.0}, + }, + }, + { + name: "Parent Pass, Subtest Fail", + inputFile: buildOutput( + jsonLine("run", pkg, "TestParent", "", 0), + jsonLine("run", pkg, "TestParent/SubFail", "", 0), + jsonLine("output", pkg, "TestParent/SubFail", "sub fail output", 0), + jsonLine("fail", pkg, "TestParent/SubFail", "", 0.6), + jsonLine("output", pkg, "TestParent", "parent output after sub fail", 0), + jsonLine("pass", pkg, "TestParent", "", 1.2), + ), + cfg: Config{OmitOutputsOnSuccess: false}, + expectedResults: map[string]reports.TestResult{ + fmt.Sprintf("%s/TestParent", pkg): {TestName: "TestParent", TestPackage: pkg, Runs: 1, Successes: 1, PassRatio: 1.0}, + fmt.Sprintf("%s/TestParent/SubFail", pkg): {TestName: "TestParent/SubFail", TestPackage: pkg, Runs: 1, Failures: 1, PassRatio: 0.0}, + }, + }, + { + name: "Parent Fail Before Subtest", + inputFile: buildOutput( + jsonLine("run", pkg, "TestParentFailEarly", "", 0), + jsonLine("output", pkg, "TestParentFailEarly", "parent fail output", 0), + jsonLine("fail", pkg, "TestParentFailEarly", "", 0.1), + ), + cfg: Config{OmitOutputsOnSuccess: false}, + expectedResults: map[string]reports.TestResult{ + fmt.Sprintf("%s/TestParentFailEarly", pkg): {TestName: "TestParentFailEarly", TestPackage: pkg, Runs: 1, Failures: 1, PassRatio: 0.0}, + }, + }, + { + name: "Parent Fail After Subtest", + inputFile: buildOutput( + jsonLine("run", pkg, "TestParentFailLate", "", 0), + jsonLine("run", pkg, "TestParentFailLate/SubPass", "", 0), + jsonLine("pass", pkg, "TestParentFailLate/SubPass", "", 0.5), + jsonLine("output", pkg, "TestParentFailLate", "parent fail output later", 0), + jsonLine("fail", pkg, "TestParentFailLate", "", 1.5), + ), + cfg: Config{OmitOutputsOnSuccess: false}, + expectedResults: map[string]reports.TestResult{ + fmt.Sprintf("%s/TestParentFailLate", pkg): {TestName: "TestParentFailLate", TestPackage: pkg, Runs: 1, Failures: 1, PassRatio: 0.0}, + fmt.Sprintf("%s/TestParentFailLate/SubPass", pkg): {TestName: "TestParentFailLate/SubPass", TestPackage: pkg, Runs: 1, Successes: 1, PassRatio: 1.0}, + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + parser := NewParser().(*defaultParser) + tempDir := t.TempDir() + fpath := filepath.Join(tempDir, "run1.json") + err := os.WriteFile(fpath, []byte(tc.inputFile), 0644) + require.NoError(t, err) + + actualResults, err := parser.parseTestResults([]string{fpath}, "run", 1, tc.cfg) + require.NoError(t, err) + require.Equal(t, len(tc.expectedResults), len(actualResults), "Unexpected number of results") + + actualResultsMap := resultsToMap(actualResults) + for key, expected := range tc.expectedResults { + actual, ok := actualResultsMap[key] + require.True(t, ok, "Expected result for key '%s' not found", key) + assertResultBasic(t, key, expected, actual) + if strings.Contains(key, "SubPass") { + assert.Len(t, actual.Durations, 1, "SubPass should have 1 duration") + if !tc.cfg.OmitOutputsOnSuccess { + if tc.name == "Parent and Subtest Pass" { + require.Contains(t, actual.PassedOutputs, "run1", "PassedOutputs map missing run1 key for %s in %s", key, tc.name) + assert.Contains(t, actual.PassedOutputs["run1"], "sub output", "SubPass missing expected output in %s", tc.name) + } else { + assert.Empty(t, actual.PassedOutputs["run1"], "PassedOutputs[run1] should be empty for %s in %s", key, tc.name) + } + } + } else if strings.Contains(key, 
"SubFail") { + assert.Len(t, actual.Durations, 1, "SubFail should have 1 duration") + require.Contains(t, actual.FailedOutputs, "run1", "FailedOutputs map missing run1 key for %s", key) + assert.Contains(t, actual.FailedOutputs["run1"], "sub fail output", "SubFail missing expected output") + } + } + }) + } +} + +func TestParseTestResults_Durations(t *testing.T) { + t.Parallel() + pkg := "github.com/test/durationpkg" + + inputFile := buildOutput( + jsonLine("run", pkg, "TestA", "", 0), + jsonLine("pass", pkg, "TestA", "", 1.5), + jsonLine("run", pkg, "TestB", "", 0), + jsonLine("fail", pkg, "TestB", "", 2.5), + jsonLine("run", pkg, "TestC", "", 0), + jsonLine("pass", pkg, "TestC", "", 0), + jsonLine("run", pkg, "TestD", "", 0), + jsonLine("skip", pkg, "TestD", "", 0), + ) + + parser := NewParser().(*defaultParser) + tempDir := t.TempDir() + fpath := filepath.Join(tempDir, "run1.json") + err := os.WriteFile(fpath, []byte(inputFile), 0644) + require.NoError(t, err) + + actualResults, err := parser.parseTestResults([]string{fpath}, "run", 1, Config{}) + require.NoError(t, err) + + resultsMap := resultsToMap(actualResults) + + resA, ok := resultsMap[fmt.Sprintf("%s/TestA", pkg)] + require.True(t, ok, "TestA not found") + require.Len(t, resA.Durations, 1, "TestA should have 1 duration") + assert.Equal(t, int64(1500), resA.Durations[0].Milliseconds(), "TestA duration mismatch") + + resB, ok := resultsMap[fmt.Sprintf("%s/TestB", pkg)] + require.True(t, ok, "TestB not found") + require.Len(t, resB.Durations, 1, "TestB should have 1 duration") + assert.Equal(t, int64(2500), resB.Durations[0].Milliseconds(), "TestB duration mismatch") + + resC, ok := resultsMap[fmt.Sprintf("%s/TestC", pkg)] + require.True(t, ok, "TestC not found") + require.Len(t, resC.Durations, 1, "TestC should have 1 duration") + assert.Equal(t, int64(0), resC.Durations[0].Milliseconds(), "TestC duration mismatch") + + resD, ok := resultsMap[fmt.Sprintf("%s/TestD", pkg)] + require.True(t, ok, "TestD not found") + assert.Empty(t, resD.Durations, "TestD should have 0 durations") +} + +func TestParseTestResults_PanicRace(t *testing.T) { + t.Parallel() + pkg := "github.com/test/panicracepkg" + + panicOutput := []string{ + "panic: This test intentionally panics", + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package.TestRegularPanic(...)", + } + raceOutput := []string{ + "WARNING: DATA RACE", + " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package.TestDataRace(...)", + } + timeoutOutput := []string{ + "panic: test timed out after 1m0s", + "running tests:", + "\tTestTimeoutCulprit (1m0s)", + } + + testCases := []struct { + name string + inputFile string + cfg Config + expectedResults map[string]reports.TestResult + }{ + { + name: "Regular Panic", + inputFile: buildOutput( + jsonLine("run", pkg, "TestRegularPanic", "", 0), + jsonLine("output", pkg, "TestRegularPanic", panicOutput[0], 0), + jsonLine("output", pkg, "TestRegularPanic", panicOutput[1], 0), + jsonLine("fail", pkg, "TestRegularPanic", "", 0.5), + ), + cfg: Config{}, + expectedResults: map[string]reports.TestResult{ + fmt.Sprintf("%s/TestRegularPanic", pkg): { + TestName: "TestRegularPanic", TestPackage: pkg, Runs: 1, Failures: 1, Panic: true, + }, + }, + }, + { + name: "Data Race", + inputFile: buildOutput( + jsonLine("run", pkg, "TestDataRace", "", 0), + jsonLine("output", pkg, "TestDataRace", raceOutput[0], 0), + jsonLine("output", pkg, "TestDataRace", raceOutput[1], 0), + jsonLine("fail", 
pkg, "TestDataRace", "", 0.6), + ), + cfg: Config{}, + expectedResults: map[string]reports.TestResult{ + fmt.Sprintf("%s/TestDataRace", pkg): { + TestName: "TestDataRace", TestPackage: pkg, Runs: 1, Failures: 1, Race: true, + }, + }, + }, + { + name: "Timeout Panic", + inputFile: buildOutput( + jsonLine("run", pkg, "TestTimeoutCulprit", "", 0), + jsonLine("output", pkg, "TestTimeoutCulprit", timeoutOutput[0], 0), + jsonLine("output", pkg, "TestTimeoutCulprit", timeoutOutput[1], 0), + jsonLine("output", pkg, "TestTimeoutCulprit", timeoutOutput[2], 0), + jsonLine("fail", pkg, "TestTimeoutCulprit", "", 60.1), + ), + cfg: Config{}, + expectedResults: map[string]reports.TestResult{ + fmt.Sprintf("%s/TestTimeoutCulprit", pkg): { + TestName: "TestTimeoutCulprit", TestPackage: pkg, Runs: 1, Failures: 1, Panic: true, Timeout: true, + }, + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + parser := NewParser().(*defaultParser) + tempDir := t.TempDir() + fpath := filepath.Join(tempDir, "run1.json") + err := os.WriteFile(fpath, []byte(tc.inputFile), 0644) + require.NoError(t, err) + + actualResults, err := parser.parseTestResults([]string{fpath}, "run", 1, tc.cfg) + require.NoError(t, err) + require.Equal(t, len(tc.expectedResults), len(actualResults), "Unexpected number of results") + + actualResultsMap := resultsToMap(actualResults) + for key, expected := range tc.expectedResults { + actual, ok := actualResultsMap[key] + require.True(t, ok, "Expected result for key '%s' not found", key) + assertResultBasic(t, key, expected, actual) + assert.Equal(t, expected.Panic, actual.Panic, "Panic flag mismatch for %s", key) + assert.Equal(t, expected.Race, actual.Race, "Race flag mismatch for %s", key) + assert.Equal(t, expected.Timeout, actual.Timeout, "Timeout flag mismatch for %s", key) + + // Check if panic/race output was added to FailedOutputs + if expected.Panic || expected.Race { + outputs, ok := actual.FailedOutputs["run1"] + require.True(t, ok, "FailedOutputs map missing run1 key for %s", key) + require.NotEmpty(t, outputs, "FailedOutputs should contain panic/race info for %s", key) + + if expected.Panic { + assert.Contains(t, outputs[0], "PANIC DETECTED", "Missing PANIC marker for %s", key) + if tc.name == "Regular Panic" { + assert.Contains(t, outputs, panicOutput[0]) + assert.Contains(t, outputs, panicOutput[1]) + } else if tc.name == "Timeout Panic" { + assert.Contains(t, outputs, timeoutOutput[0]) + assert.Contains(t, outputs, timeoutOutput[1]) + assert.Contains(t, outputs, timeoutOutput[2]) + } + } else if expected.Race { + assert.Contains(t, outputs[0], "RACE DETECTED", "Missing RACE marker for %s", key) + assert.Contains(t, outputs, raceOutput[0]) + assert.Contains(t, outputs, raceOutput[1]) + } + } + } + }) + } +} + +func TestParseTestResults_RunCountCorrection(t *testing.T) { + t.Parallel() + pkg := "github.com/test/runcountpkg" + + // Simulate a panic happening, which often causes a 'fail' event without a preceding 'pass' + // for the same test in the same run, leading to potential overcounting if not handled. + // This example simulates 2 expected runs, but the panic causes 3 fail events for TestA. 
+ inputFileRun1 := buildOutput( + jsonLine("run", pkg, "TestA", "", 0), + jsonLine("output", pkg, "TestA", "panic: Error in TestA", 0), + jsonLine("output", pkg, "TestA", "github.com/test/runcountpkg.TestA(...)", 0), + jsonLine("fail", pkg, "TestA", "", 0.1), // Fail from panic + jsonLine("run", pkg, "TestB", "", 0), + jsonLine("pass", pkg, "TestB", "", 0.2), + ) + inputFileRun2 := buildOutput( + jsonLine("run", pkg, "TestA", "", 0), + jsonLine("pass", pkg, "TestA", "", 1.1), // Passes on run 2 + jsonLine("run", pkg, "TestB", "", 0), + jsonLine("pass", pkg, "TestB", "", 1.2), + ) + + parser := NewParser().(*defaultParser) + tempDir := t.TempDir() + filePaths := []string{ + filepath.Join(tempDir, "run1.json"), + filepath.Join(tempDir, "run2.json"), + } + err := os.WriteFile(filePaths[0], []byte(inputFileRun1), 0644) + require.NoError(t, err) + err = os.WriteFile(filePaths[1], []byte(inputFileRun2), 0644) + require.NoError(t, err) + + // Pass 2 as totalExpectedRunsPerTest + actualResults, err := parser.parseTestResults(filePaths, "run", 2, Config{}) + require.NoError(t, err) + + resultsMap := resultsToMap(actualResults) + + resA, ok := resultsMap[fmt.Sprintf("%s/TestA", pkg)] + require.True(t, ok, "TestA not found") + // Final runs = 2 (processed run1 fail, processed run2 pass) + assert.Equal(t, 2, resA.Runs, "TestA Runs should be 2") + assert.Equal(t, 1, resA.Successes, "TestA Successes should be 1") + assert.Equal(t, 1, resA.Failures, "TestA Failures should be 1") + assert.True(t, resA.Panic, "TestA should be marked panicked") // Panic flag from attribution + assert.InDelta(t, 0.5, resA.PassRatio, 0.001, "TestA PassRatio mismatch") + + resB, ok := resultsMap[fmt.Sprintf("%s/TestB", pkg)] + require.True(t, ok, "TestB not found") + // Final runs = 2 (processed run1 pass, processed run2 pass) + assert.Equal(t, 2, resB.Runs, "TestB Runs should be 2") + assert.Equal(t, 2, resB.Successes, "TestB Successes should be 2") + assert.Equal(t, 0, resB.Failures, "TestB Failures should be 0") + assert.False(t, resB.Panic, "TestB should not be panicked") + assert.Equal(t, 1.0, resB.PassRatio, "TestB PassRatio mismatch") +} + +func TestParseTestResults_RunCountCorrectionRefined(t *testing.T) { + t.Parallel() + pkg := "github.com/test/runcountpkg2" + + testCases := []struct { + name string + inputFiles map[string]string + expectedTotalRuns int + expectedResultTestA reports.TestResult + }{ + { + name: "Panic within expected runs", + inputFiles: map[string]string{ + "run1.json": buildOutput( + jsonLine("run", pkg, "TestA", "", 0), + jsonLine("output", pkg, "TestA", "panic: Error", 0), // Panic + jsonLine("output", pkg, "TestA", "github.com/test/pkg.TestA(...)", 0), + jsonLine("fail", pkg, "TestA", "", 0.1), + ), + "run2.json": buildOutput( + jsonLine("run", pkg, "TestA", "", 0), + jsonLine("pass", pkg, "TestA", "", 0.2), + ), + "run3.json": buildOutput( + jsonLine("run", pkg, "TestA", "", 0), + jsonLine("pass", pkg, "TestA", "", 0.3), + ), + }, + expectedTotalRuns: 3, + expectedResultTestA: reports.TestResult{ // Runs=3, Success=2, Fail=1, Panic=true + TestName: "TestA", TestPackage: pkg, Runs: 3, Successes: 2, Failures: 1, Panic: true, PassRatio: 2.0 / 3.0, + }, + }, + { + name: "Panic exceeding expected runs (capped)", + inputFiles: map[string]string{ + "run1.json": buildOutput( // This run fails due to panic + jsonLine("run", pkg, "TestA", "", 0), + jsonLine("output", pkg, "TestA", "panic: Error", 0), + jsonLine("output", pkg, "TestA", "github.com/test/pkg.TestA(...)", 0), + jsonLine("fail", pkg, "TestA", 
"", 0.1), + jsonLine("fail", pkg, "TestA", "", 0.11), + ), + "run2.json": buildOutput( + jsonLine("run", pkg, "TestA", "", 0), + jsonLine("pass", pkg, "TestA", "", 0.2), + ), + }, + expectedTotalRuns: 2, // Only expected 2 runs total + expectedResultTestA: reports.TestResult{ // Expect correction: Runs=2, Success=1, Fail=1, Panic=true + TestName: "TestA", TestPackage: pkg, Runs: 2, Successes: 1, Failures: 1, Panic: true, PassRatio: 0.5, + }, + }, + { + name: "Normal overcount (no panic/race, capped)", + inputFiles: map[string]string{ + "run1.json": buildOutput( + jsonLine("run", pkg, "TestA", "", 0), + jsonLine("pass", pkg, "TestA", "", 0.1), + jsonLine("pass", pkg, "TestA", "", 0.11), + ), + "run2.json": buildOutput( + jsonLine("run", pkg, "TestA", "", 0), + jsonLine("fail", pkg, "TestA", "", 0.2), + ), + }, + expectedTotalRuns: 2, + expectedResultTestA: reports.TestResult{ + TestName: "TestA", TestPackage: pkg, Runs: 2, Successes: 1, Failures: 1, Panic: false, PassRatio: 0.5, + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + parser := NewParser().(*defaultParser) + tempDir := t.TempDir() + filePaths := make([]string, 0, len(tc.inputFiles)) + for name, content := range tc.inputFiles { + fpath := filepath.Join(tempDir, name) + err := os.WriteFile(fpath, []byte(content), 0644) + require.NoError(t, err) + filePaths = append(filePaths, fpath) + } + + actualResults, err := parser.parseTestResults(filePaths, "run", tc.expectedTotalRuns, Config{}) + require.NoError(t, err) + + resultsMap := resultsToMap(actualResults) + actualA, ok := resultsMap[fmt.Sprintf("%s/TestA", pkg)] + require.True(t, ok, "TestA not found") + assertResultBasic(t, "TestA", tc.expectedResultTestA, actualA) + assert.Equal(t, tc.expectedResultTestA.Panic, actualA.Panic, "TestA Panic mismatch") + + }) + } +} + +func TestParseTestResults_PanicInheritance(t *testing.T) { + t.Parallel() + pkg := "github.com/test/panicinheritpkg" + + parentPanicInput := buildOutput( + jsonLine("run", pkg, "TestParentPanics", "", 0), + jsonLine("run", pkg, "TestParentPanics/SubPass", "", 0), + jsonLine("pass", pkg, "TestParentPanics/SubPass", "", 0.1), + jsonLine("run", pkg, "TestParentPanics/SubFail", "", 0), + jsonLine("fail", pkg, "TestParentPanics/SubFail", "", 0.2), + jsonLine("output", pkg, "TestParentPanics", "panic: Parent panics here!", 0), + jsonLine("output", pkg, "TestParentPanics", "github.com/test/panicinheritpkg.TestParentPanics(...)", 0), + jsonLine("fail", pkg, "TestParentPanics", "", 0.3), + ) + + testCases := []struct { + name string + cfg Config + expectedResults map[string]reports.TestResult + }{ + { + name: "Inheritance Enabled (Default)", + cfg: Config{IgnoreParentFailuresOnSubtests: false}, + expectedResults: map[string]reports.TestResult{ + fmt.Sprintf("%s/TestParentPanics", pkg): {TestName: "TestParentPanics", TestPackage: pkg, Runs: 1, Failures: 1, Panic: true}, + fmt.Sprintf("%s/TestParentPanics/SubPass", pkg): {TestName: "TestParentPanics/SubPass", TestPackage: pkg, Runs: 1, Successes: 0, Failures: 1, Panic: true}, + fmt.Sprintf("%s/TestParentPanics/SubFail", pkg): {TestName: "TestParentPanics/SubFail", TestPackage: pkg, Runs: 1, Failures: 1, Panic: true}, + }, + }, + { + name: "IgnoreParentFailures (No Transform Effect Here)", + cfg: Config{IgnoreParentFailuresOnSubtests: true}, + expectedResults: map[string]reports.TestResult{ + fmt.Sprintf("%s/TestParentPanics", pkg): {TestName: "TestParentPanics", TestPackage: pkg, Runs: 1, Failures: 1, Panic: true}, 
+ fmt.Sprintf("%s/TestParentPanics/SubPass", pkg): {TestName: "TestParentPanics/SubPass", TestPackage: pkg, Runs: 1, Successes: 0, Failures: 1, Panic: true}, + fmt.Sprintf("%s/TestParentPanics/SubFail", pkg): {TestName: "TestParentPanics/SubFail", TestPackage: pkg, Runs: 1, Failures: 1, Panic: true}, + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + parser := NewParser().(*defaultParser) + tempDir := t.TempDir() + fpath := filepath.Join(tempDir, "run1.json") + err := os.WriteFile(fpath, []byte(parentPanicInput), 0644) + require.NoError(t, err) + + actualResults, err := parser.parseTestResults([]string{fpath}, "run", 1, tc.cfg) + require.NoError(t, err) + require.Equal(t, len(tc.expectedResults), len(actualResults), "Unexpected number of results") + + actualResultsMap := resultsToMap(actualResults) + for key, expected := range tc.expectedResults { + actual, ok := actualResultsMap[key] + require.True(t, ok, "Expected result for key '%s' not found", key) + assertResultBasic(t, key, expected, actual) + assert.Equal(t, expected.Panic, actual.Panic, "Panic flag mismatch for %s", key) + } + }) + } +} + +func TestParseTestResults_JSONErrors(t *testing.T) { + t.Parallel() + pkg := "github.com/test/jsonerrpkg" + + inputFile := strings.Join([]string{ + jsonLine("run", pkg, "TestBeforeError", "", 0), // Valid line + jsonLine("pass", pkg, "TestBeforeError", "", 1.0), // Valid line + `{"Action":"run","Package":"github.com/test/jsonerrpkg","Test":"TestWithError"}`, // Missing fields + `this is not json`, // Invalid line + jsonLine("run", pkg, "TestAfterError", "", 0), // Valid line + jsonLine("pass", pkg, "TestAfterError", "", 1.0), // Valid line + }, "\n") + "\n" + + parser := NewParser().(*defaultParser) + tempDir := t.TempDir() + fpath := filepath.Join(tempDir, "run1.json") + err := os.WriteFile(fpath, []byte(inputFile), 0644) + require.NoError(t, err) + + actualResults, err := parser.parseTestResults([]string{fpath}, "run", 1, Config{}) + require.NoError(t, err, "Parsing should continue despite invalid JSON lines") + + require.Len(t, actualResults, 2, "Expected results only from tests with terminal actions") + resultsMap := resultsToMap(actualResults) + + resBefore, okBefore := resultsMap[fmt.Sprintf("%s/TestBeforeError", pkg)] + assert.True(t, okBefore, "TestBeforeError should be parsed") + assert.Equal(t, 1, resBefore.Runs, "TestBeforeError Runs mismatch") + assert.Equal(t, 1, resBefore.Successes, "TestBeforeError Successes mismatch") + + resAfter, okAfter := resultsMap[fmt.Sprintf("%s/TestAfterError", pkg)] + assert.True(t, okAfter, "TestAfterError should be parsed") + assert.Equal(t, 1, resAfter.Runs, "TestAfterError Runs mismatch") + assert.Equal(t, 1, resAfter.Successes, "TestAfterError Successes mismatch") + + _, okMid := resultsMap[fmt.Sprintf("%s/TestWithError", pkg)] + assert.False(t, okMid, "TestWithError should not be in final results") +} + +func TestParseFiles_Transformation(t *testing.T) { + t.Parallel() + pkg := "github.com/test/transformpkg" + + inputFile := buildOutput( + jsonLine("run", pkg, "TestParentTransform", "", 0), + jsonLine("output", pkg, "TestParentTransform", "parent output", 0), + jsonLine("run", pkg, "TestParentTransform/SubFail", "", 0), + jsonLine("output", pkg, "TestParentTransform/SubFail", "sub fail output", 0), + jsonLine("fail", pkg, "TestParentTransform/SubFail", "", 0.1), + jsonLine("fail", pkg, "TestParentTransform", "", 0.2), + ) + + parser := NewParser() + tempDir := t.TempDir() + fpath := 
filepath.Join(tempDir, "run1.json") + err := os.WriteFile(fpath, []byte(inputFile), 0644) + require.NoError(t, err) + + cfg := Config{IgnoreParentFailuresOnSubtests: true, OmitOutputsOnSuccess: false} + actualResults, _, err := parser.ParseFiles([]string{fpath}, "run", 1, cfg) + require.NoError(t, err) + + require.Len(t, actualResults, 2, "Expected 2 results after transformation") + resultsMap := resultsToMap(actualResults) + + parentRes, okP := resultsMap[fmt.Sprintf("%s/TestParentTransform", pkg)] + require.True(t, okP, "Parent test not found") + assert.Equal(t, 1, parentRes.Runs, "Parent Runs mismatch") + assert.Equal(t, 1, parentRes.Successes, "Parent Successes mismatch (should pass)") + assert.Equal(t, 0, parentRes.Failures, "Parent Failures mismatch (should pass)") + assert.Equal(t, 1.0, parentRes.PassRatio, "Parent PassRatio mismatch") + assert.False(t, parentRes.Panic, "Parent Panic mismatch") + require.Contains(t, parentRes.PassedOutputs, "run1", "Parent PassedOutputs missing run1") + assert.Contains(t, parentRes.PassedOutputs["run1"], "parent output", "Parent output missing from PassedOutputs") + assert.NotContains(t, parentRes.PassedOutputs["run1"][0], "=== PASS", "Parent output should not be transformed unless original contained FAIL markers") + + subRes, okS := resultsMap[fmt.Sprintf("%s/TestParentTransform/SubFail", pkg)] + require.True(t, okS, "Subtest not found") + assert.Equal(t, 1, subRes.Runs, "Subtest Runs mismatch") + assert.Equal(t, 0, subRes.Successes, "Subtest Successes mismatch") + assert.Equal(t, 1, subRes.Failures, "Subtest Failures mismatch") + assert.Equal(t, 0.0, subRes.PassRatio, "Subtest PassRatio mismatch") +} + +func TestParseTestResults_EmptyOrIncomplete(t *testing.T) { + t.Parallel() + pkg := "github.com/test/empty" + + testCases := []struct { + name string + inputFiles map[string]string + numExpResults int + expError bool + }{ + { + name: "Empty File", + inputFiles: map[string]string{"run1.json": ""}, + numExpResults: 0, + expError: false, + }, + { + name: "Only Run Action", + inputFiles: map[string]string{"run1.json": buildOutput(jsonLine("run", pkg, "TestOnlyRun", "", 0))}, + numExpResults: 0, + expError: false, + }, + { + name: "Run and Output Only", + inputFiles: map[string]string{"run1.json": buildOutput(jsonLine("run", pkg, "TestRunOutput", "", 0), jsonLine("output", pkg, "TestRunOutput", "out", 0))}, + numExpResults: 0, + expError: false, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + parser := NewParser().(*defaultParser) + tempDir := t.TempDir() + filePaths := make([]string, 0, len(tc.inputFiles)) + for name, content := range tc.inputFiles { + fpath := filepath.Join(tempDir, name) + err := os.WriteFile(fpath, []byte(content), 0644) + require.NoError(t, err) + filePaths = append(filePaths, fpath) + } + + actualResults, err := parser.parseTestResults(filePaths, "run", 1, Config{}) + + if tc.expError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Len(t, actualResults, tc.numExpResults) + } + }) + } +} + +// findResult finds a specific test result by name from a slice. +func findResult(t *testing.T, results []reports.TestResult, testName string) *reports.TestResult { + t.Helper() + for i := range results { + if results[i].TestName == testName { + return &results[i] + } + } + return nil +} + +// resultsToMap converts a slice of results to a map keyed by "package/testName". 
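+//
+// Example (hypothetical values):
+//
+//	m := resultsToMap([]reports.TestResult{{TestPackage: "pkg/a", TestName: "TestX"}})
+//	_, ok := m["pkg/a/TestX"] // ok == true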
+func resultsToMap(results []reports.TestResult) map[string]reports.TestResult {
+	m := make(map[string]reports.TestResult, len(results))
+	for _, res := range results {
+		key := fmt.Sprintf("%s/%s", res.TestPackage, res.TestName)
+		m[key] = res
+	}
+	return m
+}
+
+// assertResultBasic performs basic assertions on core result fields.
+func assertResultBasic(t *testing.T, key string, expected, actual reports.TestResult) {
+	t.Helper()
+	assert.Equal(t, expected.TestName, actual.TestName, "TestName mismatch for %s", key)
+	assert.Equal(t, expected.TestPackage, actual.TestPackage, "TestPackage mismatch for %s", key)
+	assert.Equal(t, expected.Runs, actual.Runs, "Runs mismatch for %s", key)
+	assert.Equal(t, expected.Successes, actual.Successes, "Successes mismatch for %s", key)
+	assert.Equal(t, expected.Failures, actual.Failures, "Failures mismatch for %s", key)
+	assert.Equal(t, expected.Skips, actual.Skips, "Skips mismatch for %s", key)
+	assert.Equal(t, expected.Skipped, actual.Skipped, "Skipped flag mismatch for %s", key)
+	assert.InDelta(t, expected.PassRatio, actual.PassRatio, 0.001, "PassRatio mismatch for %s", key)
+}
+
+// TestParseFiles_IgnoreParentFailures verifies the interaction between the parser and the transformer.
+func TestParseFiles_IgnoreParentFailures(t *testing.T) {
+	t.Parallel()
+	pkg := "github.com/test/transformpkg"
+
+	testCases := []struct {
+		name            string
+		inputFile       string
+		expectedResults map[string]reports.TestResult
+	}{
+		{
+			name: "Parent only fails due to subtest",
+			inputFile: buildOutput(
+				jsonLine("run", pkg, "TestParentTransform", "", 0),
+				jsonLine("output", pkg, "TestParentTransform", "parent setup output", 0), // Regular output
+				jsonLine("run", pkg, "TestParentTransform/SubFail", "", 0),
+				jsonLine("output", pkg, "TestParentTransform/SubFail", "sub fail output", 0),
+				jsonLine("output", pkg, "TestParentTransform/SubFail", "--- FAIL: TestParentTransform/SubFail (0.1s)", 0), // Subtest fail marker
+				jsonLine("fail", pkg, "TestParentTransform/SubFail", "", 0.1),
+				jsonLine("output", pkg, "TestParentTransform", "--- FAIL: TestParentTransform (0.2s)", 0), // Parent fail marker (due to subtest)
+				jsonLine("fail", pkg, "TestParentTransform", "", 0.2),
+			),
+			expectedResults: map[string]reports.TestResult{
+				fmt.Sprintf("%s/TestParentTransform", pkg):         {TestName: "TestParentTransform", TestPackage: pkg, Runs: 1, Successes: 1, Failures: 0, PassRatio: 1.0},
+				fmt.Sprintf("%s/TestParentTransform/SubFail", pkg): {TestName: "TestParentTransform/SubFail", TestPackage: pkg, Runs: 1, Successes: 0, Failures: 1, PassRatio: 0.0},
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+			parser := NewParser()
+			tempDir := t.TempDir()
+			fpath := filepath.Join(tempDir, "run1.json")
+			err := os.WriteFile(fpath, []byte(tc.inputFile), 0644)
+			require.NoError(t, err)
+
+			cfg := Config{IgnoreParentFailuresOnSubtests: true, OmitOutputsOnSuccess: false}
+			actualResults, _, err := parser.ParseFiles([]string{fpath}, "run", 1, cfg)
+			require.NoError(t, err)
+
+			require.Equal(t, len(tc.expectedResults), len(actualResults), "Unexpected number of results")
+			actualResultsMap := resultsToMap(actualResults)
+			for key, expected := range tc.expectedResults {
+				actual, ok := actualResultsMap[key]
+				require.True(t, ok, "Expected result for key '%s' not found", key)
+				assertResultBasic(t, key, expected, actual)
+				if expected.TestName == "TestParentTransform" {
+					require.Contains(t, actual.PassedOutputs, "run1", "PassedOutputs
missing run1 for transformed parent") + assert.Contains(t, actual.PassedOutputs["run1"], "parent setup output", "Parent original output missing") + assert.Contains(t, actual.PassedOutputs["run1"], "--- PASS: TestParentTransform (0.2s)", "Parent output marker not transformed to PASS") + } + if expected.TestName == "TestParentTransform/SubFail" { + require.Contains(t, actual.FailedOutputs, "run1", "FailedOutputs missing run1 for failing subtest %s", key) + assert.Contains(t, actual.FailedOutputs["run1"], "sub fail output", "Subtest fail output missing for %s", key) + assert.Contains(t, actual.FailedOutputs["run1"], "--- FAIL: TestParentTransform/SubFail (0.1s)", "Subtest fail marker missing for %s", key) + } + } + }) + } +} diff --git a/tools/flakeguard/runner/runner.go b/tools/flakeguard/runner/runner.go index fa650b15d..196a93b3e 100644 --- a/tools/flakeguard/runner/runner.go +++ b/tools/flakeguard/runner/runner.go @@ -1,800 +1,345 @@ package runner import ( - "bufio" - "encoding/json" "errors" "fmt" - "io" - "os" - "os/exec" - "path/filepath" "regexp" - "strconv" "strings" - "time" "github.com/rs/zerolog/log" - "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/go-test-transform/pkg/transformer" "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/executor" + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/parser" ) const ( - RawOutputDir = "./flakeguard_raw_output" - RawOutputTransformedDir = "./flakeguard_raw_output_transformed" + RawOutputDir = "./flakeguard_raw_output" ) -var ( - startPanicRe = regexp.MustCompile(`^panic:`) - startRaceRe = regexp.MustCompile(`^WARNING: DATA RACE`) - buildErr = errors.New("failed to build test code") - failedToShowBuildErr = errors.New("flakeguard failed to show build errors") -) - -// Runner describes the test run parameters and raw test outputs +// Runner describes the test run parameters and manages test execution and result parsing. +// It delegates command execution to an Executor and result parsing to a Parser. type Runner struct { - ProjectPath string // Path to the Go project directory. - Verbose bool // If true, provides detailed logging. - RunCount int // Number of times to run the tests. - GoTestCountFlag *int // Run go test with -count flag. - GoTestRaceFlag bool // Run go test with -race flag. - GoTestTimeoutFlag string // Run go test with -timeout flag - Tags []string // Build tags. - UseShuffle bool // Enable test shuffling. -shuffle=on flag. - ShuffleSeed string // Set seed for test shuffling -shuffle={seed} flag. Must be used with UseShuffle. - FailFast bool // Stop on first test failure. - SkipTests []string // Test names to exclude. - SelectTests []string // Test names to include. - OmitOutputsOnSuccess bool // Set to true to omit test outputs on success. - MaxPassRatio float64 // Maximum pass ratio threshold for a test to be considered flaky. - IgnoreParentFailuresOnSubtests bool // Ignore failures in parent tests when only subtests fail. - rawOutputFiles []string // Raw output files for each test run. - transformedOutputFiles []string // Transformed output files for each test run. 
+ // Configuration fields + ProjectPath string + Verbose bool + RunCount int + GoTestCountFlag *int + GoTestRaceFlag bool + GoTestTimeoutFlag string + Tags []string + UseShuffle bool + ShuffleSeed string + FailFast bool + SkipTests []string + SelectTests []string + + // Configuration passed down to the parser + IgnoreParentFailuresOnSubtests bool + OmitOutputsOnSuccess bool + + // Dependencies + exec executor.Executor // Injected Executor + parser parser.Parser // Injected Parser (interface defined in parser.go) + +} + +// NewRunner creates a new Runner with the default command executor. +func NewRunner( + projectPath string, + verbose bool, + // Runner specific config + runCount int, + goTestCountFlag *int, + goTestRaceFlag bool, + goTestTimeoutFlag string, + tags []string, + useShuffle bool, + shuffleSeed string, + failFast bool, + skipTests []string, + selectTests []string, + // Parser specific config (passed during initialization) + ignoreParentFailuresOnSubtests bool, + omitOutputsOnSuccess bool, + // Dependencies (allow injection for testing) + exec executor.Executor, + p parser.Parser, // Use interface type directly +) *Runner { + if exec == nil { + exec = executor.NewCommandExecutor() + } + if p == nil { + p = parser.NewParser() // Use constructor from parser.go + } + return &Runner{ + ProjectPath: projectPath, + Verbose: verbose, + RunCount: runCount, + GoTestCountFlag: goTestCountFlag, + GoTestRaceFlag: goTestRaceFlag, + GoTestTimeoutFlag: goTestTimeoutFlag, + Tags: tags, + UseShuffle: useShuffle, + ShuffleSeed: shuffleSeed, + FailFast: failFast, + SkipTests: skipTests, + SelectTests: selectTests, + IgnoreParentFailuresOnSubtests: ignoreParentFailuresOnSubtests, + OmitOutputsOnSuccess: omitOutputsOnSuccess, + exec: exec, + parser: p, + } +} + +// Helper function to create executor.Config from Runner fields +func (r *Runner) getExecutorConfig() executor.Config { + return executor.Config{ + ProjectPath: r.ProjectPath, + Verbose: r.Verbose, + GoTestCountFlag: r.GoTestCountFlag, + GoTestRaceFlag: r.GoTestRaceFlag, + GoTestTimeoutFlag: r.GoTestTimeoutFlag, + Tags: r.Tags, + UseShuffle: r.UseShuffle, + ShuffleSeed: r.ShuffleSeed, + SkipTests: r.SkipTests, + SelectTests: r.SelectTests, + RawOutputDir: RawOutputDir, // Use the constant defined in this package + } +} + +// Helper function to create parser.Config from Runner fields +func (r *Runner) getParserConfig() parser.Config { + return parser.Config{ + IgnoreParentFailuresOnSubtests: r.IgnoreParentFailuresOnSubtests, + OmitOutputsOnSuccess: r.OmitOutputsOnSuccess, + } } -// RunTestPackages executes the tests for each provided package and aggregates all results. -// It returns all test results and any error encountered during testing. // RunTestPackages executes the tests for each provided package and aggregates all results. func (r *Runner) RunTestPackages(packages []string) ([]reports.TestResult, error) { - // Initial runs. 
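+	// Flow: run every package RunCount times via the injected executor,
+	// collecting one raw JSON output file per run, then hand all collected
+	// files to the injected parser in a single pass.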
+	rawOutputFiles := make([]string, 0) // Collect output file paths for this run
+	execCfg := r.getExecutorConfig()
+
 	for _, p := range packages {
-		for i := range r.RunCount {
-			jsonFilePath, passed, err := r.runTestPackage(p, i)
+		for runIdx := 0; runIdx < r.RunCount; runIdx++ {
+			// Delegate execution to the executor
+			jsonFilePath, passed, err := r.exec.RunTestPackage(execCfg, p, runIdx)
 			if err != nil {
-				return nil, fmt.Errorf("failed to run tests in package %s: %w", p, err)
+				// Handle executor errors (e.g., command not found, setup issues)
+				return nil, fmt.Errorf("executor failed for package %s on run %d: %w", p, runIdx, err)
+			}
+			if jsonFilePath != "" { // Append path even if tests failed (passed == false)
+				rawOutputFiles = append(rawOutputFiles, jsonFilePath)
 			}
-			r.rawOutputFiles = append(r.rawOutputFiles, jsonFilePath)
 			if !passed && r.FailFast {
-				break
+				log.Warn().Msgf("FailFast enabled: Stopping run after failure in package %s", p)
+				goto ParseResults // Exit both loops early
 			}
 		}
 	}

-	// Parse initial runs.
-	results, err := r.parseTestResults("run", r.RunCount)
+ParseResults:
+	// Delegate parsing to the parser
+	if len(rawOutputFiles) == 0 {
+		log.Warn().Msg("No output files were generated, likely due to FailFast or an early error.")
+		return []reports.TestResult{}, nil // Return empty results
+	}
+
+	log.Info().Int("file_count", len(rawOutputFiles)).Msg("Parsing output files")
+	parserCfg := r.getParserConfig()
+	// The returned file paths are discarded, as they are not used in this flow
+	results, _, err := r.parser.ParseFiles(rawOutputFiles, "run", len(rawOutputFiles), parserCfg)
 	if err != nil {
+		// parser.ErrBuild already carries the build output context, so return it unwrapped
+		if errors.Is(err, parser.ErrBuild) {
+			return nil, err
+		}
 		return nil, fmt.Errorf("failed to parse test results: %w", err)
 	}
 	return results, nil
}

-// RunTestCmd runs an arbitrary command testCmd (like ["go", "run", "my_test.go", ...])
-// that produces the same JSON lines that 'go test -json' would produce on stdout.
-// It captures those lines in a temp file, then parses them for pass/fail/panic/race data.
+// RunTestCmd runs an arbitrary command testCmd that produces Go test JSON output.
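+// (e.g. ["go", "run", "my_test.go", ...]); the command's stdout is expected to emit the
+// same JSON lines as `go test -json`, captured to a file per run and parsed like package runs.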
func (r *Runner) RunTestCmd(testCmd []string) ([]reports.TestResult, error) {
-	for i := range r.RunCount {
-		jsonOutputPath, passed, err := r.runCmd(testCmd, i)
+	rawOutputFiles := make([]string, 0) // Collect output file paths for this run
+	execCfg := r.getExecutorConfig()
+
+	for i := 0; i < r.RunCount; i++ {
+		// Delegate execution to the executor
+		jsonOutputPath, passed, err := r.exec.RunCmd(execCfg, testCmd, i)
 		if err != nil {
-			return nil, fmt.Errorf("failed to run test command: %w", err)
+			// Handle executor errors
+			return nil, fmt.Errorf("executor failed for custom command on run %d: %w", i, err)
+		}
+		if jsonOutputPath != "" {
+			rawOutputFiles = append(rawOutputFiles, jsonOutputPath)
 		}
-		r.rawOutputFiles = append(r.rawOutputFiles, jsonOutputPath)
 		if !passed && r.FailFast {
-			break
+			log.Warn().Msg("FailFast enabled: Stopping run after custom command failure")
+			break // Exit loop early
 		}
 	}

-	results, err := r.parseTestResults("run", r.RunCount)
-	if err != nil {
-		return nil, fmt.Errorf("failed to parse test results: %w", err)
+	// Delegate parsing to the parser
+	if len(rawOutputFiles) == 0 {
+		log.Warn().Msg("No output files were generated for custom command, likely due to FailFast or an early error.")
+		return []reports.TestResult{}, nil
 	}
-	return results, nil
-}
-
-type exitCoder interface {
-	ExitCode() int
-}
-
-// runTestPackage runs the tests for a given package and returns the path to the output file.
-func (r *Runner) runTestPackage(packageName string, runCount int) (string, bool, error) {
-	args := []string{"test", packageName, "-json"}
-	if r.GoTestCountFlag != nil {
-		args = append(args, fmt.Sprintf("-count=%d", *r.GoTestCountFlag))
-	}
-	if r.GoTestRaceFlag {
-		args = append(args, "-race")
-	}
-	if r.GoTestTimeoutFlag != "" {
-		args = append(args, fmt.Sprintf("-timeout=%s", r.GoTestTimeoutFlag))
-	}
-	if len(r.Tags) > 0 {
-		args = append(args, fmt.Sprintf("-tags=%s", strings.Join(r.Tags, ",")))
-	}
-	if r.UseShuffle {
-		if r.ShuffleSeed != "" {
-			args = append(args, fmt.Sprintf("-shuffle=%s", r.ShuffleSeed))
-		} else {
-			args = append(args, "-shuffle=on")
+	log.Info().Int("file_count", len(rawOutputFiles)).Msg("Parsing output files from custom command")
+	parserCfg := r.getParserConfig()
+	// The returned file paths are discarded here as well
+	results, _, err := r.parser.ParseFiles(rawOutputFiles, "run", len(rawOutputFiles), parserCfg)
+	if err != nil {
+		if errors.Is(err, parser.ErrBuild) {
+			return nil, err
 		}
-	}
-	if len(r.SkipTests) > 0 {
-		skipPattern := strings.Join(r.SkipTests, "|")
-		args = append(args, fmt.Sprintf("-skip=%s", skipPattern))
-	}
-	if len(r.SelectTests) > 0 {
-		selectPattern := strings.Join(r.SelectTests, "$|^")
-		args = append(args, fmt.Sprintf("-run=^%s$", selectPattern))
+		return nil, fmt.Errorf("failed to parse test results from custom command: %w", err)
 	}

-	err := os.MkdirAll(RawOutputDir, 0o755)
-	if err != nil {
-		return "", false, fmt.Errorf("failed to create raw output directory: %w", err)
-	}
-	// Create a temporary file to store the output
-	saniPackageName := filepath.Base(packageName)
-	tmpFile, err := os.CreateTemp(RawOutputDir, fmt.Sprintf("test-output-%s-%d-*.json", saniPackageName, runCount))
-	if err != nil {
-		return "", false, fmt.Errorf("failed to create temp file: %w", err)
-	}
-	defer tmpFile.Close()
+	return results, nil
+}

-	if r.Verbose {
-		log.Info().Str("raw output file", tmpFile.Name()).Str("command", fmt.Sprintf("go %s\n", strings.Join(args, " "))).Msg("Running
command") +// RerunFailedTests reruns specific tests that failed in previous runs using the Executor and Parser. +func (r *Runner) RerunFailedTests(failedTests []reports.TestResult, rerunCount int) ([]reports.TestResult, []string, error) { + if len(failedTests) == 0 || rerunCount <= 0 { + log.Info().Msg("No failed tests provided or rerun count is zero. Skipping reruns.") + return []reports.TestResult{}, []string{}, nil // Nothing to rerun } - // Run the command with output directed to the file - cmd := exec.Command("go", args...) - cmd.Dir = r.ProjectPath - cmd.Stdout = tmpFile - - err = cmd.Run() - if err != nil { - var exErr exitCoder - // Check if the error is due to a non-zero exit code - if errors.As(err, &exErr) && exErr.ExitCode() == 0 { - return "", false, fmt.Errorf("test command failed at %s: %w", packageName, err) + // Use a map for efficient lookup and update of currently failing tests + currentlyFailing := make(map[string]map[string]struct{}) // pkg -> testName -> exists + for _, tr := range failedTests { + if tr.TestPackage == "" || tr.TestName == "" { + log.Warn().Interface("test_result", tr).Msg("Skipping rerun for test result with missing package or name") + continue + } + if _, ok := currentlyFailing[tr.TestPackage]; !ok { + currentlyFailing[tr.TestPackage] = make(map[string]struct{}) } - return tmpFile.Name(), false, nil // Test failed + currentlyFailing[tr.TestPackage][tr.TestName] = struct{}{} } - return tmpFile.Name(), true, nil // Test succeeded -} - -// runCmd runs the user-supplied command once, captures its JSON output, -// and returns the temp file path, whether the test passed, and an error if any. -func (r *Runner) runCmd(testCmd []string, runIndex int) (tempFilePath string, passed bool, err error) { - // Create temp file for JSON output - err = os.MkdirAll(RawOutputDir, 0o755) - if err != nil { - return "", false, fmt.Errorf("failed to create raw output directory: %w", err) - } - tmpFile, err := os.CreateTemp(RawOutputDir, fmt.Sprintf("test-output-cmd-run%d-*.json", runIndex+1)) - if err != nil { - err = fmt.Errorf("failed to create temp file: %w", err) - return "", false, err + if len(currentlyFailing) == 0 { + log.Warn().Msg("No valid failed tests found to rerun after filtering.") + return []reports.TestResult{}, []string{}, nil } - defer tmpFile.Close() - - cmd := exec.Command(testCmd[0], testCmd[1:]...) 
//nolint:gosec - cmd.Dir = r.ProjectPath - - cmd.Stdout = tmpFile - cmd.Stderr = os.Stderr - err = cmd.Run() - - tempFilePath = tmpFile.Name() - - // Determine pass/fail from exit code - type exitCoder interface { - ExitCode() int - } - var ec exitCoder - if errors.As(err, &ec) { - // Non-zero exit code => test failure - passed = ec.ExitCode() == 0 - err = nil // Clear error since we handled it - return - } else if err != nil { - // Some other error that doesn't implement ExitCode() => real error - tempFilePath = "" - err = fmt.Errorf("error running test command: %w", err) - return tempFilePath, passed, err + if r.Verbose { + log.Info().Int("packages", len(currentlyFailing)).Int("rerun_count", rerunCount).Msg("Starting test reruns for failed tests") } - // Otherwise, test passed - passed = true - return tempFilePath, passed, nil -} - -type entry struct { - Action string `json:"Action"` - Test string `json:"Test"` - Package string `json:"Package"` - Output string `json:"Output"` - Elapsed float64 `json:"Elapsed"` // Decimal value in seconds -} + rerunOutputFiles := make([]string, 0) + baseExecCfg := r.getExecutorConfig() -func (e entry) String() string { - return fmt.Sprintf("Action: %s, Test: %s, Package: %s, Output: %s, Elapsed: %f", e.Action, e.Test, e.Package, e.Output, e.Elapsed) -} - -// parseTestResults reads the test output Go test json output files and returns processed TestResults. -// -// Go test results have a lot of edge cases and strange behavior, especially when running in parallel, -// and any panic throws the whole thing into disarray. -// If any test in packageA panics, all tests in packageA will stop running and never report their results. -// The test that panicked will report its panic in Go test output, but will often misattribute the panic to a different test. -// It will also sometimes mark the test with both a panic and a failure, double-counting the test run. -// It's possible to properly attribute panics to the test that caused them, but it's not possible to distinguish between -// panics and failures at that point. -// Subtests add more complexity, as panics in subtests are only reported in their parent's output, -// and cannot be accurately attributed to the subtest that caused them. -func (r *Runner) parseTestResults(runPrefix string, runCount int) ([]reports.TestResult, error) { - var parseFilePaths = r.rawOutputFiles - - // If the option is enabled, transform each JSON output file before parsing. - if r.IgnoreParentFailuresOnSubtests { - err := r.transformTestOutputFiles(r.rawOutputFiles) - if err != nil { - return nil, err + // 2. Iterate Rerun Count + for i := 0; i < rerunCount; i++ { + if len(currentlyFailing) == 0 { + log.Info().Int("iteration", i).Msg("All previously failing tests passed in reruns. 
Stopping reruns early.") + break // Stop if no more tests are failing } - parseFilePaths = r.transformedOutputFiles - } - var ( - testDetails = make(map[string]*reports.TestResult) // Holds run, pass counts, and other details for each test - panickedPackages = map[string]struct{}{} // Packages with tests that panicked - racePackages = map[string]struct{}{} // Packages with tests that raced - packageLevelOutputs = map[string][]string{} // Package-level outputs - testsWithSubTests = map[string][]string{} // Parent tests that have subtests - panicDetectionMode = false - raceDetectionMode = false - detectedEntries = []entry{} // race or panic entries - expectedRuns = runCount - ) - - runNumber := 0 - // Process each file - for _, filePath := range parseFilePaths { - runNumber++ - runID := fmt.Sprintf("%s%d", runPrefix, runNumber) - file, err := os.Open(filePath) - if err != nil { - return nil, fmt.Errorf("failed to open test output file: %w", err) + if r.Verbose { + log.Info().Int("iteration", i+1).Int("total", rerunCount).Int("tests_to_rerun", countMapKeys(currentlyFailing)).Msg("Running rerun iteration") } - scanner := bufio.NewScanner(file) - var precedingLines []string // Store preceding lines for context - var followingLines []string // To collect lines after an error + failingThisIteration := make(map[string]map[string]struct{}) // Track tests still failing *after this iteration* - for scanner.Scan() { - line := scanner.Text() - precedingLines = append(precedingLines, line) - - // Limit precedingLines to the last 15 lines - if len(precedingLines) > 15 { - precedingLines = precedingLines[1:] + // 3. Execute Rerun per Package for currently failing tests + for pkg, testsMap := range currentlyFailing { + if len(testsMap) == 0 { + continue } - var entryLine entry - if err := json.Unmarshal(scanner.Bytes(), &entryLine); err != nil { - // Collect 15 lines after the error for more context - for scanner.Scan() && len(followingLines) < 15 { - followingLines = append(followingLines, scanner.Text()) - } - - // Combine precedingLines and followingLines to provide 15 lines before and after - context := append(precedingLines, followingLines...) 
- return nil, fmt.Errorf("failed to parse json test output near lines:\n%s\nerror: %w", strings.Join(context, "\n"), err) - } - if entryLine.Action == "build-fail" { - _, err := file.Seek(0, 0) - if err != nil { - return nil, fmt.Errorf("%w: %w", failedToShowBuildErr, buildErr) - } - // Print all build errors - buildErrs, err := io.ReadAll(file) - if err != nil { - return nil, fmt.Errorf("%w: %w", failedToShowBuildErr, buildErr) - } - fmt.Println(string(buildErrs)) - return nil, buildErr + testsToRun := make([]string, 0, len(testsMap)) + for testName := range testsMap { + testsToRun = append(testsToRun, testName) } - var result *reports.TestResult - if entryLine.Test != "" { - // If it's a subtest, associate it with its parent for easier processing of panics later - key := fmt.Sprintf("%s/%s", entryLine.Package, entryLine.Test) - parentTestName, subTestName := parseSubTest(entryLine.Test) - if subTestName != "" { - parentTestKey := fmt.Sprintf("%s/%s", entryLine.Package, parentTestName) - testsWithSubTests[parentTestKey] = append(testsWithSubTests[parentTestKey], subTestName) - } - - if _, exists := testDetails[key]; !exists { - testDetails[key] = &reports.TestResult{ - TestName: entryLine.Test, - TestPackage: entryLine.Package, - PassRatio: 0, - PassedOutputs: make(map[string][]string), - FailedOutputs: make(map[string][]string), - PackageOutputs: []string{}, - } - } - result = testDetails[key] + // Escape test names for regex and join with | + escapedTests := make([]string, len(testsToRun)) + for j, testName := range testsToRun { + escapedTests[j] = regexp.QuoteMeta(testName) } + testPattern := fmt.Sprintf("^(?:%s)$", strings.Join(escapedTests, "|")) - if entryLine.Output != "" { - if panicDetectionMode || raceDetectionMode { // currently collecting panic or race output - detectedEntries = append(detectedEntries, entryLine) - continue - } else if startPanicRe.MatchString(entryLine.Output) { // found a panic, start collecting output - panickedPackages[entryLine.Package] = struct{}{} - detectedEntries = append(detectedEntries, entryLine) - panicDetectionMode = true - continue // Don't process this entry further - } else if startRaceRe.MatchString(entryLine.Output) { - racePackages[entryLine.Package] = struct{}{} - detectedEntries = append(detectedEntries, entryLine) - raceDetectionMode = true - continue // Don't process this entry further - } else if entryLine.Test != "" && entryLine.Action == "output" { - // Collect outputs regardless of pass or fail - if result.Outputs == nil { - result.Outputs = make(map[string][]string) - } - result.Outputs[runID] = append(result.Outputs[runID], entryLine.Output) - } else if entryLine.Test == "" { - if _, exists := packageLevelOutputs[entryLine.Package]; !exists { - packageLevelOutputs[entryLine.Package] = []string{} - } - packageLevelOutputs[entryLine.Package] = append(packageLevelOutputs[entryLine.Package], entryLine.Output) - } else { - // Collect outputs per run, per test action - switch entryLine.Action { - case "pass": - result.PassedOutputs[runID] = append(result.PassedOutputs[runID], entryLine.Output) - case "fail": - result.FailedOutputs[runID] = append(result.FailedOutputs[runID], entryLine.Output) - default: - // Handle other actions if necessary - } - } - } + // Create specific executor config for this rerun invocation + rerunExecCfg := baseExecCfg // Copy base config + one := 1 + rerunExecCfg.GoTestCountFlag = &one // Force -count=1 for rerun + rerunExecCfg.SelectTests = []string{testPattern} // Target specific tests via -run + 
rerunExecCfg.SkipTests = nil // Ensure no tests are skipped via -skip - // TODO: An argument could be made to check for entryLine.Action != "output" instead, but need to check edge cases - if (panicDetectionMode || raceDetectionMode) && entryLine.Action == "fail" { // End of panic or race output - var outputs []string - for _, entry := range detectedEntries { - outputs = append(outputs, entry.Output) - } - if panicDetectionMode { - panicTest, timeout, err := attributePanicToTest(outputs) - if err != nil { - log.Error().Msg("Unable to attribute panic to a test") - fmt.Println(err.Error()) - panicTest = "UnableToAttributePanicToTestPleaseInvestigate" - } - panicTestKey := fmt.Sprintf("%s/%s", entryLine.Package, panicTest) - - // Ensure the test exists in testDetails - result, exists := testDetails[panicTestKey] - if !exists { - // Create a new TestResult if it doesn't exist - result = &reports.TestResult{ - TestName: panicTest, - TestPackage: entryLine.Package, - PassRatio: 0, - PassedOutputs: make(map[string][]string), - FailedOutputs: make(map[string][]string), - PackageOutputs: []string{}, - } - testDetails[panicTestKey] = result - } - - result.Panic = true - result.Timeout = timeout - result.Failures++ - result.Runs++ - - // Handle outputs - for _, entry := range detectedEntries { - if entry.Test == "" { - result.PackageOutputs = append(result.PackageOutputs, entry.Output) - } else { - runID := fmt.Sprintf("run%d", runNumber) - result.FailedOutputs[runID] = append(result.FailedOutputs[runID], entry.Output) - } - } - } else if raceDetectionMode { - raceTest, err := attributeRaceToTest(outputs) - if err != nil { - log.Warn().Msg("Unable to attribute race to a test") - fmt.Println(err.Error()) - raceTest = "UnableToAttributeRaceTestPleaseInvestigate" - } - raceTestKey := fmt.Sprintf("%s/%s", entryLine.Package, raceTest) - - // Ensure the test exists in testDetails - result, exists := testDetails[raceTestKey] - if !exists { - // Create a new TestResult if it doesn't exist - result = &reports.TestResult{ - TestName: raceTest, - TestPackage: entryLine.Package, - PassRatio: 0, - PassedOutputs: make(map[string][]string), - FailedOutputs: make(map[string][]string), - PackageOutputs: []string{}, - } - testDetails[raceTestKey] = result - } - - result.Race = true - result.Failures++ - result.Runs++ - - // Handle outputs - for _, entry := range detectedEntries { - if entry.Test == "" { - result.PackageOutputs = append(result.PackageOutputs, entry.Output) - } else { - runID := fmt.Sprintf("run%d", runNumber) - result.FailedOutputs[runID] = append(result.FailedOutputs[runID], entry.Output) - } - } - } - - detectedEntries = []entry{} - panicDetectionMode = false - raceDetectionMode = false - continue + if r.Verbose { + log.Info().Str("package", pkg).Str("pattern", testPattern).Int("rerun_iter", i+1).Msg("Executing package rerun") } - switch entryLine.Action { - case "pass": - if entryLine.Test != "" { - duration, err := time.ParseDuration(strconv.FormatFloat(entryLine.Elapsed, 'f', -1, 64) + "s") - if err != nil { - return nil, fmt.Errorf("failed to parse duration: %w", err) - } - result.Durations = append(result.Durations, duration) - result.Successes++ - - // Move outputs to PassedOutputs - if result.PassedOutputs == nil { - result.PassedOutputs = make(map[string][]string) - } - result.PassedOutputs[runID] = result.Outputs[runID] - // Clear temporary outputs - delete(result.Outputs, runID) - } - case "fail": - if entryLine.Test != "" { - duration, err := 
time.ParseDuration(strconv.FormatFloat(entryLine.Elapsed, 'f', -1, 64) + "s") - if err != nil { - return nil, fmt.Errorf("failed to parse duration: %w", err) - } - result.Durations = append(result.Durations, duration) - result.Failures++ - - // Move outputs to FailedOutputs - if result.FailedOutputs == nil { - result.FailedOutputs = make(map[string][]string) - } - result.FailedOutputs[runID] = result.Outputs[runID] - // Clear temporary outputs - delete(result.Outputs, runID) - } - case "skip": - if entryLine.Test != "" { - result.Skipped = true - result.Skips++ - } - case "output": - // Handled above when entryLine.Test is not empty + jsonOutputPath, passed, err := r.exec.RunTestPackage(rerunExecCfg, pkg, i) + if err != nil { + // If execution fails for a package, return the error immediately + log.Error().Err(err).Str("package", pkg).Int("rerun_iteration", i+1).Msg("Error executing rerun command for package") + return nil, nil, fmt.Errorf("error on rerun execution for package %s: %w", pkg, err) } - if entryLine.Test != "" { - result.Runs = result.Successes + result.Failures - if result.Runs > 0 { - result.PassRatio = float64(result.Successes) / float64(result.Runs) - } else { - result.PassRatio = 1 - } + if jsonOutputPath != "" { + rerunOutputFiles = append(rerunOutputFiles, jsonOutputPath) } - } - - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("reading test output file: %w", err) - } - if err = file.Close(); err != nil { - log.Warn().Err(err).Str("file", filePath).Msg("failed to close file") - } - } - var results []reports.TestResult - // Check through parent tests for panics, and bubble possible panics down - for parentTestKey, subTests := range testsWithSubTests { - if parentTestResult, exists := testDetails[parentTestKey]; exists { - if parentTestResult.Panic { - for _, subTest := range subTests { - // Include parent test name in subTestKey - subTestKey := fmt.Sprintf("%s/%s/%s", parentTestResult.TestPackage, parentTestResult.TestName, subTest) - if subTestResult, exists := testDetails[subTestKey]; exists { - if subTestResult.Failures > 0 { - subTestResult.Panic = true - // Initialize Outputs map if nil - if subTestResult.FailedOutputs == nil { - subTestResult.FailedOutputs = make(map[string][]string) - } - // Add the message to each run's output - for runID := range subTestResult.FailedOutputs { - subTestResult.FailedOutputs[runID] = append(subTestResult.FailedOutputs[runID], "Panic in parent test") - } - } - } else { - log.Warn().Str("expected subtest", subTestKey).Str("parent test", parentTestKey).Msg("expected subtest not found in parent test") - } + // If the command failed (exit code != 0), keep all tests from this package in the failing list for the next iteration. + // Otherwise (passed=true), assume tests in this run passed and remove them from the failing list. 
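+			// Note: this is package-level granularity; a test that passed this iteration
+			// is rerun again if any other test in its package failed the package run.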
+ if !passed { + if _, ok := failingThisIteration[pkg]; !ok { + failingThisIteration[pkg] = make(map[string]struct{}) } - } - } else { - log.Warn().Str("parent test", parentTestKey).Msg("expected parent test not found") - } - } - for _, result := range testDetails { - if result.Runs > expectedRuns { // Panics can introduce double-counting test failures, this is a correction for it - if result.Panic { - result.Failures = expectedRuns - result.Runs = expectedRuns - } else { - log.Warn().Str("test", result.TestName).Int("actual runs", result.Runs).Int("expected runs", expectedRuns).Msg("unexpected test runs") - } - } - // If a package panicked, all tests in that package will be marked as panicking - if _, panicked := panickedPackages[result.TestPackage]; panicked { - result.PackagePanic = true - } - if outputs, exists := packageLevelOutputs[result.TestPackage]; exists { - result.PackageOutputs = outputs - } - results = append(results, *result) - } - - // Omit success outputs if requested - if r.OmitOutputsOnSuccess { - for i := range results { - results[i].PassedOutputs = make(map[string][]string) - results[i].Outputs = make(map[string][]string) - } - } - - return results, nil -} - -// transformTestOutputFiles transforms the test output JSON files to ignore parent failures when only subtests fail. -// It returns the paths to the transformed files. -func (r *Runner) transformTestOutputFiles(filePaths []string) error { - err := os.MkdirAll(RawOutputTransformedDir, 0o755) - if err != nil { - return fmt.Errorf("failed to create raw output directory: %w", err) - } - for _, origPath := range filePaths { - inFile, err := os.Open(origPath) - if err != nil { - return fmt.Errorf("failed to open original file %s: %w", origPath, err) - } - // Create a temporary file for the transformed output. - outFile, err := os.Create(filepath.Join(RawOutputTransformedDir, fmt.Sprintf("transformed-%s.json", filepath.Base(origPath)))) - if err != nil { - inFile.Close() - return fmt.Errorf("failed to create transformed temp file: %w", err) - } - // Transform the JSON output. - // The transformer option is set to ignore parent failures when only subtests fail. - err = transformer.TransformJSON(inFile, outFile, transformer.NewOptions(true)) - inFile.Close() - outFile.Close() - if err != nil { - return fmt.Errorf("failed to transform output file %s: %v", origPath, err) - } - // Use the transformed file path. - r.transformedOutputFiles = append(r.transformedOutputFiles, outFile.Name()) - } - return nil -} - -var ( - // Regex to extract a valid test function name from a panic message. - // This is the most common situation for test panics, e.g. 
- // github.com/smartcontractkit/chainlink/deployment/keystone/changeset_test.TestDeployBalanceReader(0xc000583c00) - nestedTestNameRe = regexp.MustCompile(`\.(Test[^\s]+?)(?:\.[^(]+)?\s*\(`) - - ErrFailedToAttributePanicToTest = errors.New("failed to attribute panic to test") - ErrFailedToAttributeRaceToTest = errors.New("failed to attribute race to test") - ErrFailedToParseTimeoutDuration = errors.New("failed to parse timeout duration") - ErrFailedToExtractTimeoutDuration = errors.New("failed to extract timeout duration") - ErrDetectedLogAfterCompleteFailedAttribution = errors.New("detected a log after test has completed panic, but failed to properly attribute it") - ErrDetectedTimeoutFailedParse = errors.New("detected test timeout, but failed to parse the duration from the test") - ErrDetectedTimeoutFailedAttribution = errors.New("detected test timeout, but failed to attribute the timeout to a specific test") -) - -// attributePanicToTest properly attributes panics to the test that caused them. -// There are a lot of edge cases and strange behavior in Go test output when it comes to panics. -func attributePanicToTest(outputs []string) (test string, timeout bool, err error) { - var ( - // Regex to check if the panic is from a log after a goroutine, e.g. - // panic: Log in goroutine after Test_workflowRegisteredHandler/skips_fetch_if_secrets_url_is_missing has completed: - testLogAfterTestRe = regexp.MustCompile(`^panic: Log in goroutine after (Test[^\s]+) has completed:`) - - // Check if the panic message indicates a timeout, e.g. - // panic: test timed out after 10m0s - didTestTimeoutRe = regexp.MustCompile(`^panic: test timed out after ([^\s]+)`) - // Regex to extract a valid test function name from a panic message if the panic is a timeout, e.g. - // TestTimedOut (10m0s) - timedOutTestNameRe = regexp.MustCompile(`^(Test[^\s]+)\s+\((.*)\)`) - timeoutDurationStr string - timeoutDuration time.Duration - ) - - for _, output := range outputs { - output = strings.TrimSpace(output) - // Check if the panic message indicates a timeout - // If so, extract the timeout duration and switch to timeout mode - if didTestTimeoutRe.MatchString(output) { - timeout = true - if len(didTestTimeoutRe.FindStringSubmatch(output)) > 1 { - timeoutDurationStr = didTestTimeoutRe.FindStringSubmatch(output)[1] - timeoutDuration, err = time.ParseDuration(timeoutDurationStr) - if err != nil { - return "", true, fmt.Errorf("%w: %w using this output:\n\n%s", err, ErrFailedToParseTimeoutDuration, output) + for testName := range testsMap { + failingThisIteration[pkg][testName] = struct{}{} } - } else { - return "", true, fmt.Errorf("%w using this output:\n\n%s", ErrFailedToExtractTimeoutDuration, output) - } - } - - if testLogAfterTestRe.MatchString(output) { - // If the panic message indicates a log after a test, extract the test name - match := testLogAfterTestRe.FindStringSubmatch(output) - if len(match) > 1 { - testName := strings.TrimSpace(match[1]) - return testName, timeout, nil - } else { - return "", false, fmt.Errorf( - "%w using this output:\n\n%s", ErrDetectedLogAfterCompleteFailedAttribution, strings.Join(outputs, ""), - ) } - } + } // end loop over packages for this iteration - // If in timeout mode, look for test names in the panic message and check if any match the timeout duration, e.g. 
-	// panic: test timed out after 10m0s
-	// running tests:
-	// TestAddAndPromoteCandidatesForNewChain (22s) // Nope
-	// TestAddAndPromoteCandidatesForNewChain/Remote_chains_owned_by_MCMS (22s) // Nope
-	// TestTimeout (10m0s) // Yes
-	// TestConnectNewChain/Use_production_router_(with_MCMS) (1m1s) // Nope
-	// TestJobSpecChangeset (0s) // Nope
-	// Test_ActiveCandidate (1m1s) // Nope
-		if timeout {
-			if timedOutTestNameRe.MatchString(output) {
-				matchTimedOutTestName := timedOutTestNameRe.FindStringSubmatch(output)
-				if len(matchTimedOutTestName) > 1 {
-					testName := strings.TrimSpace(matchTimedOutTestName[1])
-					testDurationStr := strings.TrimSpace(matchTimedOutTestName[2])
-					testDuration, err := time.ParseDuration(testDurationStr)
-					if err != nil {
-						return "", true, fmt.Errorf("%w: %w using this output:\n\n%s", err, ErrDetectedTimeoutFailedParse, output)
-					}
-					if testDuration >= timeoutDuration {
-						return testName, true, nil
-					}
-				}
-			}
-		} else {
-			matchNestedTestName := nestedTestNameRe.FindStringSubmatch(output)
-			if len(matchNestedTestName) > 1 {
-				return strings.TrimSpace(matchNestedTestName[1]), false, nil
-			}
-		}
-	}
-	// If we reach here, we couldn't attribute the panic to a test in the loop
+		// Update the set of failing tests for the next iteration
+		currentlyFailing = failingThisIteration
+	} // end loop over rerunCount

-	if timeout {
-		return "", timeout, fmt.Errorf("%w using this output:\n\n%s", ErrDetectedTimeoutFailedAttribution, strings.Join(outputs, ""))
+	// 4. Parse Rerun Outputs
+	if len(rerunOutputFiles) == 0 {
+		log.Warn().Msg("No output files were generated during reruns (possibly due to execution errors).")
+		return []reports.TestResult{}, []string{}, nil
 	}
-	return "", timeout, fmt.Errorf("%w using this output:\n\n%s", ErrFailedToAttributePanicToTest, strings.Join(outputs, ""))
-}
-
-// attributeRaceToTest properly attributes races to the test that caused them.
-func attributeRaceToTest(outputs []string) (string, error) {
-	for _, output := range outputs {
-		match := nestedTestNameRe.FindStringSubmatch(output)
-		if len(match) > 1 {
-			return strings.TrimSpace(match[1]), nil
+	log.Info().Int("file_count", len(rerunOutputFiles)).Msg("Parsing rerun output files")
+	parserCfg := r.getParserConfig()
+	// Each test included in the rerun output is expected to have run rerunCount times,
+	// so rerunCount is the right expectedRuns value for the parser's overcount correction.
+	rerunResults, parsedFilePaths, err := r.parser.ParseFiles(rerunOutputFiles, "rerun", rerunCount, parserCfg)
+	if err != nil {
+		if errors.Is(err, parser.ErrBuild) {
+			// A build error during reruns is unexpected; log it and fall through to the wrapped error
+			log.Error().Err(err).Msg("Build error occurred unexpectedly during test reruns")
 		}
+		// Don't return file paths on a parse failure; a report built from them would not be useful
+		return nil, nil, fmt.Errorf("failed to parse rerun results: %w", err)
 	}
-	return "", fmt.Errorf("%w, using this output:\n\n%s",
-		ErrFailedToAttributeRaceToTest, strings.Join(outputs, ""),
-	)
-}

-// parseSubTest checks if a test name is a subtest and returns the parent and sub names.
-func parseSubTest(testName string) (parentTestName, subTestName string) {
-	parts := strings.SplitN(testName, "/", 2)
-	if len(parts) == 1 {
-		return parts[0], ""
-	}
-	return parts[0], parts[1]
+	// 5. Return Results
+	log.Info().Int("result_count", len(rerunResults)).Msg("Finished parsing rerun results")
+	// Return the parsed results along with the list of files that were parsed
+	return rerunResults, parsedFilePaths, nil
}

-func (r *Runner) RerunFailedTests(failedTests []reports.TestResult, rerunCount int) ([]reports.TestResult, []string, error) {
-	// Group the provided failed tests by package for more efficient reruns
-	failingTestsByPackage := make(map[string][]string)
-	for _, tr := range failedTests {
-		failingTestsByPackage[tr.TestPackage] = append(failingTestsByPackage[tr.TestPackage], tr.TestName)
+// Helper function to count keys in the nested map for logging
+func countMapKeys(m map[string]map[string]struct{}) int {
+	count := 0
+	for _, subMap := range m {
+		count += len(subMap)
 	}
-
-	if r.Verbose {
-		log.Info().Msgf("Rerunning failing tests grouped by package: %v", failingTestsByPackage)
-	}
-
-	// Rerun each failing test package up to RerunCount times
-	for i := range rerunCount {
-		for pkg, tests := range failingTestsByPackage {
-			// Build regex pattern to match all failing tests in this package
-			testPattern := fmt.Sprintf("^(%s)$", strings.Join(tests, "|"))
-
-			cmd := []string{
-				"go", "test",
-				pkg,
-				"-count=1",
-				"-run", testPattern,
-				"-json",
-			}
-
-			// Add other test flags
-			if r.GoTestRaceFlag {
-				cmd = append(cmd, "-race")
-			}
-			if r.GoTestTimeoutFlag != "" {
-				cmd = append(cmd, fmt.Sprintf("-timeout=%s", r.GoTestTimeoutFlag))
-			}
-			if len(r.Tags) > 0 {
-				cmd = append(cmd, fmt.Sprintf("-tags=%s", strings.Join(r.Tags, ",")))
-			}
-			if r.Verbose {
-				cmd = append(cmd, "-v")
-				log.Info().Msgf("Rerun iteration %d for package %s: %v", i+1, pkg, cmd)
-			}
-
-			// Run the package tests
-			jsonOutputPath, _, err := r.runCmd(cmd, i)
-			if err != nil {
-				return nil, nil, fmt.Errorf("error on rerunCmd for package %s: %w", pkg, err)
-			}
-			r.rawOutputFiles = append(r.rawOutputFiles, jsonOutputPath)
-		}
-	}
-
-	// Parse all rerun results at once with a consistent prefix
-	rerunResults, err := r.parseTestResults("rerun", rerunCount)
-	if err != nil {
-		return nil, r.rawOutputFiles, fmt.Errorf("failed to parse rerun results: %w", err)
-	}
-
-	return rerunResults, r.rawOutputFiles, nil
+	return count
}
diff --git a/tools/flakeguard/runner/runner_integration_test.go b/tools/flakeguard/runner/runner_integration_test.go
new file mode 100644
index 000000000..5971655f4
--- /dev/null
+++ b/tools/flakeguard/runner/runner_integration_test.go
@@ -0,0 +1,332 @@
+// Integration tests for the runner package, executing real tests.
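+// They use the default (real) executor and parser against the example test package,
+// so they are slower than the mocked unit tests in runner_test.go.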
+package runner_test + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner" +) + +var ( + flakyTestPackagePath = "./example_test_package" + debugDir = "_debug_outputs_integration" +) + +type expectedTestResult struct { + allSuccesses bool + someSuccesses bool + allFailures bool + someFailures bool + allSkips bool + testPanic bool + packagePanic bool + race bool + maximumRuns int + + exactRuns *int + minimumRuns *int + exactPassRate *float64 + minimumPassRate *float64 + maximumPassRate *float64 + + seen bool +} + +func TestRunIntegration(t *testing.T) { + var ( + zeroRuns = 0 + oneCount = 1 + defaultRunCount = 3 + successPassRate = 1.0 + failPassRate = 0.0 + ) + testCases := []struct { + name string + cfg runnerConfig + expectedTests map[string]*expectedTestResult + expectBuildErr bool + }{ + { + name: "default (integration)", + cfg: runnerConfig{ + ProjectPath: "../", + RunCount: defaultRunCount, + SkipTests: []string{"TestPanic", "TestFlakyPanic", "TestSubTestsSomePanic", "TestTimeout"}, + GoTestCountFlag: &oneCount, + OmitOutputs: true, + IgnoreSubtestErr: false, + Tags: []string{"example_package_tests"}, + }, + expectedTests: map[string]*expectedTestResult{ + "TestFlaky": {exactRuns: &defaultRunCount, someSuccesses: true, someFailures: true}, + "TestFail": {exactRuns: &defaultRunCount, allFailures: true, exactPassRate: &failPassRate}, + "TestFailLargeOutput": {exactRuns: &defaultRunCount, allFailures: true, exactPassRate: &failPassRate}, + "TestPass": {exactRuns: &defaultRunCount, allSuccesses: true, exactPassRate: &successPassRate}, + "TestSkipped": {exactRuns: &zeroRuns, allSkips: true, exactPassRate: &successPassRate}, + "TestRace": {exactRuns: &defaultRunCount, allSuccesses: true, exactPassRate: &successPassRate}, + "TestSubTestsAllPass": {exactRuns: &defaultRunCount, allSuccesses: true}, + "TestSubTestsAllPass/Pass1": {exactRuns: &defaultRunCount, allSuccesses: true}, + "TestSubTestsAllPass/Pass2": {exactRuns: &defaultRunCount, allSuccesses: true}, + "TestFailInParentAfterSubTests": {exactRuns: &defaultRunCount, allFailures: true}, + "TestFailInParentAfterSubTests/Pass1": {exactRuns: &defaultRunCount, allSuccesses: true}, + "TestFailInParentAfterSubTests/Pass2": {exactRuns: &defaultRunCount, allSuccesses: true}, + "TestFailInParentBeforeSubTests": {exactRuns: &defaultRunCount, allFailures: true}, + "TestSubTestsAllFail": {exactRuns: &defaultRunCount, allFailures: true}, + "TestSubTestsAllFail/Fail1": {exactRuns: &defaultRunCount, allFailures: true}, + "TestSubTestsAllFail/Fail2": {exactRuns: &defaultRunCount, allFailures: true}, + "TestSubTestsSomeFail": {exactRuns: &defaultRunCount, allFailures: true}, + "TestSubTestsSomeFail/Pass": {exactRuns: &defaultRunCount, allSuccesses: true}, + "TestSubTestsSomeFail/Fail": {exactRuns: &defaultRunCount, allFailures: true}, + }, + }, + { + name: "race (integration)", + cfg: runnerConfig{ + ProjectPath: "../", + RunCount: defaultRunCount, + SelectTests: []string{"TestRace"}, + GoTestRaceFlag: true, + OmitOutputs: true, + IgnoreSubtestErr: false, + Tags: []string{"example_package_tests"}, + }, + expectedTests: map[string]*expectedTestResult{ + "TestRace": {race: true, maximumRuns: defaultRunCount, allFailures: true}, + }, + }, + { + name: "always panic 
(integration)", + cfg: runnerConfig{ + ProjectPath: "../", + RunCount: defaultRunCount, + SelectTests: []string{"TestPanic"}, + GoTestCountFlag: &oneCount, + OmitOutputs: true, + Tags: []string{"example_package_tests"}, + }, + expectedTests: map[string]*expectedTestResult{ + "TestPanic": {packagePanic: true, testPanic: true, maximumRuns: defaultRunCount, allFailures: true}, + }, + }, + { + name: "flaky panic (integration)", + cfg: runnerConfig{ + ProjectPath: "../", + RunCount: defaultRunCount, + SelectTests: []string{"TestFlakyPanic"}, + GoTestCountFlag: &oneCount, + OmitOutputs: true, + Tags: []string{"example_package_tests"}, + }, + expectedTests: map[string]*expectedTestResult{ + // This test panics on first run, passes on second. We run 3 times. + // Expect PackagePanic=true, TestPanic=true (as it panicked at least once) + // Expect some failures (at least 1), some successes (at least 1). + // Exact runs should be defaultRunCount. + "TestFlakyPanic": {exactRuns: &defaultRunCount, packagePanic: true, testPanic: true, someSuccesses: true, someFailures: true}, + }, + }, + { + name: "subtest panic (integration)", + cfg: runnerConfig{ + ProjectPath: "../", + RunCount: defaultRunCount, + SelectTests: []string{"TestSubTestsSomePanic"}, + GoTestCountFlag: &oneCount, + OmitOutputs: true, + Tags: []string{"example_package_tests"}, + }, + expectedTests: map[string]*expectedTestResult{ + "TestSubTestsSomePanic": {exactRuns: &defaultRunCount, packagePanic: true, testPanic: true, allFailures: true}, // Parent fails due to subtest panic + "TestSubTestsSomePanic/Pass": {exactRuns: &defaultRunCount, packagePanic: true, testPanic: true, allFailures: true}, // Inherits panic, successes become failures + "TestSubTestsSomePanic/Panic": {exactRuns: &defaultRunCount, packagePanic: true, testPanic: true, allFailures: true}, // Panics directly + }, + }, + { + name: "failfast (integration)", + cfg: runnerConfig{ + ProjectPath: "../", + RunCount: defaultRunCount, // Will try 3 times, but fail-fast stops early + SelectTests: []string{"TestFail", "TestPass"}, + GoTestCountFlag: &oneCount, + FailFast: true, + OmitOutputs: true, + Tags: []string{"example_package_tests"}, + }, + expectedTests: map[string]*expectedTestResult{ + // Only one execution attempt happens because FailFast=true and TestFail fails. 
+ "TestFail": {exactRuns: &oneCount, allFailures: true}, + "TestPass": {exactRuns: &oneCount, allSuccesses: true}, + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + absProjectPath, err := filepath.Abs(tc.cfg.ProjectPath) + require.NoError(t, err) + + testRunner := runner.NewRunner( + absProjectPath, + false, + tc.cfg.RunCount, + tc.cfg.GoTestCountFlag, + tc.cfg.GoTestRaceFlag, + tc.cfg.GoTestTimeoutFlag, + tc.cfg.Tags, + tc.cfg.UseShuffle, + tc.cfg.ShuffleSeed, + tc.cfg.FailFast, + tc.cfg.SkipTests, + tc.cfg.SelectTests, + tc.cfg.IgnoreSubtestErr, + tc.cfg.OmitOutputs, + nil, // Use default executor + nil, // Use default parser + ) + + testResults, err := testRunner.RunTestPackages([]string{"./runner/example_test_package"}) + + if tc.expectBuildErr { + require.Error(t, err) + return + } + require.NoError(t, err) + + t.Cleanup(func() { + if !t.Failed() { + return + } + if err := os.MkdirAll(debugDir, 0755); err != nil { + t.Logf("error creating directory: %v", err) + return + } + saniTName := strings.ReplaceAll(t.Name(), "/", "_") + resultsFileName := filepath.Join(debugDir, fmt.Sprintf("test_results_%s.json", saniTName)) + jsonResults, err := json.MarshalIndent(testResults, "", " ") + if err != nil { + t.Logf("error marshalling test report: %v", err) + return + } + err = os.WriteFile(resultsFileName, jsonResults, 0644) //nolint:gosec + if err != nil { + t.Logf("error writing test results: %v", err) + return + } + t.Logf("Saved failing test results to %s", resultsFileName) + }) + + checkTestResults(t, tc.expectedTests, testResults) + }) + } +} + +// Helper function to check results against expectations +func checkTestResults(t *testing.T, expectedTests map[string]*expectedTestResult, actualResults []reports.TestResult) { + t.Helper() + assert.Equal(t, len(expectedTests), len(actualResults), "unexpected number of test results recorded") + + for _, result := range actualResults { + t.Run(fmt.Sprintf("checking results of %s", result.TestName), func(t *testing.T) { + require.NotNil(t, result, "test result was nil") + expected, ok := expectedTests[result.TestName] + require.True(t, ok, "unexpected test name found in results: %s", result.TestName) + require.False(t, expected.seen, "test '%s' was seen multiple times", result.TestName) + expected.seen = true + + if !expected.testPanic { + assert.False(t, result.Panic, "test '%s' should not have panicked", result.TestName) + } + + if expected.minimumRuns != nil { + assert.GreaterOrEqual(t, result.Runs, *expected.minimumRuns, "test '%s' had fewer runs (%d) than expected minimum (%d)", result.TestName, result.Runs, *expected.minimumRuns) + } + if expected.exactRuns != nil { + assert.Equal(t, *expected.exactRuns, result.Runs, "test '%s' had an unexpected number of runs", result.TestName) + } else { + assert.LessOrEqual(t, result.Runs, expected.maximumRuns, "test '%s' had more runs (%d) than expected maximum (%d)", result.TestName, result.Runs, expected.maximumRuns) + } + if expected.exactPassRate != nil { + assert.InDelta(t, *expected.exactPassRate, result.PassRatio, 0.001, "test '%s' had an unexpected pass ratio", result.TestName) + } + if expected.minimumPassRate != nil { + assert.Greater(t, result.PassRatio, *expected.minimumPassRate, "test '%s' had a pass ratio below the minimum", result.TestName) + } + if expected.maximumPassRate != nil { + assert.Less(t, result.PassRatio, *expected.maximumPassRate, "test '%s' had a pass ratio above the maximum", result.TestName) + } + if expected.allSuccesses { + 
assert.Equal(t, result.Runs, result.Successes, "test '%s' has %d runs and should have passed all, only passed %d", result.TestName, result.Runs, result.Successes) + assert.Zero(t, result.Failures, "test '%s' has %d runs and should have passed all, but failed %d", result.TestName, result.Runs, result.Failures) + assert.False(t, result.Panic, "test '%s' should not have panicked", result.TestName) + assert.False(t, result.Race, "test '%s' should not have raced", result.TestName) + } + if expected.someSuccesses { + assert.Greater(t, result.Successes, 0, "test '%s' has %d runs and should have passed some runs, passed none", result.TestName, result.Runs) + } + if expected.allFailures { + assert.Equal(t, result.Runs, result.Failures, "test '%s' has %d runs and should have failed all, only failed %d", result.TestName, result.Runs, result.Failures) + assert.Zero(t, result.Successes, "test '%s' has %d runs and should have failed all, but succeeded %d", result.TestName, result.Runs, result.Successes) + } + if expected.packagePanic { + assert.True(t, result.PackagePanic, "test '%s' should have package panicked", result.TestName) + } + if expected.testPanic { + assert.True(t, result.Panic, "test '%s' should have panicked", result.TestName) + assert.True(t, result.PackagePanic, "test '%s' should have package panicked", result.TestName) + expected.someFailures = true + } + if expected.someFailures { + assert.Greater(t, result.Failures, 0, "test '%s' has %d runs and should have failed some runs, failed none", result.TestName, result.Runs) + } + if expected.allSkips { + assert.Equal(t, 0, result.Runs, "test '%s' has %d runs and should have skipped all of them, no runs expected", result.TestName, result.Runs) + assert.True(t, result.Skipped, "test '%s' should be marked skipped", result.TestName) + assert.Zero(t, result.Successes, "test '%s' should have skipped all runs, but succeeded some", result.TestName) + assert.Zero(t, result.Failures, "test '%s' should have skipped all runs, but failed some", result.TestName) + assert.False(t, result.Panic, "test '%s' should not have panicked", result.TestName) + assert.False(t, result.Race, "test '%s' should not have raced", result.TestName) + } + if expected.race { + assert.True(t, result.Race, "test '%s' should have a data race", result.TestName) + assert.GreaterOrEqual(t, result.Failures, 1, "test '%s' should have failed due to race", result.TestName) + } + }) + } + + allTestsRun := []string{} + for testName, expected := range expectedTests { + if expected.seen { + allTestsRun = append(allTestsRun, testName) + } + } + for testName, expected := range expectedTests { + require.True(t, expected.seen, "expected test '%s' not found in test runs\nAll tests run: %s", testName, strings.Join(allTestsRun, ", ")) + } +} + +type runnerConfig struct { + ProjectPath string + RunCount int + GoTestCountFlag *int + GoTestRaceFlag bool + GoTestTimeoutFlag string + Tags []string + UseShuffle bool + ShuffleSeed string + FailFast bool + SkipTests []string + SelectTests []string + OmitOutputs bool + IgnoreSubtestErr bool +} diff --git a/tools/flakeguard/runner/runner_test.go b/tools/flakeguard/runner/runner_test.go index c97ea11e0..e14c25260 100644 --- a/tools/flakeguard/runner/runner_test.go +++ b/tools/flakeguard/runner/runner_test.go @@ -1,1197 +1,462 @@ -package runner +//go:build !integration_tests +// +build !integration_tests + +package runner_test import ( - "encoding/json" "fmt" - "os" - "path/filepath" - "strings" + "regexp" "testing" "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" - "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/utils" + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner" + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/executor" + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/parser" ) -var ( - defaultTestRunCount = 5 - flakyTestPackagePath = "./example_test_package" - debugDir = "debug_outputs" -) +type mockExecutor struct { + RunTestPackageFn func(cfg executor.Config, packageName string, runIndex int) (outputFilePath string, passed bool, err error) + RunCmdFn func(cfg executor.Config, testCmd []string, runIndex int) (outputFilePath string, passed bool, err error) -type expectedTestResult struct { - allSuccesses bool - someSuccesses bool - allFailures bool - someFailures bool - allSkips bool - testPanic bool - packagePanic bool - race bool - maximumRuns int + RunTestPackageCalls []executor.Config + RunCmdCalls [][]string +} - exactRuns *int - minimumRuns *int - exactPassRate *float64 - minimumPassRate *float64 - maximumPassRate *float64 +func (m *mockExecutor) RunTestPackage(cfg executor.Config, packageName string, runIndex int) (string, bool, error) { + m.RunTestPackageCalls = append(m.RunTestPackageCalls, cfg) + if m.RunTestPackageFn != nil { + return m.RunTestPackageFn(cfg, packageName, runIndex) + } + return fmt.Sprintf("mock_output_%s_%d.json", packageName, runIndex), true, nil +} - seen bool +func (m *mockExecutor) RunCmd(cfg executor.Config, testCmd []string, runIndex int) (string, bool, error) { + m.RunCmdCalls = append(m.RunCmdCalls, testCmd) + if m.RunCmdFn != nil { + return m.RunCmdFn(cfg, testCmd, runIndex) + } + return fmt.Sprintf("mock_cmd_output_%d.json", runIndex), true, nil } -func TestNoCompileTests(t *testing.T) { - // Test that we are not swallowing test compilation errors - t.Parallel() +type mockParser struct { + ParseFilesFn func(rawFilePaths []string, runPrefix string, expectedRuns int, cfg parser.Config) ([]reports.TestResult, []string, error) - runner := Runner{ - ProjectPath: "./", - Verbose: true, - RunCount: 1, - GoTestRaceFlag: false, - FailFast: false, - } + ParseFilesCalls [][]string + LastParseCfg parser.Config +} - _, err := runner.RunTestPackages([]string{"./example_bad_test_package"}) - require.Error(t, err) - require.ErrorIs(t, err, buildErr, "expected a compile error") - require.NotErrorIs(t, err, failedToShowBuildErr, "should be able to print out build errors") +func (m *mockParser) ParseFiles(rawFilePaths []string, runPrefix string, expectedRuns int, cfg parser.Config) ([]reports.TestResult, []string, error) { + m.ParseFilesCalls = append(m.ParseFilesCalls, rawFilePaths) + m.LastParseCfg = cfg + if m.ParseFilesFn != nil { + return m.ParseFilesFn(rawFilePaths, runPrefix, expectedRuns, cfg) + } + return []reports.TestResult{{TestName: "DefaultMockTest"}}, rawFilePaths, nil } -func TestPrettyProjectPath(t *testing.T) { +func TestRunner_RunTestPackages(t *testing.T) { t.Parallel() - prettyPath, err := utils.GetGoProjectName("./") - require.NoError(t, err) - assert.Equal(t, "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard", prettyPath) -} - -func TestRun(t *testing.T) { - var ( - zeroRuns = 0 - oneCount = 1 - successPassRate = 1.0 - failPassRate = 0.0 - ) testCases := []struct { - name string - runner Runner - expectedTests 
map[string]*expectedTestResult + name string + runCount int + failFast bool + packages []string + executorResponses map[string]struct { + passed bool + err error + } + expectedExecCalls int + expectedParseArgs struct { + fileCount int + cfg parser.Config + } + expectedResultCount int + expectedError bool }{ { - name: "default", - runner: Runner{ - ProjectPath: "./", - Verbose: true, - RunCount: defaultTestRunCount, - GoTestRaceFlag: false, - SkipTests: []string{"TestPanic", "TestFlakyPanic", "TestSubTestsSomePanic", "TestTimeout"}, - FailFast: false, - }, - expectedTests: map[string]*expectedTestResult{ - "TestFlaky": { - exactRuns: &defaultTestRunCount, - minimumPassRate: &failPassRate, - maximumPassRate: &successPassRate, - someSuccesses: true, - someFailures: true, - }, - "TestFail": { - exactRuns: &defaultTestRunCount, - exactPassRate: &failPassRate, - allFailures: true, - }, - "TestFailLargeOutput": { - exactRuns: &defaultTestRunCount, - exactPassRate: &failPassRate, - allFailures: true, - }, - "TestPass": { - exactRuns: &defaultTestRunCount, - exactPassRate: &successPassRate, - allSuccesses: true, - }, - "TestSkipped": { - exactRuns: &zeroRuns, - exactPassRate: &successPassRate, - allSkips: true, - }, - "TestRace": { - exactRuns: &defaultTestRunCount, - exactPassRate: &successPassRate, - allSuccesses: true, - }, - "TestSubTestsAllPass": { - exactRuns: &defaultTestRunCount, - allSuccesses: true, - }, - "TestFailInParentAfterSubTests": { - exactRuns: &defaultTestRunCount, - allFailures: true, - }, - "TestFailInParentAfterSubTests/Pass1": { - exactRuns: &defaultTestRunCount, - allSuccesses: true, - }, - "TestFailInParentAfterSubTests/Pass2": { - exactRuns: &defaultTestRunCount, - allSuccesses: true, - }, - "TestFailInParentBeforeSubTests": { - exactRuns: &defaultTestRunCount, - allFailures: true, - }, - "TestSubTestsAllPass/Pass1": { - exactRuns: &defaultTestRunCount, - allSuccesses: true, - }, - "TestSubTestsAllPass/Pass2": { - exactRuns: &defaultTestRunCount, - allSuccesses: true, - }, - "TestSubTestsAllFail": { - exactRuns: &defaultTestRunCount, - allFailures: true, - }, - "TestSubTestsAllFail/Fail1": { - exactRuns: &defaultTestRunCount, - allFailures: true, - }, - "TestSubTestsAllFail/Fail2": { - exactRuns: &defaultTestRunCount, - allFailures: true, - }, - "TestSubTestsSomeFail": { - exactRuns: &defaultTestRunCount, - allFailures: true, - }, - "TestSubTestsSomeFail/Pass": { - exactRuns: &defaultTestRunCount, - allSuccesses: true, - }, - "TestSubTestsSomeFail/Fail": { - exactRuns: &defaultTestRunCount, - allFailures: true, - }, + name: "Happy path - 2 runs, 2 packages", + runCount: 2, + failFast: false, + packages: []string{"pkgA", "pkgB"}, + executorResponses: map[string]struct { + passed bool + err error + }{ + "pkgA-0": {passed: true, err: nil}, + "pkgA-1": {passed: true, err: nil}, + "pkgB-0": {passed: true, err: nil}, + "pkgB-1": {passed: true, err: nil}, }, + expectedExecCalls: 4, + expectedParseArgs: struct { + fileCount int + cfg parser.Config + }{fileCount: 4, cfg: parser.Config{IgnoreParentFailuresOnSubtests: false, OmitOutputsOnSuccess: false}}, + expectedResultCount: 1, + expectedError: false, }, { - name: "always panic", - runner: Runner{ - ProjectPath: "./", - Verbose: true, - RunCount: defaultTestRunCount, - GoTestRaceFlag: false, - SkipTests: []string{}, - SelectTests: []string{"TestPanic"}, - FailFast: false, - }, - expectedTests: map[string]*expectedTestResult{ - "TestPanic": { - packagePanic: true, - testPanic: true, - maximumRuns: defaultTestRunCount, - }, + name: 
"FailFast stops execution", + runCount: 5, + failFast: true, + packages: []string{"pkgA", "pkgB"}, + executorResponses: map[string]struct { + passed bool + err error + }{ + "pkgA-0": {passed: false, err: nil}, }, + expectedExecCalls: 1, + expectedParseArgs: struct { + fileCount int + cfg parser.Config + }{fileCount: 1, cfg: parser.Config{IgnoreParentFailuresOnSubtests: false, OmitOutputsOnSuccess: true}}, + expectedResultCount: 1, + expectedError: false, }, { - name: "flaky panic", - runner: Runner{ - ProjectPath: "./", - Verbose: true, - RunCount: defaultTestRunCount, - GoTestRaceFlag: false, - GoTestCountFlag: &oneCount, - SkipTests: []string{}, - SelectTests: []string{"TestFlakyPanic"}, - FailFast: false, - }, - expectedTests: map[string]*expectedTestResult{ - "TestFlakyPanic": { - packagePanic: true, - testPanic: true, - maximumRuns: defaultTestRunCount, - }, + name: "Executor error stops execution", + runCount: 3, + failFast: false, + packages: []string{"pkgA", "pkgB"}, + executorResponses: map[string]struct { + passed bool + err error + }{ + "pkgA-0": {passed: true, err: nil}, + "pkgA-1": {passed: true, err: fmt.Errorf("executor boom")}, }, + expectedExecCalls: 2, + expectedResultCount: 0, + expectedError: true, }, { - name: "subtest panic", - runner: Runner{ - ProjectPath: "./", - Verbose: true, - RunCount: defaultTestRunCount, - GoTestRaceFlag: false, - SkipTests: []string{}, - SelectTests: []string{"TestSubTestsSomePanic"}, - FailFast: false, - }, - expectedTests: map[string]*expectedTestResult{ - "TestSubTestsSomePanic": { - packagePanic: true, - testPanic: true, - maximumRuns: defaultTestRunCount, - }, - "TestSubTestsSomePanic/Pass": { - packagePanic: true, - allSuccesses: true, - maximumRuns: defaultTestRunCount, - }, - "TestSubTestsSomePanic/Panic": { - packagePanic: true, - testPanic: true, - maximumRuns: defaultTestRunCount, - }, - }, - }, - { - name: "failfast", - runner: Runner{ - ProjectPath: "./", - Verbose: true, - RunCount: defaultTestRunCount, - GoTestRaceFlag: false, - SkipTests: []string{}, - SelectTests: []string{"TestFail", "TestPass"}, - FailFast: true, - }, - expectedTests: map[string]*expectedTestResult{ - "TestFail": { - exactRuns: &oneCount, - allFailures: true, - }, - "TestPass": { - exactRuns: &oneCount, - allSuccesses: true, - }, + name: "Parser error propagated", + runCount: 1, + failFast: false, + packages: []string{"pkgA"}, + executorResponses: map[string]struct { + passed bool + err error + }{ + "pkgA-0": {passed: true, err: nil}, }, + expectedExecCalls: 1, + expectedResultCount: 0, + expectedError: true, }, } for _, tc := range testCases { + tc := tc t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - testResults, err := tc.runner.RunTestPackages([]string{flakyTestPackagePath}) - require.NoError(t, err) - - t.Cleanup(func() { - if !t.Failed() { - return - } - if err := os.MkdirAll(debugDir, 0755); err != nil { - t.Logf("error creating directory: %v", err) - return - } - saniTName := strings.ReplaceAll(t.Name(), "/", "_") - resultsFileName := filepath.Join(debugDir, fmt.Sprintf("test_results_%s.json", saniTName)) - jsonResults, err := json.Marshal(testResults) - if err != nil { - t.Logf("error marshalling test report: %v", err) - return - } - err = os.WriteFile(resultsFileName, jsonResults, 0644) //nolint:gosec - if err != nil { - t.Logf("error writing test results: %v", err) - return - } - }) - - assert.Equal(t, len(tc.expectedTests), len(testResults), "unexpected number of test results") - for _, result := range testResults { - 
t.Run(fmt.Sprintf("checking results of %s", result.TestName), func(t *testing.T) { - require.NotNil(t, result, "test result was nil") - expected, ok := tc.expectedTests[result.TestName] - require.True(t, ok, "unexpected test name: %s", result.TestName) - require.False(t, expected.seen, "test '%s' was seen multiple times", result.TestName) - expected.seen = true - - if !expected.testPanic { // Panics end up wrecking durations - assert.Len(t, result.Durations, result.Runs, "test '%s' has a mismatch of runs %d and duration counts %d", - result.TestName, result.Runs, len(result.Durations), - ) - assert.False(t, result.Panic, "test '%s' should not have panicked", result.TestName) - } - resultCounts := result.Successes + result.Failures - assert.Equal(t, result.Runs, resultCounts, - "test '%s' doesn't match Runs count with results counts\n%s", result.TestName, result.Runs, resultsString(result), - ) - - if expected.minimumRuns != nil { - assert.GreaterOrEqual(t, result.Runs, *expected.minimumRuns, "test '%s' had fewer runs than expected", result.TestName) - } - if expected.exactRuns != nil { - assert.Equal(t, *expected.exactRuns, result.Runs, "test '%s' had an unexpected number of runs", result.TestName) - } else { - assert.LessOrEqual(t, result.Runs, expected.maximumRuns, "test '%s' had more runs than expected", result.TestName) - } - if expected.exactPassRate != nil { - assert.Equal(t, *expected.exactPassRate, result.PassRatio, "test '%s' had an unexpected pass ratio", result.TestName) - } - if expected.minimumPassRate != nil { - assert.Greater(t, result.PassRatio, *expected.minimumPassRate, "test '%s' had a pass ratio below the minimum", result.TestName) - } - if expected.maximumPassRate != nil { - assert.Less(t, result.PassRatio, *expected.maximumPassRate, "test '%s' had a pass ratio above the maximum", result.TestName) - } - if expected.allSuccesses { - assert.Equal(t, result.Successes, result.Runs, "test '%s' has %d total runs and should have passed all runs, only passed %d\n%s", result.TestName, result.Runs, result.Successes, resultsString(result)) - assert.Zero(t, result.Failures, "test '%s' has %d total runs and should have passed all runs, but failed some\n%s", result.TestName, result.Runs, resultsString(result)) - assert.Zero(t, result.Skips, "test '%s' has %d total runs and should have passed all runs, but skipped some\n%s", result.TestName, result.Runs, resultsString(result)) - assert.False(t, result.Panic, "test '%s' should not have panicked\n%s", result.TestName, resultsString(result)) - assert.False(t, result.Race, "test '%s' should not have raced\n%s", result.TestName, resultsString(result)) + mockExec := &mockExecutor{ + RunTestPackageFn: func(cfg executor.Config, pkg string, idx int) (string, bool, error) { + key := fmt.Sprintf("%s-%d", pkg, idx) + resp, ok := tc.executorResponses[key] + if !ok { + return fmt.Sprintf("mock_%s_%d.json", pkg, idx), true, nil } - if expected.someSuccesses { - assert.Greater(t, result.Successes, 0, "test '%s' has %d total runs and should have passed some runs, passed none\n%s", result.TestName, result.Runs, resultsString(result)) - } - if expected.allFailures { - assert.Equal(t, result.Failures, result.Runs, "test '%s' has %d total runs and should have failed all runs, only failed %d\n%s", result.TestName, result.Runs, result.Failures, resultsString(result)) - assert.Zero(t, result.Successes, "test '%s' has %d total runs and should have failed all runs, but succeeded some\n%s", result.TestName, result.Runs, resultsString(result)) - assert.Zero(t, 
result.Skips, "test '%s' has %d total runs and should have failed all runs, but skipped some\n%s", result.TestName, result.Runs, resultsString(result)) - assert.False(t, result.Race, "test '%s' should not have raced\n%s", result.TestName, resultsString(result)) - } - if expected.packagePanic { - assert.True(t, result.PackagePanic, "test '%s' should have package panicked", result.TestName) - } - if expected.testPanic { - assert.True(t, result.Panic, "test '%s' should have panicked", result.TestName) - assert.True(t, result.PackagePanic, "test '%s' should have package panicked", result.TestName) - expected.someFailures = true - } - if expected.someFailures { - assert.Greater(t, result.Failures, 0, "test '%s' has %d total runs and should have failed some runs, failed none\n%s", result.TestName, result.Runs, resultsString(result)) - } - if expected.allSkips { - assert.Equal(t, 0, result.Runs, "test '%s' has %d total runs and should have skipped all of them, no runs expected\n%s", result.TestName, result.Runs, resultsString(result)) - assert.Zero(t, result.Successes, "test '%s' has %d total runs and should have skipped all runs, but succeeded some\n%s", result.TestName, result.Runs, resultsString(result)) - assert.Zero(t, result.Failures, "test '%s' has %d total runs and should have skipped all runs, but panicked some\n%s", result.TestName, result.Runs, resultsString(result)) - assert.False(t, result.Panic, "test '%s' should not have panicked\n%s", result.TestName, resultsString(result)) - assert.False(t, result.Race, "test '%s' should not have raced\n%s", result.TestName, resultsString(result)) - } - if expected.race { - assert.True(t, result.Race, "test '%s' should have a data race\n%s", result.TestName, resultsString(result)) - assert.False(t, result.Panic, "test '%s' should not have panicked\n%s", result.TestName, resultsString(result)) - assert.Zero(t, result.Successes, "test '%s' has %d total runs and should have raced all runs, but succeeded some\n%s", result.TestName, result.Runs, resultsString(result)) - assert.Zero(t, result.Failures, "test '%s' has %d total runs and should have raced all runs, but panicked some\n%s", result.TestName, result.Runs, resultsString(result)) - assert.Zero(t, result.Skips, "test '%s' has %d total runs and should have raced all runs, but skipped some\n%s", result.TestName, result.Runs, resultsString(result)) - assert.Zero(t, result.Skips, "test '%s' has %d total runs and should have raced all runs, but panicked some\n%s", result.TestName, result.Runs, resultsString(result)) - } - }) + return fmt.Sprintf("mock_%s_%d.json", pkg, idx), resp.passed, resp.err + }, } - - allTestsRun := []string{} - for testName, expected := range tc.expectedTests { - if expected.seen { - allTestsRun = append(allTestsRun, testName) + mockParse := &mockParser{} + if tc.name == "Parser error propagated" { + mockParse.ParseFilesFn = func(_ []string, _ string, _ int, _ parser.Config) ([]reports.TestResult, []string, error) { + return nil, nil, fmt.Errorf("parser failed") } } - for testName, expected := range tc.expectedTests { - require.True(t, expected.seen, "expected test '%s' not found in test runs\nAll tests run: %s", testName, strings.Join(allTestsRun, ", ")) + + r := runner.NewRunner( + ".", + false, // Verbose + tc.runCount, + nil, // goTestCountFlag + false, // goTestRaceFlag + "", // goTestTimeoutFlag + nil, // tags + false, // useShuffle + "", // shuffleSeed + tc.failFast, + nil, // skipTests + nil, // selectTests + tc.expectedParseArgs.cfg.IgnoreParentFailuresOnSubtests, + 
tc.expectedParseArgs.cfg.OmitOutputsOnSuccess,
+				mockExec,
+				mockParse,
+			)
+
+			actualResults, err := r.RunTestPackages(tc.packages)
+
+			assert.Len(t, mockExec.RunTestPackageCalls, tc.expectedExecCalls, "Unexpected number of executor calls")
+
+			if tc.expectedError {
+				assert.Error(t, err)
+				if tc.name == "Executor error stops execution" {
+					assert.Len(t, mockParse.ParseFilesCalls, 0, "Parser should not be called on executor error")
+				}
+			} else {
+				assert.NoError(t, err)
+				assert.Len(t, mockParse.ParseFilesCalls, 1, "Parser should be called once on success")
+				if len(mockParse.ParseFilesCalls) > 0 {
+					assert.Len(t, mockParse.ParseFilesCalls[0], tc.expectedParseArgs.fileCount, "Parser called with wrong number of files")
+					assert.Equal(t, tc.expectedParseArgs.cfg, mockParse.LastParseCfg, "Parser called with wrong config")
+				}
+				assert.Len(t, actualResults, tc.expectedResultCount, "Unexpected number of results returned")
+			}
		})
	}
}

-func resultsString(result reports.TestResult) string {
-	resultCounts := result.Successes + result.Failures + result.Skips
-	return fmt.Sprintf("Runs: %d\nPanicked: %t\nRace: %t\nSuccesses: %d\nFailures: %d\nSkips: %d\nTotal Results: %d",
-		result.Runs, result.Panic, result.Race, result.Successes, result.Failures, result.Skips, resultCounts)
-}
-
-func TestAttributePanicToTest(t *testing.T) {
+func TestRunner_RunTestCmd(t *testing.T) {
	t.Parallel()

-	// Test cases: each test case contains a slice of output strings.
	testCases := []struct {
-		name             string
-		expectedTestName string
-		expectedTimeout  bool
-		outputs          []string
+		name              string
+		runCount          int
+		failFast          bool
+		cmd               []string
+		executorResponses []struct {
+			passed bool
+			err    error
+		}
+		expectedExecCalls int
+		expectedParseArgs struct {
+			fileCount int
+			cfg       parser.Config
+		}
+		expectedResultCount int
+		expectedError       bool
	}{
		{
-			name:             "properly attributed panic",
-			expectedTestName: "TestPanic",
-			expectedTimeout:  false,
-			outputs: []string{
-				"panic: This test intentionally panics [recovered]",
-				"\tpanic: This test intentionally panics",
-				"goroutine 25 [running]:",
-				"testing.tRunner.func1.2({0x1008cde80, 0x1008f7d90})",
-				"\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1632 +0x1bc",
-				"testing.tRunner.func1()",
-				"\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1635 +0x334",
-				"panic({0x1008cde80?, 0x1008f7d90?})",
-				"\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/runtime/panic.go:785 +0x124",
-				"github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package.TestPanic(0x140000b6ea0?)",
-			},
-		},
-		{
-			name:             "improperly attributed panic",
-			expectedTestName: "TestPanic",
-			expectedTimeout:  false,
-			outputs: []string{
-				"panic: This test intentionally panics [recovered]",
-				"TestPanic(0x140000b6ea0?)",
-				"goroutine 25 [running]:",
-				"testing.tRunner.func1.2({0x1008cde80, 0x1008f7d90})",
-				"\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1632 +0x1bc",
-				"testing.tRunner.func1()",
-				"\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1635 +0x334",
-				"panic({0x1008cde80?, 0x1008f7d90?})",
-				"\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/runtime/panic.go:785 +0x124",
-				"github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package.TestPanic(0x140000b6ea0?)",
			},
+			name:     "Happy path - 2 runs",
+			runCount: 2,
+			failFast: false,
+			cmd:      []string{"go", "test", "./..."},
+			executorResponses: []struct {
+				passed bool
+				err    error
+			}{
+				{passed: true, err: nil},
+				{passed: true, err: nil},
			},
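+			// With runCount=2 the runner should invoke the executor once per run
+			// and produce one JSON output file per invocation; the parser is then
+			// expected to be called a single time over both files, yielding the
+			// single aggregated result asserted below.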
+ expectedExecCalls: 2, + expectedParseArgs: struct { + fileCount int + cfg parser.Config + }{fileCount: 2, cfg: parser.Config{IgnoreParentFailuresOnSubtests: false, OmitOutputsOnSuccess: false}}, + expectedResultCount: 1, + expectedError: false, }, { - name: "log after test complete panic", - expectedTestName: "Test_workflowRegisteredHandler/skips_fetch_if_secrets_url_is_missing", - expectedTimeout: false, - outputs: []string{ - "panic: Log in goroutine after Test_workflowRegisteredHandler/skips_fetch_if_secrets_url_is_missing has completed: 2025-03-28T17:18:16.703Z\tDEBUG\tCapabilitiesRegistry\tcapabilities/registry.go:69\tget capability\t{\"version\": \"unset@unset\", \"id\": \"basic-test-trigger@1.0.0\"}", - "goroutine 646 [running]:", - "testing.(*common).logDepth(0xc000728000, {0xc0001b9400, 0x9b}, 0x3)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1064 +0x69f", - "testing.(*common).log(...)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1046", - "testing.(*common).Logf(0xc000728000, {0x6000752, 0x2}, {0xc001070430, 0x1, 0x1})", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1097 +0x9f", - "go.uber.org/zap/zaptest.TestingWriter.Write({{0x7fb811aa2818?, 0xc000728000?}, 0x20?}, {0xc001074000, 0x9c, 0x400})", - "\t/home/runner/go/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/logger.go:146 +0x11d", - "go.uber.org/zap/zapcore.(*ioCore).Write(0xc000bff1d0, {0xff, {0xc1f1d45629e99436, 0x252667087c, 0x87b3fa0}, {0x602a730, 0x14}, {0x601d42f, 0xe}, {0x1, ...}, ...}, ...)", - "\t/home/runner/go/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/core.go:99 +0x18e", - "go.uber.org/zap/zapcore.(*CheckedEntry).Write(0xc00106dba0, {0xc00101d400, 0x1, 0x2})", - "\t/home/runner/go/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/entry.go:253 +0x1ed", - "go.uber.org/zap.(*SugaredLogger).log(0xc0001e48b8, 0xff, {0x601d42f, 0xe}, {0x0, 0x0, 0x0}, {0xc00101bf40, 0x2, 0x2})", - "\t/home/runner/go/pkg/mod/go.uber.org/zap@v1.27.0/sugar.go:355 +0x12d", - "go.uber.org/zap.(*SugaredLogger).Debugw(...)", - "\t/home/runner/go/pkg/mod/go.uber.org/zap@v1.27.0/sugar.go:251", - "github.com/smartcontractkit/chainlink/v2/core/capabilities.(*Registry).Get(0xc000ab88c0, {0x20?, 0x87bb320?}, {0xc0013282a0, 0x18})", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/registry.go:69 +0x1cf", - "github.com/smartcontractkit/chainlink/v2/core/capabilities.(*Registry).GetTrigger(0xc000ab88c0, {0x67d38a8, 0xc0011f22d0}, {0xc0013282a0, 0x18})", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/registry.go:80 +0x6f", - "github.com/smartcontractkit/chainlink/v2/core/services/workflows.(*Engine).resolveWorkflowCapabilities(0xc000e75188, {0x67d38a8, 0xc0011f22d0})", - "\t/home/runner/work/chainlink/chainlink/core/services/workflows/engine.go:198 +0x173", - "github.com/smartcontractkit/chainlink/v2/core/services/workflows.(*Engine).init.func1()", - "\t/home/runner/work/chainlink/chainlink/core/services/workflows/engine.go:348 +0x2aa", - "github.com/smartcontractkit/chainlink/v2/core/services/workflows.retryable({0x67d38a8, 0xc0011f22d0}, {0x680c850, 0xc000e08210}, 0x1388, 0x0, 0xc000f0bf08)", - "\t/home/runner/work/chainlink/chainlink/core/services/workflows/retry.go:45 +0x402", - "github.com/smartcontractkit/chainlink/v2/core/services/workflows.(*Engine).init(0xc000e75188, {0x67d38a8, 0xc0011f22d0})", - "\t/home/runner/work/chainlink/chainlink/core/services/workflows/engine.go:339 +0x225", - "created by 
github.com/smartcontractkit/chainlink/v2/core/services/workflows.(*Engine).Start.func1 in goroutine 390", - "\t/home/runner/work/chainlink/chainlink/core/services/workflows/engine.go:179 +0xf37", - "FAIL\tgithub.com/smartcontractkit/chainlink/v2/core/services/workflows/syncer\t159.643s", + name: "FailFast stops execution", + runCount: 3, + failFast: true, + cmd: []string{"go", "test", "./..."}, + executorResponses: []struct { + passed bool + err error + }{ + {passed: false, err: nil}, // Fails on first run }, + expectedExecCalls: 1, + expectedParseArgs: struct { + fileCount int + cfg parser.Config + }{fileCount: 1, cfg: parser.Config{IgnoreParentFailuresOnSubtests: false, OmitOutputsOnSuccess: true}}, + expectedResultCount: 1, + expectedError: false, }, { - name: "timeout panic with obvious culprit", - expectedTestName: "TestTimedOut", - expectedTimeout: true, - outputs: []string{ - "panic: test timed out after 10m0s", - "running tests", - "\tTestNoTimeout (9m59s)", - "\tTestTimedOut (10m0s)", - "goroutine 397631 [running]:", - "testing.(*M).startAlarm.func1()", - "\t/opt/hostedtoolcache/go/1.23.3/x64/src/testing/testing.go:2373 +0x385", - "created by time.goFunc", - "/opt/hostedtoolcache/go/1.23.3/x64/src/time/sleep.go:215 +0x2d", + name: "Executor error stops execution", + runCount: 3, + failFast: false, + cmd: []string{"go", "test", "./..."}, + executorResponses: []struct { + passed bool + err error + }{ + {passed: true, err: nil}, + {passed: false, err: fmt.Errorf("exec boom")}, // Error on second run }, - }, - { - name: "subtest panic", - expectedTestName: "TestSubTestsSomePanic", - expectedTimeout: false, - outputs: []string{ - "panic: This subtest always panics [recovered]", - "panic: This subtest always panics", - "goroutine 23 [running]:", - "testing.tRunner.func1.2({0x100489e80, 0x1004b3e30})", - "\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1632 +0x1bc", - "testing.tRunner.func1()", - "\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1635 +0x334", - "panic({0x100489e80?, 0x1004b3e30?})", - "\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/runtime/panic.go:785 +0x124", - "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package.TestSubTestsSomePanic.func2(0x140000c81a0?)", - }, - }, - { - name: "memory_test panic extraction", - expectedTestName: "TestJobClientJobAPI", - expectedTimeout: false, - outputs: []string{ - "panic: freeport: cannot allocate port block [recovered]", - "\tpanic: freeport: cannot allocate port block", - "goroutine 321 [running]:", - "testing.tRunner.func1.2({0x5e0dd80, 0x72ebb40})", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1734 +0x21c", - "testing.tRunner.func1()", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1737 +0x35e", - "panic({0x5e0dd80?, 0x72ebb40?})", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/runtime/panic.go:787 +0x132", - "github.com/hashicorp/consul/sdk/freeport.alloc()", - "\t/home/runner/go/pkg/mod/github.com/hashicorp/consul/sdk@v0.16.1/freeport/freeport.go:274 +0xad", - "github.com/hashicorp/consul/sdk/freeport.initialize()", - "\t/home/runner/go/pkg/mod/github.com/hashicorp/consul/sdk@v0.16.1/freeport/freeport.go:124 +0x2d7", - "sync.(*Once).doSlow(0xc0018eb600?, 0xc000da4a98?)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/sync/once.go:78 +0xab", - "sync.(*Once).Do(...)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/sync/once.go:69", - "github.com/hashicorp/consul/sdk/freeport.Take(0x1)", - 
"\t/home/runner/go/pkg/mod/github.com/hashicorp/consul/sdk@v0.16.1/freeport/freeport.go:303 +0xe5", - "github.com/hashicorp/consul/sdk/freeport.GetN({0x7337708, 0xc000683dc0}, 0x1)", - "\t/home/runner/go/pkg/mod/github.com/hashicorp/consul/sdk@v0.16.1/freeport/freeport.go:427 +0x48", - "github.com/smartcontractkit/chainlink/deployment/environment/memory_test.TestJobClientJobAPI(0xc000683dc0)", - "\t/home/runner/work/chainlink/chainlink/deployment/environment/memory/job_service_client_test.go:116 +0xc6", - "testing.tRunner(0xc000683dc0, 0x6d6c838)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1792 +0xf4", - "created by testing.(*T).Run in goroutine 1", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1851 +0x413", - }, - }, - { - name: "changeset_test panic extraction", - expectedTestName: "TestDeployBalanceReader", - expectedTimeout: false, - outputs: []string{ - "panic: freeport: cannot allocate port block [recovered]", - "\tpanic: freeport: cannot allocate port block", - "goroutine 378 [running]:", - "testing.tRunner.func1.2({0x6063f40, 0x76367f0})", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1734 +0x21c", - "testing.tRunner.func1()", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1737 +0x35e", - "panic({0x6063f40?, 0x76367f0?})", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/runtime/panic.go:787 +0x132", - "github.com/hashicorp/consul/sdk/freeport.alloc()", - "\t/home/runner/go/pkg/mod/github.com/hashicorp/consul/sdk@v0.16.1/freeport/freeport.go:274 +0xad", - "github.com/hashicorp/consul/sdk/freeport.initialize()", - "\t/home/runner/go/pkg/mod/github.com/hashicorp/consul/sdk@v0.16.1/freeport/freeport.go:124 +0x2d7", - "sync.(*Once).doSlow(0xa94f820?, 0xa8000a?)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/sync/once.go:78 +0xab", - "sync.(*Once).Do(...)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/sync/once.go:69", - "github.com/hashicorp/consul/sdk/freeport.Take(0x1)", - "\t/home/runner/go/pkg/mod/github.com/hashicorp/consul/sdk@v0.16.1/freeport/freeport.go:303 +0xe5", - "github.com/hashicorp/consul/sdk/freeport.GetN({0x7684150, 0xc000583c00}, 0x1)", - "\t/home/runner/go/pkg/mod/github.com/hashicorp/consul/sdk@v0.16.1/freeport/freeport.go:427 +0x48", - "github.com/smartcontractkit/chainlink/deployment/environment/memory.NewNodes(0xc000583c00, 0xff, 0xc001583d10, 0xc005aa0030, 0x1, 0x0, {0x0, {0x0, 0x0, 0x0, ...}, ...}, ...)", - "\t/home/runner/work/chainlink/chainlink/deployment/environment/memory/environment.go:177 +0xa5", - "github.com/smartcontractkit/chainlink/deployment/environment/memory.NewMemoryEnvironment(_, {_, _}, _, {0x2, 0x0, 0x0, 0x1, 0x0, {0x0, ...}})", - "\t/home/runner/work/chainlink/chainlink/deployment/environment/memory/environment.go:223 +0x10c", - "github.com/smartcontractkit/chainlink/deployment/keystone/changeset_test.TestDeployBalanceReader(0xc000583c00)", - "\t/home/runner/work/chainlink/chainlink/deployment/keystone/changeset/deploy_balance_reader_test.go:23 +0xf5", - "testing.tRunner(0xc000583c00, 0x70843d0)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1792 +0xf4", - "created by testing.(*T).Run in goroutine 1", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1851 +0x413", - " logger.go:146: 03:14:04.485880684\tINFO\tDeployed KeystoneForwarder 1.0.0 chain selector 909606746561742123 addr 0x72B66019aCEdc35F7F6e58DF94De95f3cBCC5971\t{\"version\": \"(devel)@unset\"}", - " logger.go:146: 03:14:04.486035865\tINFO\tdeploying forwarder\t{\"version\": 
\"(devel)@unset\", \"chainSelector\": 5548718428018410741}", - " logger.go:146: 2025-03-08T03:14:04.490Z\tINFO\tchangeset/jd_register_nodes.go:91\tregistered node\t{\"version\": \"unset@unset\", \"name\": \"node1\", \"id\": \"node:{id:\\\"895776f5ba0cc11c570a47b5cc3dbb8771da9262cfb545cd5d48251796af7f\\\" public_key:\\\"895776f5ba0cc11c570a47b5cc3dbb8771da9262cfb545cd5d48251796af7f\\\" is_enabled:true is_connected:true labels:{key:\\\"product\\\" value:\\\"test-product\\\"} labels:{key:\\\"environment\\\" value:\\\"test-env\\\"} labels:{key:\\\"nodeType\\\" value:\\\"bootstrap\\\"} labels:{key:\\\"don-0-don1\\\"}\"}", - }, - }, - { - name: "empty", - expectedTestName: "", - expectedTimeout: false, - outputs: []string{}, + expectedExecCalls: 2, + expectedResultCount: 0, + expectedError: true, }, } for _, tc := range testCases { + tc := tc t.Run(tc.name, func(t *testing.T) { - testName, timeout, err := attributePanicToTest(tc.outputs) - assert.Equal(t, tc.expectedTimeout, timeout, "timeout flag mismatch") - if tc.expectedTestName == "" { - require.Error(t, err) + execCallCount := 0 + mockExec := &mockExecutor{ + RunCmdFn: func(cfg executor.Config, cmd []string, idx int) (string, bool, error) { + execCallCount++ + if idx < len(tc.executorResponses) { + resp := tc.executorResponses[idx] + return fmt.Sprintf("mock_cmd_%d.json", idx), resp.passed, resp.err + } + return fmt.Sprintf("mock_cmd_%d.json", idx), true, nil + }, + } + mockParse := &mockParser{} + + r := runner.NewRunner( + ".", tc.failFast, // other fields default/nil + tc.runCount, nil, false, "", nil, false, "", tc.failFast, nil, nil, + tc.expectedParseArgs.cfg.IgnoreParentFailuresOnSubtests, + tc.expectedParseArgs.cfg.OmitOutputsOnSuccess, + mockExec, mockParse, + ) + + actualResults, err := r.RunTestCmd(tc.cmd) + + assert.Equal(t, tc.expectedExecCalls, execCallCount, "Unexpected number of executor RunCmd calls") + + if tc.expectedError { + assert.Error(t, err) + assert.Len(t, mockParse.ParseFilesCalls, 0, "Parser should not be called on executor error") } else { - require.NoError(t, err) - assert.Equal(t, tc.expectedTestName, testName, "test name mismatch") + assert.NoError(t, err) + assert.Len(t, mockParse.ParseFilesCalls, 1, "Parser should be called once on success") + if len(mockParse.ParseFilesCalls) > 0 { + assert.Len(t, mockParse.ParseFilesCalls[0], tc.expectedParseArgs.fileCount, "Parser called with wrong number of files") + assert.Equal(t, tc.expectedParseArgs.cfg, mockParse.LastParseCfg, "Parser called with wrong config") + } + assert.Len(t, actualResults, tc.expectedResultCount, "Unexpected number of results returned") } }) } } -func TestFailToAttributePanicToTest(t *testing.T) { +func TestRunner_RerunFailedTests(t *testing.T) { t.Parallel() testCases := []struct { - name string - expectedTimeout bool - expectedError error - outputs []string + name string + initialFailedTests []reports.TestResult + rerunCount int + executorResponses map[string]struct { + passed bool + err error + } + expectedExecCalls int + expectedParseArgs struct { + fileCount int + cfg parser.Config + } + expectedFinalResultCount int + expectedError bool }{ { - name: "no test name in panic", - expectedTimeout: false, - expectedError: ErrFailedToAttributePanicToTest, - outputs: []string{ - "panic: reflect: Elem of invalid type bool", - "goroutine 104182 [running]:", - "reflect.elem(0xc0569d9998?)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/reflect/type.go:733 +0x9a", - "reflect.(*rtype).Elem(0xa4dd940?)", - 
"\t/opt/hostedtoolcache/go/1.24.0/x64/src/reflect/type.go:737 +0x15", - "github.com/smartcontractkit/chainlink-solana/pkg/solana/chainreader.setPollingFilterOverrides(0x0, {0xc052040510, 0x1, 0xc?})", - "\t/home/runner/go/pkg/mod/github.com/smartcontractkit/chainlink-solana@v1.1.2-0.20250319030827-8e2f4d76eb79/pkg/solana/chainreader/chain_reader.go:942 +0x492", - "github.com/smartcontractkit/chainlink-solana/pkg/solana/chainreader.(*ContractReaderService).addEventRead(_, _, {_, _}, {_, _}, {{0xc0544c4270, 0x9}, {0xc0544c4280, 0xc}, ...}, ...)", - "\t/home/runner/go/pkg/mod/github.com/smartcontractkit/chainlink-solana@v1.1.2-0.20250319030827-8e2f4d76eb79/pkg/solana/chainreader/chain_reader.go:605 +0x13d", - "github.com/smartcontractkit/chainlink-solana/pkg/solana/chainreader.(*ContractReaderService).initNamespace(0xc054472540, 0xc01c37d440?)", - "\t/home/runner/go/pkg/mod/github.com/smartcontractkit/chainlink-solana@v1.1.2-0.20250319030827-8e2f4d76eb79/pkg/solana/chainreader/chain_reader.go:443 +0x28b", - "github.com/smartcontractkit/chainlink-solana/pkg/solana/chainreader.NewContractReaderService({0x7fcf8b532040?, 0xc015b223e0?}, {0xc6ac960, 0xc05464e470}, {0xc0544384e0?, {0xc01c37d440?, 0xc054163b84?, 0xc054163b80?}}, {0x7fcf8071c7a0, 0xc0157928c0})", - "\t/home/runner/go/pkg/mod/github.com/smartcontractkit/chainlink-solana@v1.1.2-0.20250319030827-8e2f4d76eb79/pkg/solana/chainreader/chain_reader.go:97 +0x287", - "github.com/smartcontractkit/chainlink-solana/pkg/solana.(*Relayer).NewContractReader(0xc015b2e150, {0x4d0102030cb384f5?, 0xb938300b5ca1aa13?}, {0xc05469c000, 0x1eedf, 0x20000})", - "\t/home/runner/go/pkg/mod/github.com/smartcontractkit/chainlink-solana@v1.1.2-0.20250319030827-8e2f4d76eb79/pkg/solana/relay.go:160 +0x205", - "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/oraclecreator.(*pluginOracleCreator).createReadersAndWriters(_, {_, _}, {_, _}, _, {0x3, {0x0, 0xa, 0x93, ...}, ...}, ...)", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/ccip/oraclecreator/plugin.go:446 +0x338", - "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/oraclecreator.(*pluginOracleCreator).Create(0xc033a69ad0, {0xc6f5a10, 0xc02e4f9a40}, 0x3, {0x3, {0x0, 0xa, 0x93, 0x8f, 0x67, ...}, ...})", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/ccip/oraclecreator/plugin.go:215 +0xc0c", - "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/launcher.createDON({0xc6f5a10, 0xc02e4f9a40}, {0x7fcf8b533ad0, 0xc015b97340}, {0xb6, 0x5e, 0x31, 0xd0, 0x35, 0xef, ...}, ...)", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/ccip/launcher/launcher.go:367 +0x451", - "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/launcher.(*launcher).processAdded(0xc015723080, {0xc6f5a10, 0xc02e4f9a40}, 0xc053de2ff0)", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/ccip/launcher/launcher.go:254 +0x239", - "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/launcher.(*launcher).processDiff(0xc015723080, {0xc6f5a10, 0xc02e4f9a40}, {0xc053de2ff0?, 0xc053de3020?, 0xc053de3050?})", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/ccip/launcher/launcher.go:192 +0x68", - "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/launcher.(*launcher).tick(0xc015723080, {0xc6f5a10, 0xc02e4f9a40})", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/ccip/launcher/launcher.go:178 +0x20b", - "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/launcher.(*launcher).monitor(0xc015723080)", 
- "\t/home/runner/work/chainlink/chainlink/core/capabilities/ccip/launcher/launcher.go:152 +0x112", - "created by github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/launcher.(*launcher).Start.func1 in goroutine 1335", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/ccip/launcher/launcher.go:134 +0xa5", - "FAIL\tgithub.com/smartcontractkit/chainlink/deployment/ccip/changeset/solana\t184.801s", + name: "Rerun successful", + initialFailedTests: []reports.TestResult{ + {TestName: "TestFailA", TestPackage: "pkgA", Failures: 1, Runs: 1}, + {TestName: "TestFailB", TestPackage: "pkgB", Failures: 1, Runs: 1}, }, - }, - { - name: "fail to parse timeout duration", - expectedTimeout: true, - expectedError: ErrFailedToParseTimeoutDuration, - outputs: []string{ - "panic: test timed out after malformedDurationStr\n", - "\trunning tests:\n", - "\t\tTestAddAndPromoteCandidatesForNewChain (22s)\n", - "\t\tTestAddAndPromoteCandidatesForNewChain/Remote_chains_owned_by_MCMS (22s)\n", - "\t\tTestAlmostPanicTime (9m59s)\n", - "\t\tTestConnectNewChain (1m1s)\n", - "\t\tTestConnectNewChain/Use_production_router_(with_MCMS) (1m1s)\n", - "\t\tTestJobSpecChangeset (0s)\n", - "\t\tTest_ActiveCandidate (1m1s)\n", - "goroutine 971967 [running]:\n", - "testing.(*M).startAlarm.func1()\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:2484 +0x605\n", - "created by time.goFunc\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/time/sleep.go:215 +0x45\n", - "goroutine 1 [chan receive]:\n", - "testing.tRunner.func1()\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1753 +0x965\n", - "testing.tRunner(0xc0013dac40, 0xc0025b7ae0)\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1798 +0x25f\n", - "testing.runTests(0xc0010a0b70, {0x14366840, 0x25, 0x25}, {0x3?, 0x0?, 0x146214a0?})\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:2277 +0x96d\n", - "testing.(*M).Run(0xc0014732c0)\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:2142 +0xeeb\n", - "main.main()\n", - "\t_testmain.go:119 +0x165\n", + rerunCount: 2, + executorResponses: map[string]struct { + passed bool + err error + }{ + "pkgA-0": {passed: true, err: nil}, + "pkgB-0": {passed: false, err: nil}, + "pkgB-1": {passed: true, err: nil}, }, + expectedExecCalls: 3, + expectedParseArgs: struct { + fileCount int + cfg parser.Config + }{fileCount: 3, cfg: parser.Config{OmitOutputsOnSuccess: false}}, + expectedFinalResultCount: 1, + expectedError: false, }, { - name: "fail to parse test duration", - expectedTimeout: true, - expectedError: ErrDetectedTimeoutFailedParse, - outputs: []string{ - "panic: test timed out after 10m0s\n", - "\trunning tests:\n", - "\t\tTestAddAndPromoteCandidatesForNewChain (malformedDurationStr)\n", - "\t\tTestAddAndPromoteCandidatesForNewChain/Remote_chains_owned_by_MCMS (22s)\n", - "\t\tTestAlmostPanicTime (9m59s)\n", - "\t\tTestConnectNewChain (1m1s)\n", - "\t\tTestConnectNewChain/Use_production_router_(with_MCMS) (1m1s)\n", - "\t\tTestJobSpecChangeset (0s)\n", - "\t\tTest_ActiveCandidate (1m1s)\n", - "goroutine 971967 [running]:\n", - "testing.(*M).startAlarm.func1()\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:2484 +0x605\n", - "created by time.goFunc\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/time/sleep.go:215 +0x45\n", - "goroutine 1 [chan receive]:\n", - "testing.tRunner.func1()\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1753 +0x965\n", - "testing.tRunner(0xc0013dac40, 0xc0025b7ae0)\n", 
- "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1798 +0x25f\n", - "testing.runTests(0xc0010a0b70, {0x14366840, 0x25, 0x25}, {0x3?, 0x0?, 0x146214a0?})\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:2277 +0x96d\n", - "testing.(*M).Run(0xc0014732c0)\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:2142 +0xeeb\n", - "main.main()\n", - "\t_testmain.go:119 +0x165\n", - }, + name: "No failed tests to rerun", + initialFailedTests: []reports.TestResult{}, + rerunCount: 3, + expectedExecCalls: 0, + expectedParseArgs: struct { + fileCount int + cfg parser.Config + }{fileCount: 0}, + expectedError: false, }, { - name: "timeout panic without obvious culprit", - expectedTimeout: true, - expectedError: ErrDetectedTimeoutFailedAttribution, - outputs: []string{ - "panic: test timed out after 10m0s\n", - "\trunning tests:\n", - "\t\tTestAddAndPromoteCandidatesForNewChain (22s)\n", - "\t\tTestAddAndPromoteCandidatesForNewChain/Remote_chains_owned_by_MCMS (22s)\n", - "\t\tTestAlmostPanicTime (9m59s)\n", - "\t\tTestConnectNewChain (1m1s)\n", - "\t\tTestConnectNewChain/Use_production_router_(with_MCMS) (1m1s)\n", - "\t\tTestJobSpecChangeset (0s)\n", - "\t\tTest_ActiveCandidate (1m1s)\n", - "goroutine 971967 [running]:\n", - "testing.(*M).startAlarm.func1()\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:2484 +0x605\n", - "created by time.goFunc\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/time/sleep.go:215 +0x45\n", - "goroutine 1 [chan receive]:\n", - "testing.tRunner.func1()\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1753 +0x965\n", - "testing.tRunner(0xc0013dac40, 0xc0025b7ae0)\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1798 +0x25f\n", - "testing.runTests(0xc0010a0b70, {0x14366840, 0x25, 0x25}, {0x3?, 0x0?, 0x146214a0?})\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:2277 +0x96d\n", - "testing.(*M).Run(0xc0014732c0)\n", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:2142 +0xeeb\n", - "main.main()\n", - "\t_testmain.go:119 +0x165\n", + name: "Executor error during rerun", + initialFailedTests: []reports.TestResult{ + {TestName: "TestFailA", TestPackage: "pkgA", Failures: 1, Runs: 1}, }, - }, - { - name: "possible regex trip-up", - expectedTimeout: false, - expectedError: ErrFailedToAttributePanicToTest, - outputs: []string{ - "panic: runtime error: invalid memory address or nil pointer dereference\n", - "[signal SIGSEGV: segmentation violation code=0x1 addr=0x18 pc=0x21589cc]\n", - "\n", - "goroutine 3048 [running]:\n", - "github.com/smartcontractkit/chainlink/v2/core/services/workflows.(*MeteringReport).Message(0x0)\n", - "\t/home/runner/work/chainlink/chainlink/core/services/workflows/metering.go:147 +0x6c\n", - "github.com/smartcontractkit/chainlink/v2/core/services/workflows.newTestEngine.func4(0x0)\n", // Possible regex trip-up - "\t/home/runner/work/chainlink/chainlink/core/services/workflows/engine_test.go:230 +0x1e5\n", - "github.com/smartcontractkit/chainlink/v2/core/services/workflows.(*Engine).handleStepUpdate(0xc002a8ce08, {0x533a008, 0xc002aee730}, {{0xc001428f00, 0xe}, {0xc000ebef60, 0x22}, {0x4cdb827, 0x9}, 0xc001426338, ...}, ...)\n", - "\t/home/runner/work/chainlink/chainlink/core/services/workflows/engine.go:662 +0x72f\n", - "github.com/smartcontractkit/chainlink/v2/core/services/workflows.(*Engine).stepUpdateLoop(0xc002a8ce08, {0x533a008, 0xc002aee730}, {0xc002746540, 0xe}, 0xc00275e000, 0xc00274e4b0)\n", - 
"\t/home/runner/work/chainlink/chainlink/core/services/workflows/engine.go:545 +0x418\n", - "created by github.com/smartcontractkit/chainlink/v2/core/services/workflows.(*Engine).resumeInProgressExecutions in goroutine 3314\n", - "\t/home/runner/work/chainlink/chainlink/core/services/workflows/engine.go:437 +0x511\n", - " logger.go:146: 2025-03-21T17:15:55.491Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.491Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.493Z\tDEBUG\tEVM.1337.Txm.Confirmer\ttxmgr/confirmer.go:265\tFinished CheckForConfirmation\t{\"version\": \"unset@unset\", \"headNum\": 230, \"time\": \"14.767649ms\", \"id\": \"confirmer\"}\n", - " logger.go:146: 2025-03-21T17:15:55.493Z\tDEBUG\tEVM.1337.Txm.Confirmer\ttxmgr/confirmer.go:271\tFinished ProcessStuckTransactions\t{\"version\": \"unset@unset\", \"headNum\": 230, \"time\": \"1.543µs\", \"id\": \"confirmer\"}\n", - " logger.go:146: 2025-03-21T17:15:55.493Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.493Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.493Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.493Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.493Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.493Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.496Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.496Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.496Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.496Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.496Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.496Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.498Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.499Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.504Z\tDEBUG\tEVM.1337.Txm.Confirmer\ttxmgr/confirmer.go:277\tFinished RebroadcastWhereNecessary\t{\"version\": 
\"unset@unset\", \"headNum\": 230, \"time\": \"11.026144ms\", \"id\": \"confirmer\"}\n", - " logger.go:146: 2025-03-21T17:15:55.504Z\tDEBUG\tEVM.1337.Txm.Confirmer\ttxmgr/confirmer.go:278\tprocessHead finish\t{\"version\": \"unset@unset\", \"headNum\": 230, \"id\": \"confirmer\"}\n", - " logger.go:146: 2025-03-21T17:15:55.506Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1035\tPolling for logs\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 443}\n", - " logger.go:146: 2025-03-21T17:15:55.506Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1141\tLatest blocks read from chain\t{\"version\": \"unset@unset\", \"latest\": 443, \"finalized\": 441}\n", - " logger.go:146: 2025-03-21T17:15:55.508Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1099\tUnfinalized log query\t{\"version\": \"unset@unset\", \"logs\": 7, \"currentBlockNumber\": 443, \"blockHash\": \"0xbbb8232c79d104d6da1cd97f9725a44e3fc3dd660a519ba139590f5367ec2b8f\", \"timestamp\": \"2025-03-21T17:21:47.000Z\"}\n", - " logger.go:146: 2025-03-21T17:15:55.515Z\tDEBUG\tEVM.1000.LogPoller\tlogpoller/log_poller.go:1035\tPolling for logs\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 560}\n", - " logger.go:146: 2025-03-21T17:15:55.516Z\tDEBUG\tEVM.1000.LogPoller\tlogpoller/log_poller.go:1141\tLatest blocks read from chain\t{\"version\": \"unset@unset\", \"latest\": 560, \"finalized\": 558}\n", - " logger.go:146: 2025-03-21T17:15:55.517Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1035\tPolling for logs\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 443}\n", - " logger.go:146: 2025-03-21T17:15:55.517Z\tDEBUG\tEVM.1000.LogPoller\tlogpoller/log_poller.go:1099\tUnfinalized log query\t{\"version\": \"unset@unset\", \"logs\": 0, \"currentBlockNumber\": 560, \"blockHash\": \"0xe04f73ebcb6fca77f11860b9f03f0c4dec77ffffa8202366ee1e512155b0e108\", \"timestamp\": \"2025-03-21T17:23:44.000Z\"}\n", - " logger.go:146: 2025-03-21T17:15:55.518Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1141\tLatest blocks read from chain\t{\"version\": \"unset@unset\", \"latest\": 443, \"finalized\": 441}\n", - " logger.go:146: 2025-03-21T17:15:55.519Z\tDEBUG\toracle_streams_2.OCR2.offchainreporting2.a2742fc2-23d8-408d-9bce-d78b539b9f44.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:352\tGot observations from peer\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"2e0b415eb4d97f389ff6d6c33eaadf0cc4613171ebdc59d1e87f91539985a7ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"stage\": \"Outcome\", \"sv\": {\"52\":[\"2976.39\"]}, \"oracleID\": 2, \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.519Z\tDEBUG\toracle_streams_2.OCR2.offchainreporting2.a2742fc2-23d8-408d-9bce-d78b539b9f44.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:352\tGot observations from peer\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"2e0b415eb4d97f389ff6d6c33eaadf0cc4613171ebdc59d1e87f91539985a7ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"stage\": \"Outcome\", \"sv\": {\"52\":[\"2976.39\",\"2976.39\"]}, \"oracleID\": 0, \"seqNr\": 78}\n", 
- " logger.go:146: 2025-03-21T17:15:55.519Z\tDEBUG\toracle_streams_2.OCR2.offchainreporting2.a2742fc2-23d8-408d-9bce-d78b539b9f44.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:352\tGot observations from peer\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"2e0b415eb4d97f389ff6d6c33eaadf0cc4613171ebdc59d1e87f91539985a7ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"stage\": \"Outcome\", \"sv\": {\"52\":[\"2976.39\",\"2976.39\",\"2976.39\"]}, \"oracleID\": 1, \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.519Z\tDEBUG\toracle_streams_2.OCR2.offchainreporting2.a2742fc2-23d8-408d-9bce-d78b539b9f44.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:352\tGot observations from peer\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"2e0b415eb4d97f389ff6d6c33eaadf0cc4613171ebdc59d1e87f91539985a7ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"stage\": \"Outcome\", \"sv\": {\"52\":[\"2976.39\",\"2976.39\",\"2976.39\",\"2976.39\"]}, \"oracleID\": 3, \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.519Z\tDEBUG\toracle_streams_2.OCR2.offchainreporting2.a2742fc2-23d8-408d-9bce-d78b539b9f44.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:167\tChannel is not reportable\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"2e0b415eb4d97f389ff6d6c33eaadf0cc4613171ebdc59d1e87f91539985a7ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"channelID\": 1, \"err\": \"ChannelID: 1; Reason: ChannelID: 1; Reason: IsReportable=false; not valid yet (observationsTimestampSeconds=1742577354, validAfterSeconds=1742577354)\", \"stage\": \"Outcome\", \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.519Z\tDEBUG\toracle_streams_2.OCR2.offchainreporting2.a2742fc2-23d8-408d-9bce-d78b539b9f44.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:298\tGenerated outcome\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"2e0b415eb4d97f389ff6d6c33eaadf0cc4613171ebdc59d1e87f91539985a7ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"outcome\": {\"LifeCycleStage\":\"production\",\"ObservationTimestampNanoseconds\":1742577355265352958,\"ChannelDefinitions\":{\"1\":{\"reportFormat\":\"json\",\"streams\":[{\"streamId\":52,\"aggregator\":\"median\"}],\"opts\":null}},\"ValidAfterNanoseconds\":{\"1\":1742577354000000000},\"StreamAggregates\":{\"52\":{\"median\":\"2976.39\"}}}, \"stage\": \"Outcome\", \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.520Z\tDEBUG\toracle_streams_1.OCR2.offchainreporting2.5c86b148-b15a-4541-82b9-ae0079f35304.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:352\tGot observations from peer\t{\"version\": 
\"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"43e4d609de4ea5422f3796de3874abe56e755c2b6ca575c05707f6a341402bf9\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"stage\": \"Outcome\", \"sv\": {\"52\":[\"2976.39\"]}, \"oracleID\": 2, \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.520Z\tDEBUG\toracle_streams_1.OCR2.offchainreporting2.5c86b148-b15a-4541-82b9-ae0079f35304.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:352\tGot observations from peer\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"43e4d609de4ea5422f3796de3874abe56e755c2b6ca575c05707f6a341402bf9\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"stage\": \"Outcome\", \"sv\": {\"52\":[\"2976.39\",\"2976.39\"]}, \"oracleID\": 0, \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.520Z\tDEBUG\toracle_streams_1.OCR2.offchainreporting2.5c86b148-b15a-4541-82b9-ae0079f35304.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:352\tGot observations from peer\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"43e4d609de4ea5422f3796de3874abe56e755c2b6ca575c05707f6a341402bf9\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"stage\": \"Outcome\", \"sv\": {\"52\":[\"2976.39\",\"2976.39\",\"2976.39\"]}, \"oracleID\": 1, \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.520Z\tDEBUG\toracle_streams_1.OCR2.offchainreporting2.5c86b148-b15a-4541-82b9-ae0079f35304.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:352\tGot observations from peer\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"43e4d609de4ea5422f3796de3874abe56e755c2b6ca575c05707f6a341402bf9\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"stage\": \"Outcome\", \"sv\": {\"52\":[\"2976.39\",\"2976.39\",\"2976.39\",\"2976.39\"]}, \"oracleID\": 3, \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.520Z\tDEBUG\toracle_streams_1.OCR2.offchainreporting2.5c86b148-b15a-4541-82b9-ae0079f35304.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:167\tChannel is not reportable\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"43e4d609de4ea5422f3796de3874abe56e755c2b6ca575c05707f6a341402bf9\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"channelID\": 1, \"err\": \"ChannelID: 1; Reason: ChannelID: 1; Reason: IsReportable=false; not valid yet (observationsTimestampSeconds=1742577354, validAfterSeconds=1742577354)\", \"stage\": \"Outcome\", \"seqNr\": 78}\n", - " 
logger.go:146: 2025-03-21T17:15:55.520Z\tDEBUG\toracle_streams_1.OCR2.offchainreporting2.5c86b148-b15a-4541-82b9-ae0079f35304.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:298\tGenerated outcome\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"43e4d609de4ea5422f3796de3874abe56e755c2b6ca575c05707f6a341402bf9\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"outcome\": {\"LifeCycleStage\":\"production\",\"ObservationTimestampNanoseconds\":1742577355265352958,\"ChannelDefinitions\":{\"1\":{\"reportFormat\":\"json\",\"streams\":[{\"streamId\":52,\"aggregator\":\"median\"}],\"opts\":null}},\"ValidAfterNanoseconds\":{\"1\":1742577354000000000},\"StreamAggregates\":{\"52\":{\"median\":\"2976.39\"}}}, \"stage\": \"Outcome\", \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.521Z\tDEBUG\toracle_streams_0.OCR2.offchainreporting2.a01a923c-0104-4a46-a904-14bc4abc096a.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:352\tGot observations from peer\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"68ab5d04d12c8d2127639d2e32c294e8e849fa9b608f5ad0a650bc0e7386d448\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"stage\": \"Outcome\", \"sv\": {\"52\":[\"2976.39\"]}, \"oracleID\": 2, \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.521Z\tDEBUG\toracle_streams_0.OCR2.offchainreporting2.a01a923c-0104-4a46-a904-14bc4abc096a.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:352\tGot observations from peer\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"68ab5d04d12c8d2127639d2e32c294e8e849fa9b608f5ad0a650bc0e7386d448\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"stage\": \"Outcome\", \"sv\": {\"52\":[\"2976.39\",\"2976.39\"]}, \"oracleID\": 0, \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.521Z\tDEBUG\toracle_streams_0.OCR2.offchainreporting2.a01a923c-0104-4a46-a904-14bc4abc096a.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:352\tGot observations from peer\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"68ab5d04d12c8d2127639d2e32c294e8e849fa9b608f5ad0a650bc0e7386d448\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"stage\": \"Outcome\", \"sv\": {\"52\":[\"2976.39\",\"2976.39\",\"2976.39\"]}, \"oracleID\": 1, \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.521Z\tDEBUG\toracle_streams_0.OCR2.offchainreporting2.a01a923c-0104-4a46-a904-14bc4abc096a.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:352\tGot observations from peer\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": 
\"68ab5d04d12c8d2127639d2e32c294e8e849fa9b608f5ad0a650bc0e7386d448\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"stage\": \"Outcome\", \"sv\": {\"52\":[\"2976.39\",\"2976.39\",\"2976.39\",\"2976.39\"]}, \"oracleID\": 3, \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.521Z\tDEBUG\toracle_streams_0.OCR2.offchainreporting2.a01a923c-0104-4a46-a904-14bc4abc096a.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:167\tChannel is not reportable\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"68ab5d04d12c8d2127639d2e32c294e8e849fa9b608f5ad0a650bc0e7386d448\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"channelID\": 1, \"err\": \"ChannelID: 1; Reason: ChannelID: 1; Reason: IsReportable=false; not valid yet (observationsTimestampSeconds=1742577354, validAfterSeconds=1742577354)\", \"stage\": \"Outcome\", \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.521Z\tDEBUG\toracle_streams_0.OCR2.offchainreporting2.a01a923c-0104-4a46-a904-14bc4abc096a.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:298\tGenerated outcome\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"68ab5d04d12c8d2127639d2e32c294e8e849fa9b608f5ad0a650bc0e7386d448\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"outcome\": {\"LifeCycleStage\":\"production\",\"ObservationTimestampNanoseconds\":1742577355265352958,\"ChannelDefinitions\":{\"1\":{\"reportFormat\":\"json\",\"streams\":[{\"streamId\":52,\"aggregator\":\"median\"}],\"opts\":null}},\"ValidAfterNanoseconds\":{\"1\":1742577354000000000},\"StreamAggregates\":{\"52\":{\"median\":\"2976.39\"}}}, \"stage\": \"Outcome\", \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.522Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1099\tUnfinalized log query\t{\"version\": \"unset@unset\", \"logs\": 7, \"currentBlockNumber\": 443, \"blockHash\": \"0xbbb8232c79d104d6da1cd97f9725a44e3fc3dd660a519ba139590f5367ec2b8f\", \"timestamp\": \"2025-03-21T17:21:47.000Z\"}\n", - " logger.go:146: 2025-03-21T17:15:55.522Z\tDEBUG\toracle_streams_3.OCR2.offchainreporting2.9a9f0afb-b437-4026-8133-f52c4bc053e9.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:352\tGot observations from peer\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"b3567e847b4b38827a78d4c289aa559674bee38859064ded8dce99aa1a2f99ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"stage\": \"Outcome\", \"sv\": {\"52\":[\"2976.39\"]}, \"oracleID\": 2, \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.522Z\tDEBUG\toracle_streams_3.OCR2.offchainreporting2.9a9f0afb-b437-4026-8133-f52c4bc053e9.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:352\tGot observations from peer\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", 
\"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"b3567e847b4b38827a78d4c289aa559674bee38859064ded8dce99aa1a2f99ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"stage\": \"Outcome\", \"sv\": {\"52\":[\"2976.39\",\"2976.39\"]}, \"oracleID\": 0, \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.522Z\tDEBUG\toracle_streams_3.OCR2.offchainreporting2.9a9f0afb-b437-4026-8133-f52c4bc053e9.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:352\tGot observations from peer\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"b3567e847b4b38827a78d4c289aa559674bee38859064ded8dce99aa1a2f99ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"stage\": \"Outcome\", \"sv\": {\"52\":[\"2976.39\",\"2976.39\",\"2976.39\"]}, \"oracleID\": 1, \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.522Z\tDEBUG\toracle_streams_3.OCR2.offchainreporting2.9a9f0afb-b437-4026-8133-f52c4bc053e9.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:352\tGot observations from peer\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"b3567e847b4b38827a78d4c289aa559674bee38859064ded8dce99aa1a2f99ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"stage\": \"Outcome\", \"sv\": {\"52\":[\"2976.39\",\"2976.39\",\"2976.39\",\"2976.39\"]}, \"oracleID\": 3, \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.522Z\tDEBUG\toracle_streams_3.OCR2.offchainreporting2.9a9f0afb-b437-4026-8133-f52c4bc053e9.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:167\tChannel is not reportable\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"b3567e847b4b38827a78d4c289aa559674bee38859064ded8dce99aa1a2f99ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"channelID\": 1, \"err\": \"ChannelID: 1; Reason: ChannelID: 1; Reason: IsReportable=false; not valid yet (observationsTimestampSeconds=1742577354, validAfterSeconds=1742577354)\", \"stage\": \"Outcome\", \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.522Z\tDEBUG\toracle_streams_3.OCR2.offchainreporting2.9a9f0afb-b437-4026-8133-f52c4bc053e9.LLO.1.ReportingPlugin\tllo/plugin_outcome.go:298\tGenerated outcome\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"b3567e847b4b38827a78d4c289aa559674bee38859064ded8dce99aa1a2f99ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"outcome\": 
{\"LifeCycleStage\":\"production\",\"ObservationTimestampNanoseconds\":1742577355265352958,\"ChannelDefinitions\":{\"1\":{\"reportFormat\":\"json\",\"streams\":[{\"streamId\":52,\"aggregator\":\"median\"}],\"opts\":null}},\"ValidAfterNanoseconds\":{\"1\":1742577354000000000},\"StreamAggregates\":{\"52\":{\"median\":\"2976.39\"}}}, \"stage\": \"Outcome\", \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.528Z\tDEBUG\toracle_streams_3.OCR2.offchainreporting2.9a9f0afb-b437-4026-8133-f52c4bc053e9.LLO.1.ReportingPlugin\tllo/plugin_reports.go:51\tReportable channels\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"b3567e847b4b38827a78d4c289aa559674bee38859064ded8dce99aa1a2f99ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"lifeCycleStage\": \"production\", \"reportableChannels\": [1], \"unreportableChannels\": null, \"stage\": \"Report\", \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.528Z\tDEBUG\toracle_streams_3.OCR2.offchainreporting2.9a9f0afb-b437-4026-8133-f52c4bc053e9.LLO.1.ReportingPlugin\tllo/plugin_reports.go:72\tEmitting report\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"b3567e847b4b38827a78d4c289aa559674bee38859064ded8dce99aa1a2f99ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"lifeCycleStage\": \"production\", \"channelID\": 1, \"report\": {\"ConfigDigest\":\"000946ca792b3e79ace0817d9b9ad9e397dbdc18af8ee9c5bb751b5863ddb86d\",\"SeqNr\":78,\"ChannelID\":1,\"ValidAfterNanoseconds\":1742577354000000000,\"ObservationTimestampNanoseconds\":1742577355265352958,\"Values\":[\"2976.39\"],\"Specimen\":false}, \"stage\": \"Report\", \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.529Z\tDEBUG\toracle_streams_2.OCR2.offchainreporting2.a2742fc2-23d8-408d-9bce-d78b539b9f44.LLO.1.ReportingPlugin\tllo/plugin_reports.go:51\tReportable channels\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"2e0b415eb4d97f389ff6d6c33eaadf0cc4613171ebdc59d1e87f91539985a7ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"lifeCycleStage\": \"production\", \"reportableChannels\": [1], \"unreportableChannels\": null, \"stage\": \"Report\", \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.529Z\tDEBUG\toracle_streams_2.OCR2.offchainreporting2.a2742fc2-23d8-408d-9bce-d78b539b9f44.LLO.1.ReportingPlugin\tllo/plugin_reports.go:72\tEmitting report\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"2e0b415eb4d97f389ff6d6c33eaadf0cc4613171ebdc59d1e87f91539985a7ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"lifeCycleStage\": \"production\", \"channelID\": 1, \"report\": 
{\"ConfigDigest\":\"000946ca792b3e79ace0817d9b9ad9e397dbdc18af8ee9c5bb751b5863ddb86d\",\"SeqNr\":78,\"ChannelID\":1,\"ValidAfterNanoseconds\":1742577354000000000,\"ObservationTimestampNanoseconds\":1742577355265352958,\"Values\":[\"2976.39\"],\"Specimen\":false}, \"stage\": \"Report\", \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.528Z\tDEBUG\toracle_streams_1.OCR2.offchainreporting2.5c86b148-b15a-4541-82b9-ae0079f35304.LLO.1.ReportingPlugin\tllo/plugin_reports.go:51\tReportable channels\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"43e4d609de4ea5422f3796de3874abe56e755c2b6ca575c05707f6a341402bf9\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"lifeCycleStage\": \"production\", \"reportableChannels\": [1], \"unreportableChannels\": null, \"stage\": \"Report\", \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.532Z\tDEBUG\toracle_streams_1.OCR2.offchainreporting2.5c86b148-b15a-4541-82b9-ae0079f35304.LLO.1.ReportingPlugin\tllo/plugin_reports.go:72\tEmitting report\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"43e4d609de4ea5422f3796de3874abe56e755c2b6ca575c05707f6a341402bf9\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"lifeCycleStage\": \"production\", \"channelID\": 1, \"report\": {\"ConfigDigest\":\"000946ca792b3e79ace0817d9b9ad9e397dbdc18af8ee9c5bb751b5863ddb86d\",\"SeqNr\":78,\"ChannelID\":1,\"ValidAfterNanoseconds\":1742577354000000000,\"ObservationTimestampNanoseconds\":1742577355265352958,\"Values\":[\"2976.39\"],\"Specimen\":false}, \"stage\": \"Report\", \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.529Z\tDEBUG\toracle_streams_3.EVM.1337.Relayer.job-3.LLO-888333\tllo/transmitter.go:138\tTransmit report\t{\"version\": \"unset@unset\", \"evmChainID\": \"1337\", \"donID\": 888333, \"transmitterID\": \"b3567e847b4b38827a78d4c289aa559674bee38859064ded8dce99aa1a2f99ce\", \"configMode\": \"bluegreen\", \"configuratorAddress\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"donID\": 888333, \"digest\": \"000946ca792b3e79ace0817d9b9ad9e397dbdc18af8ee9c5bb751b5863ddb86d\", \"seqNr\": 78, \"report\": {\"Report\":\"eyJDb25maWdEaWdlc3QiOiIwMDA5NDZjYTc5MmIzZTc5YWNlMDgxN2Q5YjlhZDllMzk3ZGJkYzE4YWY4ZWU5YzViYjc1MWI1ODYzZGRiODZkIiwiU2VxTnIiOjc4LCJDaGFubmVsSUQiOjEsIlZhbGlkQWZ0ZXJOYW5vc2Vjb25kcyI6MTc0MjU3NzM1NDAwMDAwMDAwMCwiT2JzZXJ2YXRpb25UaW1lc3RhbXBOYW5vc2Vjb25kcyI6MTc0MjU3NzM1NTI2NTM1Mjk1OCwiVmFsdWVzIjpbeyJ0IjowLCJ2IjoiMjk3Ni4zOSJ9XSwiU3BlY2ltZW4iOmZhbHNlfQ==\",\"Info\":{\"LifeCycleStage\":\"production\",\"ReportFormat\":\"json\"}}, \"sigs\": [{\"Signature\":\"CT9+T7PVUZ8Al7MZir9fQOdUPmZndjrYlnhaZSaoBcga+lfRMCYbkeiMpUiv8Jt1d9DUZrgsdm8T", - "nGKP1EmWAQE=\",\"Signer\":2},{\"Signature\":\"USh7s2xt+5M5OqCHm86lgeGj8g+4dq597bvWeXj4hiJri7Nvohgf4jBTqxzQhFlrdqkST1ysYbhvpkDXkIWhEwE=\",\"Signer\":3}]}\n", - " logger.go:146: 2025-03-21T17:15:55.529Z\tDEBUG\toracle_streams_2.EVM.1337.Relayer.job-3.LLO-888333\tllo/transmitter.go:138\tTransmit report\t{\"version\": \"unset@unset\", \"evmChainID\": \"1337\", \"donID\": 888333, \"transmitterID\": 
\"2e0b415eb4d97f389ff6d6c33eaadf0cc4613171ebdc59d1e87f91539985a7ce\", \"configMode\": \"bluegreen\", \"configuratorAddress\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"donID\": 888333, \"digest\": \"000946ca792b3e79ace0817d9b9ad9e397dbdc18af8ee9c5bb751b5863ddb86d\", \"seqNr\": 78, \"report\": {\"Report\":\"eyJDb25maWdEaWdlc3QiOiIwMDA5NDZjYTc5MmIzZTc5YWNlMDgxN2Q5YjlhZDllMzk3ZGJkYzE4YWY4ZWU5YzViYjc1MWI1ODYzZGRiODZkIiwiU2VxTnIiOjc4LCJDaGFubmVsSUQiOjEsIlZhbGlkQWZ0ZXJOYW5vc2Vjb25kcyI6MTc0MjU3NzM1NDAwMDAwMDAwMCwiT2JzZXJ2YXRpb25UaW1lc3RhbXBOYW5vc2Vjb25kcyI6MTc0MjU3NzM1NTI2NTM1Mjk1OCwiVmFsdWVzIjpbeyJ0IjowLCJ2IjoiMjk3Ni4zOSJ9XSwiU3BlY2ltZW4iOmZhbHNlfQ==\",\"Info\":{\"LifeCycleStage\":\"production\",\"ReportFormat\":\"json\"}}, \"sigs\": [{\"Signature\":\"CT9+T7PVUZ8Al7MZir9fQOdUPmZndjrYlnhaZSaoBcga+lfRMCYbkeiMpUiv8Jt1d9DUZrgsdm8T", - "nGKP1EmWAQE=\",\"Signer\":2},{\"Signature\":\"USh7s2xt+5M5OqCHm86lgeGj8g+4dq597bvWeXj4hiJri7Nvohgf4jBTqxzQhFlrdqkST1ysYbhvpkDXkIWhEwE=\",\"Signer\":3}]}\n", - " logger.go:146: 2025-03-21T17:15:55.532Z\tDEBUG\toracle_streams_1.EVM.1337.Relayer.job-3.LLO-888333\tllo/transmitter.go:138\tTransmit report\t{\"version\": \"unset@unset\", \"evmChainID\": \"1337\", \"donID\": 888333, \"transmitterID\": \"43e4d609de4ea5422f3796de3874abe56e755c2b6ca575c05707f6a341402bf9\", \"configMode\": \"bluegreen\", \"configuratorAddress\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"donID\": 888333, \"digest\": \"000946ca792b3e79ace0817d9b9ad9e397dbdc18af8ee9c5bb751b5863ddb86d\", \"seqNr\": 78, \"report\": {\"Report\":\"eyJDb25maWdEaWdlc3QiOiIwMDA5NDZjYTc5MmIzZTc5YWNlMDgxN2Q5YjlhZDllMzk3ZGJkYzE4YWY4ZWU5YzViYjc1MWI1ODYzZGRiODZkIiwiU2VxTnIiOjc4LCJDaGFubmVsSUQiOjEsIlZhbGlkQWZ0ZXJOYW5vc2Vjb25kcyI6MTc0MjU3NzM1NDAwMDAwMDAwMCwiT2JzZXJ2YXRpb25UaW1lc3RhbXBOYW5vc2Vjb25kcyI6MTc0MjU3NzM1NTI2NTM1Mjk1OCwiVmFsdWVzIjpbeyJ0IjowLCJ2IjoiMjk3Ni4zOSJ9XSwiU3BlY2ltZW4iOmZhbHNlfQ==\",\"Info\":{\"LifeCycleStage\":\"production\",\"ReportFormat\":\"json\"}}, \"sigs\": [{\"Signature\":\"2xZ13fSjHHLkkZFPL1qsOR6uzGdLBM3QmnaWy97LP71wjtECYly7BCxcFXLDY4BjsTO/LojDFmmq", - "Ts0IIeR6LgA=\",\"Signer\":1},{\"Signature\":\"CT9+T7PVUZ8Al7MZir9fQOdUPmZndjrYlnhaZSaoBcga+lfRMCYbkeiMpUiv8Jt1d9DUZrgsdm8TnGKP1EmWAQE=\",\"Signer\":2}]}\n", - " logger.go:146: 2025-03-21T17:15:55.533Z\tDEBUG\tEVM.1000.LogPoller\tlogpoller/log_poller.go:1035\tPolling for logs\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 561}\n", - " logger.go:146: 2025-03-21T17:15:55.533Z\tDEBUG\toracle_streams_0.OCR2.offchainreporting2.a01a923c-0104-4a46-a904-14bc4abc096a.LLO.1.ReportingPlugin\tllo/plugin_reports.go:51\tReportable channels\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"68ab5d04d12c8d2127639d2e32c294e8e849fa9b608f5ad0a650bc0e7386d448\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"lifeCycleStage\": \"production\", \"reportableChannels\": [1], \"unreportableChannels\": null, \"stage\": \"Report\", \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.533Z\tDEBUG\toracle_streams_0.OCR2.offchainreporting2.a01a923c-0104-4a46-a904-14bc4abc096a.LLO.1.ReportingPlugin\tllo/plugin_reports.go:72\tEmitting report\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": 
\"68ab5d04d12c8d2127639d2e32c294e8e849fa9b608f5ad0a650bc0e7386d448\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"lloProtocolVersion\": 0, \"lifeCycleStage\": \"production\", \"channelID\": 1, \"report\": {\"ConfigDigest\":\"000946ca792b3e79ace0817d9b9ad9e397dbdc18af8ee9c5bb751b5863ddb86d\",\"SeqNr\":78,\"ChannelID\":1,\"ValidAfterNanoseconds\":1742577354000000000,\"ObservationTimestampNanoseconds\":1742577355265352958,\"Values\":[\"2976.39\"],\"Specimen\":false}, \"stage\": \"Report\", \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.533Z\tDEBUG\tEVM.1000.LogPoller\tlogpoller/log_poller.go:1141\tLatest blocks read from chain\t{\"version\": \"unset@unset\", \"latest\": 560, \"finalized\": 558}\n", - " logger.go:146: 2025-03-21T17:15:55.533Z\tDEBUG\tEVM.1000.LogPoller\tlogpoller/log_poller.go:1047\tNo new blocks since last poll\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 561, \"latestBlockNumber\": 560}\n", - " logger.go:146: 2025-03-21T17:15:55.534Z\tDEBUG\toracle_streams_3.EVM.1337.Relayer.job-3.LLO-888333.LLOMercuryTransmitter.LLOMercuryTransmitter\tmercurytransmitter/transmitter.go:315\tTransmit report\t{\"version\": \"unset@unset\", \"evmChainID\": \"1337\", \"donID\": 888333, \"transmitterID\": \"b3567e847b4b38827a78d4c289aa559674bee38859064ded8dce99aa1a2f99ce\", \"configMode\": \"bluegreen\", \"configuratorAddress\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"donID\": 888333, \"digest\": \"000946ca792b3e79ace0817d9b9ad9e397dbdc18af8ee9c5bb751b5863ddb86d\", \"seqNr\": 78, \"reportFormat\": \"json\", \"reportLifeCycleStage\": \"production\", \"transmissionHash\": \"773e4fcec57212c299c183b740e86ca6026439c6b2c7f159c33e901e9b5ca37c\"}\n", - " logger.go:146: 2025-03-21T17:15:55.534Z\tINFO\toracle_streams_3.OCR2.offchainreporting2.9a9f0afb-b437-4026-8133-f52c4bc053e9.LLO.1\tllo/suppressed_logger.go:51\t🚀 successfully invoked ContractTransmitter.Transmit\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"b3567e847b4b38827a78d4c289aa559674bee38859064ded8dce99aa1a2f99ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"oid\": 3, \"configDigest\": \"000946ca792b3e79ace0817d9b9ad9e397dbdc18af8ee9c5bb751b5863ddb86d\", \"proto\": \"transmission\", \"seqNr\": 78, \"index\": 0}\n", - " logger.go:146: 2025-03-21T17:15:55.534Z\tDEBUG\toracle_streams_1.EVM.1337.Relayer.job-3.LLO-888333.LLOMercuryTransmitter.LLOMercuryTransmitter\tmercurytransmitter/transmitter.go:315\tTransmit report\t{\"version\": \"unset@unset\", \"evmChainID\": \"1337\", \"donID\": 888333, \"transmitterID\": \"43e4d609de4ea5422f3796de3874abe56e755c2b6ca575c05707f6a341402bf9\", \"configMode\": \"bluegreen\", \"configuratorAddress\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"donID\": 888333, \"digest\": \"000946ca792b3e79ace0817d9b9ad9e397dbdc18af8ee9c5bb751b5863ddb86d\", \"seqNr\": 78, \"reportFormat\": \"json\", \"reportLifeCycleStage\": \"production\", \"transmissionHash\": \"33d4af4eef92950ac4483c2ceae526d2e5a4a1cc0b5b9fe7074e819a3b2b474f\"}\n", - " logger.go:146: 2025-03-21T17:15:55.534Z\tINFO\toracle_streams_1.OCR2.offchainreporting2.5c86b148-b15a-4541-82b9-ae0079f35304.LLO.1\tllo/suppressed_logger.go:51\t🚀 successfully invoked 
ContractTransmitter.Transmit\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"43e4d609de4ea5422f3796de3874abe56e755c2b6ca575c05707f6a341402bf9\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"oid\": 1, \"proto\": \"transmission\", \"configDigest\": \"000946ca792b3e79ace0817d9b9ad9e397dbdc18af8ee9c5bb751b5863ddb86d\", \"index\": 0, \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.534Z\tDEBUG\toracle_streams_0.EVM.1337.Relayer.job-3.LLO-888333\tllo/transmitter.go:138\tTransmit report\t{\"version\": \"unset@unset\", \"evmChainID\": \"1337\", \"donID\": 888333, \"transmitterID\": \"68ab5d04d12c8d2127639d2e32c294e8e849fa9b608f5ad0a650bc0e7386d448\", \"configMode\": \"bluegreen\", \"configuratorAddress\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"donID\": 888333, \"digest\": \"000946ca792b3e79ace0817d9b9ad9e397dbdc18af8ee9c5bb751b5863ddb86d\", \"seqNr\": 78, \"report\": {\"Report\":\"eyJDb25maWdEaWdlc3QiOiIwMDA5NDZjYTc5MmIzZTc5YWNlMDgxN2Q5YjlhZDllMzk3ZGJkYzE4YWY4ZWU5YzViYjc1MWI1ODYzZGRiODZkIiwiU2VxTnIiOjc4LCJDaGFubmVsSUQiOjEsIlZhbGlkQWZ0ZXJOYW5vc2Vjb25kcyI6MTc0MjU3NzM1NDAwMDAwMDAwMCwiT2JzZXJ2YXRpb25UaW1lc3RhbXBOYW5vc2Vjb25kcyI6MTc0MjU3NzM1NTI2NTM1Mjk1OCwiVmFsdWVzIjpbeyJ0IjowLCJ2IjoiMjk3Ni4zOSJ9XSwiU3BlY2ltZW4iOmZhbHNlfQ==\",\"Info\":{\"LifeCycleStage\":\"production\",\"ReportFormat\":\"json\"}}, \"sigs\": [{\"Signature\":\"CT9+T7PVUZ8Al7MZir9fQOdUPmZndjrYlnhaZSaoBcga+lfRMCYbkeiMpUiv8Jt1d9DUZrgsdm8T", - "nGKP1EmWAQE=\",\"Signer\":2},{\"Signature\":\"USh7s2xt+5M5OqCHm86lgeGj8g+4dq597bvWeXj4hiJri7Nvohgf4jBTqxzQhFlrdqkST1ysYbhvpkDXkIWhEwE=\",\"Signer\":3}]}\n", - " logger.go:146: 2025-03-21T17:15:55.534Z\tDEBUG\toracle_streams_3.EVM.1337.Relayer.job-3.LLO-888333.LLOMercuryTransmitter.\"127.0.0.1:46557\"\tmercurytransmitter/server.go:231\tTransmit report success\t{\"version\": \"unset@unset\", \"evmChainID\": \"1337\", \"donID\": 888333, \"transmitterID\": \"b3567e847b4b38827a78d4c289aa559674bee38859064ded8dce99aa1a2f99ce\", \"configMode\": \"bluegreen\", \"configuratorAddress\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"donID\": 888333, \"serverURL\": \"127.0.0.1:46557\", \"req.Payload\": \"eyJjb25maWdEaWdlc3QiOiIwMDA5NDZjYTc5MmIzZTc5YWNlMDgxN2Q5YjlhZDllMzk3ZGJkYzE4YWY4ZWU5YzViYjc1MWI1ODYzZGRiODZkIiwic2VxTnIiOjc4LCJyZXBvcnQiOnsiQ29uZmlnRGlnZXN0IjoiMDAwOTQ2Y2E3OTJiM2U3OWFjZTA4MTdkOWI5YWQ5ZTM5N2RiZGMxOGFmOGVlOWM1YmI3NTFiNTg2M2RkYjg2ZCIsIlNlcU5yIjo3OCwiQ2hhbm5lbElEIjoxLCJWYWxpZEFmdGVyTmFub3NlY29uZHMiOjE3NDI1NzczNTQwMDAwMDAwMDAsIk9ic2VydmF0aW9uVGltZXN0YW1wTmFub3NlY29uZHMiOjE3NDI1NzczNTUyNjUzNTI5NTgsIlZhbHVlcyI6W3sidCI6MCwidiI6IjI5NzYuMzkifV0sIlNwZWNpbWVuIjpmYWxzZX0sInNpZ3MiOlt7IlNpZ25hdHVyZSI6IkNUOS", - "tUN1BWVVo4QWw3TVppcjlmUU9kVVBtWm5kanJZbG5oYVpTYW9CY2dhK2xmUk1DWWJrZWlNcFVpdjhKdDFkOURVWnJnc2RtOFRuR0tQMUVtV0FRRT0iLCJTaWduZXIiOjJ9LHsiU2lnbmF0dXJlIjoiVVNoN3MyeHQrNU01T3FDSG04NmxnZUdqOGcrNGRxNTk3YnZXZVhqNGhpSnJpN052b2hnZjRqQlRxeHpRaEZscmRxa1NUMXlzWWJodnBrRFhrSVdoRXdFPSIsIlNpZ25lciI6M31dfQ==\", \"req.ReportFormat\": 2}\n", - " logger.go:146: 2025-03-21T17:15:55.535Z\tDEBUG\toracle_streams_1.EVM.1337.Relayer.job-3.LLO-888333.LLOMercuryTransmitter.\"127.0.0.1:46557\"\tmercurytransmitter/server.go:231\tTransmit report success\t{\"version\": \"unset@unset\", \"evmChainID\": \"1337\", \"donID\": 888333, \"transmitterID\": 
\"43e4d609de4ea5422f3796de3874abe56e755c2b6ca575c05707f6a341402bf9\", \"configMode\": \"bluegreen\", \"configuratorAddress\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"donID\": 888333, \"serverURL\": \"127.0.0.1:46557\", \"req.Payload\": \"eyJjb25maWdEaWdlc3QiOiIwMDA5NDZjYTc5MmIzZTc5YWNlMDgxN2Q5YjlhZDllMzk3ZGJkYzE4YWY4ZWU5YzViYjc1MWI1ODYzZGRiODZkIiwic2VxTnIiOjc4LCJyZXBvcnQiOnsiQ29uZmlnRGlnZXN0IjoiMDAwOTQ2Y2E3OTJiM2U3OWFjZTA4MTdkOWI5YWQ5ZTM5N2RiZGMxOGFmOGVlOWM1YmI3NTFiNTg2M2RkYjg2ZCIsIlNlcU5yIjo3OCwiQ2hhbm5lbElEIjoxLCJWYWxpZEFmdGVyTmFub3NlY29uZHMiOjE3NDI1NzczNTQwMDAwMDAwMDAsIk9ic2VydmF0aW9uVGltZXN0YW1wTmFub3NlY29uZHMiOjE3NDI1NzczNTUyNjUzNTI5NTgsIlZhbHVlcyI6W3sidCI6MCwidiI6IjI5NzYuMzkifV0sIlNwZWNpbWVuIjpmYWxzZX0sInNpZ3MiOlt7IlNpZ25hdHVyZSI6IjJ4Wj", - "EzZlNqSEhMa2taRlBMMXFzT1I2dXpHZExCTTNRbW5hV3k5N0xQNzF3anRFQ1lseTdCQ3hjRlhMRFk0QmpzVE8vTG9qREZtbXFUczBJSWVSNkxnQT0iLCJTaWduZXIiOjF9LHsiU2lnbmF0dXJlIjoiQ1Q5K1Q3UFZVWjhBbDdNWmlyOWZRT2RVUG1abmRqcllsbmhhWlNhb0JjZ2ErbGZSTUNZYmtlaU1wVWl2OEp0MWQ5RFVacmdzZG04VG5HS1AxRW1XQVFFPSIsIlNpZ25lciI6Mn1dfQ==\", \"req.ReportFormat\": 2}\n", - " logger.go:146: 2025-03-21T17:15:55.535Z\tDEBUG\tEVM.1000.LogPoller\tlogpoller/log_poller.go:1035\tPolling for logs\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 561}\n", - " logger.go:146: 2025-03-21T17:15:55.536Z\tDEBUG\tEVM.1000.LogPoller\tlogpoller/log_poller.go:1141\tLatest blocks read from chain\t{\"version\": \"unset@unset\", \"latest\": 560, \"finalized\": 558}\n", - " logger.go:146: 2025-03-21T17:15:55.536Z\tDEBUG\tEVM.1000.LogPoller\tlogpoller/log_poller.go:1047\tNo new blocks since last poll\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 561, \"latestBlockNumber\": 560}\n", - " logger.go:146: 2025-03-21T17:15:55.536Z\tDEBUG\toracle_streams_0.EVM.1337.Relayer.job-3.LLO-888333.LLOMercuryTransmitter.LLOMercuryTransmitter\tmercurytransmitter/transmitter.go:315\tTransmit report\t{\"version\": \"unset@unset\", \"evmChainID\": \"1337\", \"donID\": 888333, \"transmitterID\": \"68ab5d04d12c8d2127639d2e32c294e8e849fa9b608f5ad0a650bc0e7386d448\", \"configMode\": \"bluegreen\", \"configuratorAddress\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"donID\": 888333, \"digest\": \"000946ca792b3e79ace0817d9b9ad9e397dbdc18af8ee9c5bb751b5863ddb86d\", \"seqNr\": 78, \"reportFormat\": \"json\", \"reportLifeCycleStage\": \"production\", \"transmissionHash\": \"773e4fcec57212c299c183b740e86ca6026439c6b2c7f159c33e901e9b5ca37c\"}\n", - " logger.go:146: 2025-03-21T17:15:55.537Z\tINFO\toracle_streams_0.OCR2.offchainreporting2.a01a923c-0104-4a46-a904-14bc4abc096a.LLO.1\tllo/suppressed_logger.go:51\t🚀 successfully invoked ContractTransmitter.Transmit\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"68ab5d04d12c8d2127639d2e32c294e8e849fa9b608f5ad0a650bc0e7386d448\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"index\": 0, \"configDigest\": \"000946ca792b3e79ace0817d9b9ad9e397dbdc18af8ee9c5bb751b5863ddb86d\", \"oid\": 0, \"proto\": \"transmission\", \"seqNr\": 78}\n", - " logger.go:146: 2025-03-21T17:15:55.537Z\tDEBUG\toracle_streams_0.EVM.1337.Relayer.job-3.LLO-888333.LLOMercuryTransmitter.\"127.0.0.1:46557\"\tmercurytransmitter/server.go:231\tTransmit report success\t{\"version\": \"unset@unset\", \"evmChainID\": \"1337\", \"donID\": 888333, \"transmitterID\": 
\"68ab5d04d12c8d2127639d2e32c294e8e849fa9b608f5ad0a650bc0e7386d448\", \"configMode\": \"bluegreen\", \"configuratorAddress\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"donID\": 888333, \"serverURL\": \"127.0.0.1:46557\", \"req.Payload\": \"eyJjb25maWdEaWdlc3QiOiIwMDA5NDZjYTc5MmIzZTc5YWNlMDgxN2Q5YjlhZDllMzk3ZGJkYzE4YWY4ZWU5YzViYjc1MWI1ODYzZGRiODZkIiwic2VxTnIiOjc4LCJyZXBvcnQiOnsiQ29uZmlnRGlnZXN0IjoiMDAwOTQ2Y2E3OTJiM2U3OWFjZTA4MTdkOWI5YWQ5ZTM5N2RiZGMxOGFmOGVlOWM1YmI3NTFiNTg2M2RkYjg2ZCIsIlNlcU5yIjo3OCwiQ2hhbm5lbElEIjoxLCJWYWxpZEFmdGVyTmFub3NlY29uZHMiOjE3NDI1NzczNTQwMDAwMDAwMDAsIk9ic2VydmF0aW9uVGltZXN0YW1wTmFub3NlY29uZHMiOjE3NDI1NzczNTUyNjUzNTI5NTgsIlZhbHVlcyI6W3sidCI6MCwidiI6IjI5NzYuMzkifV0sIlNwZWNpbWVuIjpmYWxzZX0sInNpZ3MiOlt7IlNpZ25hdHVyZSI6IkNUOS", - "tUN1BWVVo4QWw3TVppcjlmUU9kVVBtWm5kanJZbG5oYVpTYW9CY2dhK2xmUk1DWWJrZWlNcFVpdjhKdDFkOURVWnJnc2RtOFRuR0tQMUVtV0FRRT0iLCJTaWduZXIiOjJ9LHsiU2lnbmF0dXJlIjoiVVNoN3MyeHQrNU01T3FDSG04NmxnZUdqOGcrNGRxNTk3YnZXZVhqNGhpSnJpN052b2hnZjRqQlRxeHpRaEZscmRxa1NUMXlzWWJodnBrRFhrSVdoRXdFPSIsIlNpZ25lciI6M31dfQ==\", \"req.ReportFormat\": 2}\n", - " logger.go:146: 2025-03-21T17:15:55.539Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1035\tPolling for logs\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 444}\n", - " logger.go:146: 2025-03-21T17:15:55.540Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1141\tLatest blocks read from chain\t{\"version\": \"unset@unset\", \"latest\": 443, \"finalized\": 441}\n", - " logger.go:146: 2025-03-21T17:15:55.540Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1047\tNo new blocks since last poll\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 444, \"latestBlockNumber\": 443}\n", - " logger.go:146: 2025-03-21T17:15:55.540Z\tDEBUG\toracle_streams_2.EVM.1337.Relayer.job-3.LLO-888333.LLOMercuryTransmitter.LLOMercuryTransmitter\tmercurytransmitter/transmitter.go:315\tTransmit report\t{\"version\": \"unset@unset\", \"evmChainID\": \"1337\", \"donID\": 888333, \"transmitterID\": \"2e0b415eb4d97f389ff6d6c33eaadf0cc4613171ebdc59d1e87f91539985a7ce\", \"configMode\": \"bluegreen\", \"configuratorAddress\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"donID\": 888333, \"digest\": \"000946ca792b3e79ace0817d9b9ad9e397dbdc18af8ee9c5bb751b5863ddb86d\", \"seqNr\": 78, \"reportFormat\": \"json\", \"reportLifeCycleStage\": \"production\", \"transmissionHash\": \"773e4fcec57212c299c183b740e86ca6026439c6b2c7f159c33e901e9b5ca37c\"}\n", - " logger.go:146: 2025-03-21T17:15:55.540Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1035\tPolling for logs\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 444}\n", - " logger.go:146: 2025-03-21T17:15:55.540Z\tINFO\toracle_streams_2.OCR2.offchainreporting2.a2742fc2-23d8-408d-9bce-d78b539b9f44.LLO.1\tllo/suppressed_logger.go:51\t🚀 successfully invoked ContractTransmitter.Transmit\t{\"version\": \"unset@unset\", \"jobID\": 3, \"jobName\": \"feed-1\", \"contractID\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"transmitterID\": \"2e0b415eb4d97f389ff6d6c33eaadf0cc4613171ebdc59d1e87f91539985a7ce\", \"evmChainID\": \"1337\", \"donID\": 888333, \"channelDefinitionsContractAddress\": \"0xE278738AaB5aA4Cb17F16Ada3D197A2FdE7D935c\", \"instanceType\": \"Green\", \"proto\": \"transmission\", \"configDigest\": \"000946ca792b3e79ace0817d9b9ad9e397dbdc18af8ee9c5bb751b5863ddb86d\", \"oid\": 2, \"seqNr\": 78, \"index\": 0}\n", - " logger.go:146: 2025-03-21T17:15:55.540Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1141\tLatest blocks read from chain\t{\"version\": 
\"unset@unset\", \"latest\": 443, \"finalized\": 441}\n", - " logger.go:146: 2025-03-21T17:15:55.540Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1047\tNo new blocks since last poll\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 444, \"latestBlockNumber\": 443}\n", - " logger.go:146: 2025-03-21T17:15:55.541Z\tDEBUG\toracle_streams_2.EVM.1337.Relayer.job-3.LLO-888333.LLOMercuryTransmitter.\"127.0.0.1:46557\"\tmercurytransmitter/server.go:231\tTransmit report success\t{\"version\": \"unset@unset\", \"evmChainID\": \"1337\", \"donID\": 888333, \"transmitterID\": \"2e0b415eb4d97f389ff6d6c33eaadf0cc4613171ebdc59d1e87f91539985a7ce\", \"configMode\": \"bluegreen\", \"configuratorAddress\": \"0xc78dbd2D4bfCE2fDA728461C5f1b67222a4031B6\", \"donID\": 888333, \"serverURL\": \"127.0.0.1:46557\", \"req.Payload\": \"eyJjb25maWdEaWdlc3QiOiIwMDA5NDZjYTc5MmIzZTc5YWNlMDgxN2Q5YjlhZDllMzk3ZGJkYzE4YWY4ZWU5YzViYjc1MWI1ODYzZGRiODZkIiwic2VxTnIiOjc4LCJyZXBvcnQiOnsiQ29uZmlnRGlnZXN0IjoiMDAwOTQ2Y2E3OTJiM2U3OWFjZTA4MTdkOWI5YWQ5ZTM5N2RiZGMxOGFmOGVlOWM1YmI3NTFiNTg2M2RkYjg2ZCIsIlNlcU5yIjo3OCwiQ2hhbm5lbElEIjoxLCJWYWxpZEFmdGVyTmFub3NlY29uZHMiOjE3NDI1NzczNTQwMDAwMDAwMDAsIk9ic2VydmF0aW9uVGltZXN0YW1wTmFub3NlY29uZHMiOjE3NDI1NzczNTUyNjUzNTI5NTgsIlZhbHVlcyI6W3sidCI6MCwidiI6IjI5NzYuMzkifV0sIlNwZWNpbWVuIjpmYWxzZX0sInNpZ3MiOlt7IlNpZ25hdHVyZSI6IkNUOS", - "tUN1BWVVo4QWw3TVppcjlmUU9kVVBtWm5kanJZbG5oYVpTYW9CY2dhK2xmUk1DWWJrZWlNcFVpdjhKdDFkOURVWnJnc2RtOFRuR0tQMUVtV0FRRT0iLCJTaWduZXIiOjJ9LHsiU2lnbmF0dXJlIjoiVVNoN3MyeHQrNU01T3FDSG04NmxnZUdqOGcrNGRxNTk3YnZXZVhqNGhpSnJpN052b2hnZjRqQlRxeHpRaEZscmRxa1NUMXlzWWJodnBrRFhrSVdoRXdFPSIsIlNpZ25lciI6M31dfQ==\", \"req.ReportFormat\": 2}\n", - " logger.go:146: 2025-03-21T17:15:55.548Z\tDEBUG\tEVM.1000.LogPoller\tlogpoller/log_poller.go:1035\tPolling for logs\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 561}\n", - " logger.go:146: 2025-03-21T17:15:55.549Z\tDEBUG\tEVM.1000.LogPoller\tlogpoller/log_poller.go:1141\tLatest blocks read from chain\t{\"version\": \"unset@unset\", \"latest\": 560, \"finalized\": 558}\n", - " logger.go:146: 2025-03-21T17:15:55.549Z\tDEBUG\tEVM.1000.LogPoller\tlogpoller/log_poller.go:1047\tNo new blocks since last poll\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 561, \"latestBlockNumber\": 560}\n", - " logger.go:146: 2025-03-21T17:15:55.549Z\tDEBUG\tEVM.1000.LogPoller\tlogpoller/log_poller.go:1035\tPolling for logs\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 561}\n", - " logger.go:146: 2025-03-21T17:15:55.550Z\tDEBUG\tEVM.1000.LogPoller\tlogpoller/log_poller.go:1141\tLatest blocks read from chain\t{\"version\": \"unset@unset\", \"latest\": 560, \"finalized\": 558}\n", - " logger.go:146: 2025-03-21T17:15:55.550Z\tDEBUG\tEVM.1000.LogPoller\tlogpoller/log_poller.go:1047\tNo new blocks since last poll\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 561, \"latestBlockNumber\": 560}\n", - " logger.go:146: 2025-03-21T17:15:55.558Z\tWARN\tsyncer/handler.go:769\tworkflow spec not found\t{\"version\": \"unset@unset\", \"workflowID\": \"004b077cb5debdd46c7fcedb10b182e1f89880e9891f7e308563dd3bdb08b85b\"}\n", - "--- PASS: Test_workflowDeletedHandler/success_deleting_non-existing_workflow_spec (2.97s)\n", - "--- PASS: Test_workflowDeletedHandler (6.48s)\n", - "=== RUN Test_workflowPausedActivatedUpdatedHandler\n", - "=== RUN Test_workflowPausedActivatedUpdatedHandler/success_pausing_activating_and_updating_existing_engine_and_spec\n", - " logger.go:146: 
2025-03-21T17:15:55.579Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1035\tPolling for logs\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 444}\n", - " logger.go:146: 2025-03-21T17:15:55.580Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1141\tLatest blocks read from chain\t{\"version\": \"unset@unset\", \"latest\": 443, \"finalized\": 441}\n", - " logger.go:146: 2025-03-21T17:15:55.580Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1047\tNo new blocks since last poll\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 444, \"latestBlockNumber\": 443}\n", - " logger.go:146: 2025-03-21T17:15:55.589Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.589Z\tDEBUG\tEVM.1337.Txm.TxmStore.TxmStore\tlogger/logger.go:199\tNew logger: TxmStore\t{\"version\": \"unset@unset\"}\n", - " logger.go:146: 2025-03-21T17:15:55.599Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1035\tPolling for logs\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 444}\n", - " logger.go:146: 2025-03-21T17:15:55.600Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1141\tLatest blocks read from chain\t{\"version\": \"unset@unset\", \"latest\": 443, \"finalized\": 441}\n", - " logger.go:146: 2025-03-21T17:15:55.600Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1047\tNo new blocks since last poll\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 444, \"latestBlockNumber\": 443}\n", - " logger.go:146: 2025-03-21T17:15:55.611Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1035\tPolling for logs\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 444}\n", - " logger.go:146: 2025-03-21T17:15:55.611Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1141\tLatest blocks read from chain\t{\"version\": \"unset@unset\", \"latest\": 443, \"finalized\": 441}\n", - " logger.go:146: 2025-03-21T17:15:55.611Z\tDEBUG\tEVM.1337.LogPoller\tlogpoller/log_poller.go:1047\tNo new blocks since last poll\t{\"version\": \"unset@unset\", \"currentBlockNumber\": 444, \"latestBlockNumber\": 443}\n", + rerunCount: 1, + executorResponses: map[string]struct { + passed bool + err error + }{ + "pkgA-0": {passed: false, err: fmt.Errorf("exec rerun boom")}, }, + expectedExecCalls: 1, + expectedError: true, }, } for _, tc := range testCases { + tc := tc t.Run(tc.name, func(t *testing.T) { - testName, timeout, err := attributePanicToTest(tc.outputs) - assert.Equal(t, tc.expectedTimeout, timeout, "timeout flag mismatch") - require.Error(t, err) - assert.ErrorIs(t, err, tc.expectedError, "error mismatch") - assert.Empty(t, testName, "test name should be empty") - }) - } -} + execCallCount := 0 + mockExec := &mockExecutor{ + RunTestPackageFn: func(cfg executor.Config, pkg string, idx int) (string, bool, error) { + execCallCount++ + key := fmt.Sprintf("%s-%d", pkg, idx) + resp, ok := tc.executorResponses[key] + if !ok { + return fmt.Sprintf("mock_rerun_%s_%d.json", pkg, idx), true, nil + } + // Check if config forces count=1 and has the right -run pattern + assert.NotNil(t, cfg.GoTestCountFlag, "Rerun should force GoTestCountFlag") + if cfg.GoTestCountFlag != nil { + assert.Equal(t, 1, *cfg.GoTestCountFlag, "Rerun should force count=1") + } + assert.Nil(t, cfg.SkipTests, "Rerun should clear SkipTests") + require.Len(t, cfg.SelectTests, 1, "Rerun should set exactly one SelectTests pattern") + // Basic check if pattern looks right (contains test names from input) + for _, failedTest := range 
tc.initialFailedTests { + if failedTest.TestPackage == pkg { + assert.Contains(t, cfg.SelectTests[0], regexp.QuoteMeta(failedTest.TestName)) + } + } + return fmt.Sprintf("mock_rerun_%s_%d.json", pkg, idx), resp.passed, resp.err + }, + } + mockParse := &mockParser{} -func TestAttributeRaceToTest(t *testing.T) { - t.Parallel() + r := runner.NewRunner(".", false, 0, nil, false, "", nil, false, "", false, nil, nil, false, false, mockExec, mockParse) - testCases := []struct { - name string - packageName string - expectedTestName string - outputs []string - }{ - { - name: "properly attributed race", - expectedTestName: "TestRace", - packageName: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package", - outputs: []string{ - "==================", - "WARNING: DATA RACE", - "Read at 0x00c000292028 by goroutine 13:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0x94", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44", - "", - "Previous write at 0x00c000292028 by goroutine 12:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0xa4", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44", - "", - "Goroutine 13 (running) created at:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158", - " testing.tRunner()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184", - " testing.(*T).Run.gowrap1()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40", - "", - "Goroutine 12 (running) created at:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158", - " testing.tRunner()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184", - " testing.(*T).Run.gowrap1()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40", - "==================", - "==================", - "WARNING: DATA RACE", - "Write at 0x00c000292028 by goroutine 13:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0xa4", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()", - " 
/Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44", - "", - "Previous write at 0x00c000292028 by goroutine 14:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0xa4", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44", - "", - "Goroutine 13 (running) created at:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158", - " testing.tRunner()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184", - " testing.(*T).Run.gowrap1()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40", - "", - "Goroutine 14 (running) created at:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158", - " testing.tRunner()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184", - " testing.(*T).Run.gowrap1()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40", - "==================", - " testing.go:1399: race detected during execution of test", - }, - }, - { - name: "improperly attributed race", - expectedTestName: "TestRace", - packageName: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package", - outputs: []string{ - "==================", - "WARNING: DATA RACE", - "Read at 0x00c000292028 by goroutine 13:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0x94", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44", - "", - "Previous write at 0x00c000292028 by goroutine 12:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0xa4", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44", - "", - "Goroutine 13 (running) created at:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()", - " 
/Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158", - " testing.tRunner()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184", - " testing.(*T).Run.gowrap1()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40", - "", - "Goroutine 12 (running) created at:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158", - " testing.tRunner()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184", - " testing.(*T).Run.gowrap1()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40", - "==================", - "==================", - "WARNING: DATA RACE", - "Write at 0x00c000292028 by goroutine 13:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0xa4", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44", - "", - "Previous write at 0x00c000292028 by goroutine 14:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0xa4", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44", - "", - "Goroutine 13 (running) created at:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158", - " testing.tRunner()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184", - " testing.(*T).Run.gowrap1()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40", - "", - "Goroutine 14 (running) created at:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158", - " testing.tRunner()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184", - " testing.(*T).Run.gowrap1()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40", - "==================", - "==================", - "WARNING: DATA RACE", - "Read at 0x00c000292028 by goroutine 19:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:68 +0xb8", - " 
github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44", - "", - "Previous write at 0x00c000292028 by goroutine 13:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0xa4", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44", - "", - "Goroutine 19 (running) created at:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158", - " testing.tRunner()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184", - " testing.(*T).Run.gowrap1()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40", - "", - "Goroutine 13 (running) created at:", - " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()", - " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158", - " testing.tRunner()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184", - " testing.(*T).Run.gowrap1()", - " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40", - "==================", - " testing.go:1399: race detected during execution of test", - }, - }, - { - name: "empty", - packageName: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package", - outputs: []string{}, - }, - } + actualResults, _, err := r.RerunFailedTests(tc.initialFailedTests, tc.rerunCount) - for _, testCase := range testCases { - tc := testCase - t.Run(tc.name, func(t *testing.T) { - testName, err := attributeRaceToTest(tc.outputs) - if tc.expectedTestName == "" { - require.Error(t, err) + assert.Equal(t, tc.expectedExecCalls, execCallCount, "Unexpected number of executor calls") + + if tc.expectedError { + assert.Error(t, err) + assert.Len(t, mockParse.ParseFilesCalls, 0, "Parser should not be called on rerun executor error") } else { - require.NoError(t, err) - assert.Equal(t, tc.expectedTestName, testName, "test race not attributed correctly") + assert.NoError(t, err) + if tc.expectedExecCalls > 0 { + assert.Len(t, mockParse.ParseFilesCalls, 1, "Parser should be called once after reruns") + if len(mockParse.ParseFilesCalls) > 0 { + assert.Len(t, mockParse.ParseFilesCalls[0], tc.expectedParseArgs.fileCount, "Parser called with wrong number of files") + assert.Equal(t, r.IgnoreParentFailuresOnSubtests, mockParse.LastParseCfg.IgnoreParentFailuresOnSubtests, "Parser IgnoreParentFailures mismatch") + assert.Equal(t, r.OmitOutputsOnSuccess, mockParse.LastParseCfg.OmitOutputsOnSuccess, "Parser OmitOutputsOnSuccess mismatch") + } + assert.Len(t, actualResults, tc.expectedFinalResultCount, "Unexpected number of results returned from rerun parse") + } else { + assert.Len(t, 
mockParse.ParseFilesCalls, 0, "Parser should not be called if no reruns executed") + assert.Empty(t, actualResults, "No results expected if no reruns executed") + } } }) } } - -// TODO: Running the failing test here fools tools like gotestfmt into thinking we actually ran a failing test -// as the output gets piped out to the console. This a confusing annoyance that I'd like to fix, but it's not crucial. -func TestFailedOutputs(t *testing.T) { - t.Parallel() - - runner := Runner{ - ProjectPath: "./", - Verbose: true, - RunCount: 1, - SelectTests: []string{"TestFail"}, // This test is known to fail consistently - } - - testResults, err := runner.RunTestPackages([]string{flakyTestPackagePath}) - require.NoError(t, err, "running tests should not produce an unexpected error") - - require.Equal(t, 1, len(testResults), "unexpected number of test runs") - - var testFailResult *reports.TestResult - for i := range testResults { - if testResults[i].TestName == "TestFail" { - testFailResult = &testResults[i] - break - } - } - require.NotNil(t, testFailResult, "expected TestFail result not found in report") - - require.NotEmpty(t, testFailResult.FailedOutputs, "expected failed outputs for TestFail") - - // Verify that each run (in this case, only one) has some non-empty output - for runID, outputs := range testFailResult.FailedOutputs { - t.Logf("Failed outputs for run %s: %v", runID, outputs) - require.NotEmpty(t, outputs, "Failed outputs should not be empty for TestFail") - } -} - -func TestSkippedTests(t *testing.T) { - t.Parallel() - - runner := Runner{ - ProjectPath: "./", - Verbose: true, - RunCount: 1, - SelectTests: []string{"TestSkipped"}, // Known skipping test - } - - testResults, err := runner.RunTestPackages([]string{flakyTestPackagePath}) - require.NoError(t, err, "running tests should not produce an unexpected error") - - var testSkipResult *reports.TestResult - for i := range testResults { - if testResults[i].TestName == "TestSkipped" { - testSkipResult = &testResults[i] - break - } - } - require.NotNil(t, testSkipResult, "expected 'TestSkipped' result not found in report") - - // Check that the test was properly marked as skipped - require.True(t, testSkipResult.Skipped, "test 'TestSkipped' should be marked as skipped") - require.Equal(t, 0, testSkipResult.Failures, "test 'TestSkipped' should have no failures") - require.Equal(t, 0, testSkipResult.Successes, "test 'TestSkipped' should have no successes") - require.Equal(t, 1, testSkipResult.Skips, "test 'TestSkipped' should have exactly one skip recorded") -} - -func TestOmitOutputsOnSuccess(t *testing.T) { - t.Parallel() - - runner := Runner{ - ProjectPath: "./", - Verbose: true, - RunCount: 1, - SelectTests: []string{"TestPass"}, // Known passing test - OmitOutputsOnSuccess: true, - } - - testResults, err := runner.RunTestPackages([]string{flakyTestPackagePath}) - require.NoError(t, err, "running tests should not produce an unexpected error") - - var testPassResult *reports.TestResult - for i := range testResults { - if testResults[i].TestName == "TestPass" { - testPassResult = &testResults[i] - break - } - } - require.NotNil(t, testPassResult, "expected 'TestPass' result not found in report") - require.Empty(t, testPassResult.PassedOutputs, "expected no passed outputs due to OmitOutputsOnSuccess") - require.Empty(t, testPassResult.Outputs, "expected no captured outputs due to OmitOutputsOnSuccess and a successful test") -} diff --git a/tools/flakeguard/testparser/testparser.go b/tools/flakeguard/testparser/testparser.go deleted 
file mode 100644 index 017578eb9..000000000 --- a/tools/flakeguard/testparser/testparser.go +++ /dev/null @@ -1,356 +0,0 @@ -package testparser - -import ( - "bufio" - "encoding/json" - "fmt" - "os" - "path/filepath" - "regexp" - "strconv" - "strings" - "time" - - "github.com/rs/zerolog/log" - "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" -) - -var ( - startPanicRe = regexp.MustCompile(`^panic:`) - startRaceRe = regexp.MustCompile(`^WARNING: DATA RACE`) -) - -// ParseOptions holds options that control how test results are parsed. -type ParseOptions struct { - OmitOutputsOnSuccess bool -} - -// entry represents a single JSON record from go test -json output. -type entry struct { - Action string `json:"Action"` - Test string `json:"Test"` - Package string `json:"Package"` - Output string `json:"Output"` - Elapsed float64 `json:"Elapsed"` -} - -func (e entry) String() string { - return fmt.Sprintf("Action: %s, Test: %s, Package: %s, Output: %s, Elapsed: %f", e.Action, e.Test, e.Package, e.Output, e.Elapsed) -} - -// ParseTestResults parses the test output JSON files and produces test results. -// Note that any pre-processing (such as transforming the files to ignore parent failures) -// must be performed before calling this function. -func ParseTestResults(jsonOutputPaths []string, runPrefix string, runCount int, options ParseOptions) ([]reports.TestResult, error) { - var ( - testDetails = make(map[string]*reports.TestResult) - panickedPackages = map[string]struct{}{} - packageLevelOutputs = make(map[string][]string) - testsWithSubTests = make(map[string][]string) - panicDetectionMode = false - raceDetectionMode = false - detectedEntries []entry - expectedRuns = runCount - ) - - runNumber := 0 - // Process each JSON output file. - for _, filePath := range jsonOutputPaths { - runNumber++ - runID := fmt.Sprintf("%s%d", runPrefix, runNumber) - file, err := os.Open(filePath) - if err != nil { - return nil, fmt.Errorf("failed to open test output file: %w", err) - } - - scanner := bufio.NewScanner(file) - var precedingLines []string // context for error reporting - var followingLines []string - - for scanner.Scan() { - line := scanner.Text() - precedingLines = append(precedingLines, line) - if len(precedingLines) > 15 { - precedingLines = precedingLines[1:] - } - - var entryLine entry - if err := json.Unmarshal(scanner.Bytes(), &entryLine); err != nil { - // Gather extra context for error reporting. - for scanner.Scan() && len(followingLines) < 15 { - followingLines = append(followingLines, scanner.Text()) - } - context := append(precedingLines, followingLines...) - return nil, fmt.Errorf("failed to parse json test output near lines:\n%s\nerror: %w", strings.Join(context, "\n"), err) - } - - var result *reports.TestResult - if entryLine.Test != "" { - // Build a key with package and test name. 
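Aside — a minimal, runnable sketch (not part of this diff) of the key scheme the parser uses: each result is stored under a `<package>/<test>` key, and a subtest name folds back to its parent key via the same `SplitN` call that `parseSubTest` performs further down. The package and test names here are hypothetical.

package main

import (
	"fmt"
	"strings"
)

func main() {
	pkg, test := "github.com/example/project/pkg", "TestParent/case_1"
	key := fmt.Sprintf("%s/%s", pkg, test) // per-test map key: <package>/<test>
	parent := strings.SplitN(test, "/", 2)[0]
	parentKey := fmt.Sprintf("%s/%s", pkg, parent) // key the subtest rolls up to
	fmt.Println(key)       // github.com/example/project/pkg/TestParent/case_1
	fmt.Println(parentKey) // github.com/example/project/pkg/TestParent
}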
- key := fmt.Sprintf("%s/%s", entryLine.Package, entryLine.Test) - parentTestName, subTestName := parseSubTest(entryLine.Test) - if subTestName != "" { - parentTestKey := fmt.Sprintf("%s/%s", entryLine.Package, parentTestName) - testsWithSubTests[parentTestKey] = append(testsWithSubTests[parentTestKey], subTestName) - } - if _, exists := testDetails[key]; !exists { - testDetails[key] = &reports.TestResult{ - TestName: entryLine.Test, - TestPackage: entryLine.Package, - PassRatio: 0, - PassedOutputs: make(map[string][]string), - FailedOutputs: make(map[string][]string), - PackageOutputs: []string{}, - } - } - result = testDetails[key] - } - - // Process output field and handle panic/race detection. - if entryLine.Output != "" { - if panicDetectionMode || raceDetectionMode { - detectedEntries = append(detectedEntries, entryLine) - continue - } else if startPanicRe.MatchString(entryLine.Output) { - panickedPackages[entryLine.Package] = struct{}{} - detectedEntries = append(detectedEntries, entryLine) - panicDetectionMode = true - continue - } else if startRaceRe.MatchString(entryLine.Output) { - detectedEntries = append(detectedEntries, entryLine) - raceDetectionMode = true - continue - } else if entryLine.Test != "" && entryLine.Action == "output" { - if result.Outputs == nil { - result.Outputs = make(map[string][]string) - } - result.Outputs[runID] = append(result.Outputs[runID], entryLine.Output) - } else if entryLine.Test == "" { - packageLevelOutputs[entryLine.Package] = append(packageLevelOutputs[entryLine.Package], entryLine.Output) - } else { - switch entryLine.Action { - case "pass": - result.PassedOutputs[runID] = append(result.PassedOutputs[runID], entryLine.Output) - case "fail": - result.FailedOutputs[runID] = append(result.FailedOutputs[runID], entryLine.Output) - } - } - } - - // If in panic or race detection mode, wait for a "fail" action to close the block. 
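Aside — a minimal, self-contained sketch (not part of this diff; hypothetical events, panic path only) of the buffering scheme this block implements: once a `panic:` line is seen, subsequent output lines are buffered rather than attributed to individual tests, until the enclosing "fail" action closes the block and the whole buffer is attributed at once.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	startPanicRe := regexp.MustCompile(`^panic:`)
	events := []struct{ Action, Output string }{
		{"output", "panic: boom [recovered]\n"}, // opens the buffered block
		{"output", "goroutine 25 [running]:\n"}, // buffered, not attributed yet
		{"fail", ""},                            // closes the block
	}
	var buffered []string
	panicMode := false
	for _, e := range events {
		if e.Output != "" && (panicMode || startPanicRe.MatchString(e.Output)) {
			panicMode = true
			buffered = append(buffered, e.Output)
			continue
		}
		if panicMode && e.Action == "fail" {
			// in the real parser, this is where attributePanicToTest runs
			fmt.Printf("attributing %d buffered lines\n", len(buffered))
			buffered, panicMode = nil, false
		}
	}
}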
- if (panicDetectionMode || raceDetectionMode) && entryLine.Action == "fail" { - if panicDetectionMode { - var outputs []string - for _, entry := range detectedEntries { - outputs = append(outputs, entry.Output) - } - panicTest, timeout, err := attributePanicToTest(outputs) - if err != nil { - log.Warn().Err(err).Msg("Unable to attribute panic to a test") - panicTest = "UnableToAttributePanicTestPleaseInvestigate" - } - panicTestKey := fmt.Sprintf("%s/%s", entryLine.Package, panicTest) - result, exists := testDetails[panicTestKey] - if !exists { - result = &reports.TestResult{ - TestName: panicTest, - TestPackage: entryLine.Package, - PassRatio: 0, - PassedOutputs: make(map[string][]string), - FailedOutputs: make(map[string][]string), - PackageOutputs: []string{}, - } - testDetails[panicTestKey] = result - } - result.Panic = true - result.Timeout = timeout - result.Failures++ - result.Runs++ - for _, entry := range detectedEntries { - if entry.Test == "" { - result.PackageOutputs = append(result.PackageOutputs, entry.Output) - } else { - result.FailedOutputs[runID] = append(result.FailedOutputs[runID], entry.Output) - } - } - } else if raceDetectionMode { - raceTest, err := attributeRaceToTest(entryLine.Package, detectedEntries) - if err != nil { - return nil, err - } - raceTestKey := fmt.Sprintf("%s/%s", entryLine.Package, raceTest) - result, exists := testDetails[raceTestKey] - if !exists { - result = &reports.TestResult{ - TestName: raceTest, - TestPackage: entryLine.Package, - PassRatio: 0, - PassedOutputs: make(map[string][]string), - FailedOutputs: make(map[string][]string), - PackageOutputs: []string{}, - } - testDetails[raceTestKey] = result - } - result.Race = true - result.Failures++ - result.Runs++ - for _, entry := range detectedEntries { - if entry.Test == "" { - result.PackageOutputs = append(result.PackageOutputs, entry.Output) - } else { - result.FailedOutputs[runID] = append(result.FailedOutputs[runID], entry.Output) - } - } - } - detectedEntries = []entry{} - panicDetectionMode = false - raceDetectionMode = false - continue - } - - // Process pass, fail, and skip actions. 
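Aside — a worked sketch (not part of this diff; hypothetical run outcomes) of the accounting this switch performs. Skips are counted separately and excluded from Runs, so an always-skipped test keeps the default PassRatio of 1 rather than reading as flaky.

package main

import "fmt"

func main() {
	actions := []string{"pass", "fail", "pass", "skip", "pass"}
	var successes, failures, skips int
	for _, a := range actions {
		switch a {
		case "pass":
			successes++
		case "fail":
			failures++
		case "skip":
			skips++
		}
	}
	runs := successes + failures // 4 — the skip is not a run
	passRatio := 1.0             // default when a test never actually ran
	if runs > 0 {
		passRatio = float64(successes) / float64(runs)
	}
	fmt.Println(runs, skips, passRatio) // 4 1 0.75
}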
- switch entryLine.Action { - case "pass": - if entryLine.Test != "" { - duration, err := time.ParseDuration(strconv.FormatFloat(entryLine.Elapsed, 'f', -1, 64) + "s") - if err != nil { - return nil, fmt.Errorf("failed to parse duration: %w", err) - } - result.Durations = append(result.Durations, duration) - result.Successes++ - if result.PassedOutputs == nil { - result.PassedOutputs = make(map[string][]string) - } - result.PassedOutputs[runID] = result.Outputs[runID] - delete(result.Outputs, runID) - } - case "fail": - if entryLine.Test != "" { - duration, err := time.ParseDuration(strconv.FormatFloat(entryLine.Elapsed, 'f', -1, 64) + "s") - if err != nil { - return nil, fmt.Errorf("failed to parse duration: %w", err) - } - result.Durations = append(result.Durations, duration) - result.Failures++ - if result.FailedOutputs == nil { - result.FailedOutputs = make(map[string][]string) - } - result.FailedOutputs[runID] = result.Outputs[runID] - delete(result.Outputs, runID) - } - case "skip": - if entryLine.Test != "" { - result.Skipped = true - result.Skips++ - } - } - if entryLine.Test != "" { - result.Runs = result.Successes + result.Failures - if result.Runs > 0 { - result.PassRatio = float64(result.Successes) / float64(result.Runs) - } else { - result.PassRatio = 1 - } - } - } - - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("reading test output file: %w", err) - } - if err = file.Close(); err != nil { - log.Warn().Err(err).Str("file", filePath).Msg("failed to close file") - } - } - - // Propagate panic status from parent tests to subtests. - for parentTestKey, subTests := range testsWithSubTests { - if parentTestResult, exists := testDetails[parentTestKey]; exists { - if parentTestResult.Panic { - for _, subTest := range subTests { - subTestKey := fmt.Sprintf("%s/%s/%s", parentTestResult.TestPackage, parentTestResult.TestName, subTest) - if subTestResult, exists := testDetails[subTestKey]; exists { - if subTestResult.Failures > 0 { - subTestResult.Panic = true - if subTestResult.FailedOutputs == nil { - subTestResult.FailedOutputs = make(map[string][]string) - } - for runID := range subTestResult.FailedOutputs { - subTestResult.FailedOutputs[runID] = append(subTestResult.FailedOutputs[runID], "Panic in parent test") - } - } - } else { - log.Warn().Str("expected subtest", subTestKey).Str("parent test", parentTestKey).Msg("expected subtest not found in parent test") - } - } - } - } else { - log.Warn().Str("parent test", parentTestKey).Msg("expected parent test not found") - } - } - - var results []reports.TestResult - for _, result := range testDetails { - // Correct for possible double-counting caused by panics. - if result.Runs > expectedRuns { - if result.Panic { - result.Failures = expectedRuns - result.Runs = expectedRuns - } else { - log.Warn().Str("test", result.TestName).Int("actual runs", result.Runs).Int("expected runs", expectedRuns).Msg("unexpected test runs") - } - } - if outputs, exists := packageLevelOutputs[result.TestPackage]; exists { - result.PackageOutputs = outputs - } - results = append(results, *result) - } - - if options.OmitOutputsOnSuccess { - for i := range results { - results[i].PassedOutputs = make(map[string][]string) - results[i].Outputs = make(map[string][]string) - } - } - - return results, nil -} - -// attributePanicToTest extracts the test function name causing a panic. 
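Aside — a minimal demonstration (not part of this diff) of the attribution regex below against two stack-frame shapes taken from this PR's own test fixtures: a direct panic in a test function, and a panic inside a subtest closure, where the optional `(?:\.[^(]+)?` group strips the `.funcN` suffix so the parent test is blamed.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	testNameRe := regexp.MustCompile(`(?:.*\.)?(Test[A-Z]\w+)(?:\.[^(]+)?\s*\(`)
	frames := []string{
		"github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package.TestPanic(0x140000b6ea0?)",
		"github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package.TestSubTestsSomePanic.func2(0x140000c81a0?)",
	}
	for _, f := range frames {
		if m := testNameRe.FindStringSubmatch(f); len(m) > 1 {
			fmt.Println(m[1]) // TestPanic, then TestSubTestsSomePanic
		}
	}
}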
-func attributePanicToTest(outputs []string) (test string, timeout bool, err error) { - testNameRe := regexp.MustCompile(`(?:.*\.)?(Test[A-Z]\w+)(?:\.[^(]+)?\s*\(`) - timeoutRe := regexp.MustCompile(`(?i)(timeout|timedout|timed\s*out)`) - for _, o := range outputs { - if matches := testNameRe.FindStringSubmatch(o); len(matches) > 1 { - testName := strings.TrimSpace(matches[1]) - if timeoutRe.MatchString(o) { - return testName, true, nil - } - return testName, false, nil - } - } - return "", false, fmt.Errorf("failed to attribute panic to test using regex '%s' on these strings:\n\n%s", testNameRe.String(), strings.Join(outputs, "")) -} - -// attributeRaceToTest extracts the test function name causing a race condition. -func attributeRaceToTest(racePackage string, raceEntries []entry) (string, error) { - regexSanitizeRacePackage := filepath.Base(racePackage) - raceAttributionRe := regexp.MustCompile(fmt.Sprintf(`%s\.(Test[^\.\(]+)`, regexSanitizeRacePackage)) - var entriesOutputs []string - for _, entry := range raceEntries { - entriesOutputs = append(entriesOutputs, entry.Output) - if matches := raceAttributionRe.FindStringSubmatch(entry.Output); len(matches) > 1 { - testName := strings.TrimSpace(matches[1]) - return testName, nil - } - } - return "", fmt.Errorf("failed to attribute race to test using regex %s on these strings:\n%s", raceAttributionRe.String(), strings.Join(entriesOutputs, "")) -} - -// parseSubTest splits a test name into parent and subtest names. -func parseSubTest(testName string) (parentTestName, subTestName string) { - parts := strings.SplitN(testName, "/", 2) - if len(parts) == 1 { - return parts[0], "" - } - return parts[0], parts[1] -} diff --git a/tools/flakeguard/testparser/testparser_test.go b/tools/flakeguard/testparser/testparser_test.go deleted file mode 100644 index 6e2c4235d..000000000 --- a/tools/flakeguard/testparser/testparser_test.go +++ /dev/null @@ -1,467 +0,0 @@ -package testparser - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var ( - defaultTestRunCount = 5 - flakyTestPackagePath = "./example_test_package" - debugDir = "debug_outputs" -) - -type expectedTestResult struct { - allSuccesses bool - someSuccesses bool - allFailures bool - someFailures bool - allSkips bool - testPanic bool - packagePanic bool - race bool - maximumRuns int - - exactRuns *int - minimumRuns *int - exactPassRate *float64 - minimumPassRate *float64 - maximumPassRate *float64 - - seen bool -} - -func TestAttributePanicToTest(t *testing.T) { - t.Parallel() - - // Test cases: each test case contains a slice of output strings. 
- testCases := []struct { - name string - expectedTestName string - expectedTimeout bool - outputs []string - }{ - { - name: "properly attributed panic", - expectedTestName: "TestPanic", - expectedTimeout: false, - outputs: []string{ - "panic: This test intentionally panics [recovered]", - "\tpanic: This test intentionally panics", - "goroutine 25 [running]:", - "testing.tRunner.func1.2({0x1008cde80, 0x1008f7d90})", - "\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1632 +0x1bc", - "testing.tRunner.func1()", - "\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1635 +0x334", - "panic({0x1008cde80?, 0x1008f7d90?})", - "\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/runtime/panic.go:785 +0x124", - "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package.TestPanic(0x140000b6ea0?)", - }, - }, - { - name: "improperly attributed panic", - expectedTestName: "TestPanic", - expectedTimeout: false, - outputs: []string{ - "panic: This test intentionally panics [recovered]", - "TestPanic(0x140000b6ea0?)", - "goroutine 25 [running]:", - "testing.tRunner.func1.2({0x1008cde80, 0x1008f7d90})", - "\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1632 +0x1bc", - "testing.tRunner.func1()", - "\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1635 +0x334", - "panic({0x1008cde80?, 0x1008f7d90?})", - "\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/runtime/panic.go:785 +0x124", - "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package.TestPanic(0x140000b6ea0?)", - }, - }, - { - name: "timeout panic", - expectedTestName: "TestTimedOut", - expectedTimeout: true, - outputs: []string{ - "panic: test timed out after 10m0s", - "running tests", - "TestTimedOut (10m0s)", - "goroutine 397631 [running]:", - "testing.(*M).startAlarm.func1()", - "\t/opt/hostedtoolcache/go/1.23.3/x64/src/testing/testing.go:2373 +0x385", - "created by time.goFunc", - "/opt/hostedtoolcache/go/1.23.3/x64/src/time/sleep.go:215 +0x2d", - }, - }, - { - name: "subtest panic", - expectedTestName: "TestSubTestsSomePanic", - expectedTimeout: false, - outputs: []string{ - "panic: This subtest always panics [recovered]", - "panic: This subtest always panics", - "goroutine 23 [running]:", - "testing.tRunner.func1.2({0x100489e80, 0x1004b3e30})", - "\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1632 +0x1bc", - "testing.tRunner.func1()", - "\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1635 +0x334", - "panic({0x100489e80?, 0x1004b3e30?})", - "\t/opt/homebrew/Cellar/go/1.23.2/libexec/src/runtime/panic.go:785 +0x124", - "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package.TestSubTestsSomePanic.func2(0x140000c81a0?)", - }, - }, - { - name: "memory_test panic extraction", - expectedTestName: "TestJobClientJobAPI", - expectedTimeout: false, - outputs: []string{ - "panic: freeport: cannot allocate port block [recovered]", - "\tpanic: freeport: cannot allocate port block", - "goroutine 321 [running]:", - "testing.tRunner.func1.2({0x5e0dd80, 0x72ebb40})", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1734 +0x21c", - "testing.tRunner.func1()", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1737 +0x35e", - "panic({0x5e0dd80?, 0x72ebb40?})", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/runtime/panic.go:787 +0x132", - "github.com/hashicorp/consul/sdk/freeport.alloc()", - 
"\t/home/runner/go/pkg/mod/github.com/hashicorp/consul/sdk@v0.16.1/freeport/freeport.go:274 +0xad", - "github.com/hashicorp/consul/sdk/freeport.initialize()", - "\t/home/runner/go/pkg/mod/github.com/hashicorp/consul/sdk@v0.16.1/freeport/freeport.go:124 +0x2d7", - "sync.(*Once).doSlow(0xc0018eb600?, 0xc000da4a98?)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/sync/once.go:78 +0xab", - "sync.(*Once).Do(...)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/sync/once.go:69", - "github.com/hashicorp/consul/sdk/freeport.Take(0x1)", - "\t/home/runner/go/pkg/mod/github.com/hashicorp/consul/sdk@v0.16.1/freeport/freeport.go:303 +0xe5", - "github.com/hashicorp/consul/sdk/freeport.GetN({0x7337708, 0xc000683dc0}, 0x1)", - "\t/home/runner/go/pkg/mod/github.com/hashicorp/consul/sdk@v0.16.1/freeport/freeport.go:427 +0x48", - "github.com/smartcontractkit/chainlink/deployment/environment/memory_test.TestJobClientJobAPI(0xc000683dc0)", - "\t/home/runner/work/chainlink/chainlink/deployment/environment/memory/job_service_client_test.go:116 +0xc6", - "testing.tRunner(0xc000683dc0, 0x6d6c838)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1792 +0xf4", - "created by testing.(*T).Run in goroutine 1", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1851 +0x413", - }, - }, - { - name: "changeset_test panic extraction", - expectedTestName: "TestDeployBalanceReader", - expectedTimeout: false, - outputs: []string{ - "panic: freeport: cannot allocate port block [recovered]", - "\tpanic: freeport: cannot allocate port block", - "goroutine 378 [running]:", - "testing.tRunner.func1.2({0x6063f40, 0x76367f0})", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1734 +0x21c", - "testing.tRunner.func1()", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1737 +0x35e", - "panic({0x6063f40?, 0x76367f0?})", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/runtime/panic.go:787 +0x132", - "github.com/hashicorp/consul/sdk/freeport.alloc()", - "\t/home/runner/go/pkg/mod/github.com/hashicorp/consul/sdk@v0.16.1/freeport/freeport.go:274 +0xad", - "github.com/hashicorp/consul/sdk/freeport.initialize()", - "\t/home/runner/go/pkg/mod/github.com/hashicorp/consul/sdk@v0.16.1/freeport/freeport.go:124 +0x2d7", - "sync.(*Once).doSlow(0xa94f820?, 0xa8000a?)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/sync/once.go:78 +0xab", - "sync.(*Once).Do(...)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/sync/once.go:69", - "github.com/hashicorp/consul/sdk/freeport.Take(0x1)", - "\t/home/runner/go/pkg/mod/github.com/hashicorp/consul/sdk@v0.16.1/freeport/freeport.go:303 +0xe5", - "github.com/hashicorp/consul/sdk/freeport.GetN({0x7684150, 0xc000583c00}, 0x1)", - "\t/home/runner/go/pkg/mod/github.com/hashicorp/consul/sdk@v0.16.1/freeport/freeport.go:427 +0x48", - "github.com/smartcontractkit/chainlink/deployment/environment/memory.NewNodes(0xc000583c00, 0xff, 0xc001583d10, 0xc005aa0030, 0x1, 0x0, {0x0, {0x0, 0x0, 0x0, ...}, ...}, ...)", - "\t/home/runner/work/chainlink/chainlink/deployment/environment/memory/environment.go:177 +0xa5", - "github.com/smartcontractkit/chainlink/deployment/environment/memory.NewMemoryEnvironment(_, {_, _}, _, {0x2, 0x0, 0x0, 0x1, 0x0, {0x0, ...}})", - "\t/home/runner/work/chainlink/chainlink/deployment/environment/memory/environment.go:223 +0x10c", - "github.com/smartcontractkit/chainlink/deployment/keystone/changeset_test.TestDeployBalanceReader(0xc000583c00)", - "\t/home/runner/work/chainlink/chainlink/deployment/keystone/changeset/deploy_balance_reader_test.go:23 +0xf5", - 
"testing.tRunner(0xc000583c00, 0x70843d0)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1792 +0xf4", - "created by testing.(*T).Run in goroutine 1", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/testing/testing.go:1851 +0x413", - " logger.go:146: 03:14:04.485880684\tINFO\tDeployed KeystoneForwarder 1.0.0 chain selector 909606746561742123 addr 0x72B66019aCEdc35F7F6e58DF94De95f3cBCC5971\t{\"version\": \"(devel)@unset\"}", - " logger.go:146: 03:14:04.486035865\tINFO\tdeploying forwarder\t{\"version\": \"(devel)@unset\", \"chainSelector\": 5548718428018410741}", - " logger.go:146: 2025-03-08T03:14:04.490Z\tINFO\tchangeset/jd_register_nodes.go:91\tregistered node\t{\"version\": \"unset@unset\", \"name\": \"node1\", \"id\": \"node:{id:\\\"895776f5ba0cc11c570a47b5cc3dbb8771da9262cfb545cd5d48251796af7f\\\" public_key:\\\"895776f5ba0cc11c570a47b5cc3dbb8771da9262cfb545cd5d48251796af7f\\\" is_enabled:true is_connected:true labels:{key:\\\"product\\\" value:\\\"test-product\\\"} labels:{key:\\\"environment\\\" value:\\\"test-env\\\"} labels:{key:\\\"nodeType\\\" value:\\\"bootstrap\\\"} labels:{key:\\\"don-0-don1\\\"}\"}", - }, - }, - { - name: "empty", - expectedTestName: "", - expectedTimeout: false, - outputs: []string{}, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - testName, timeout, err := attributePanicToTest(tc.outputs) - assert.Equal(t, tc.expectedTimeout, timeout, "timeout flag mismatch") - if tc.expectedTestName == "" { - require.Error(t, err) - } else { - require.NoError(t, err) - assert.Equal(t, tc.expectedTestName, testName, "test name mismatch") - } - }) - } -} - -func TestFailToAttributePanicToTest(t *testing.T) { - t.Parallel() - - testCases := []struct { - name string - outputs []string - }{ - { - name: "no test name in panic", - outputs: []string{ - "panic: reflect: Elem of invalid type bool", - "goroutine 104182 [running]:", - "reflect.elem(0xc0569d9998?)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/reflect/type.go:733 +0x9a", - "reflect.(*rtype).Elem(0xa4dd940?)", - "\t/opt/hostedtoolcache/go/1.24.0/x64/src/reflect/type.go:737 +0x15", - "github.com/smartcontractkit/chainlink-solana/pkg/solana/chainreader.setPollingFilterOverrides(0x0, {0xc052040510, 0x1, 0xc?})", - "\t/home/runner/go/pkg/mod/github.com/smartcontractkit/chainlink-solana@v1.1.2-0.20250319030827-8e2f4d76eb79/pkg/solana/chainreader/chain_reader.go:942 +0x492", - "github.com/smartcontractkit/chainlink-solana/pkg/solana/chainreader.(*ContractReaderService).addEventRead(_, _, {_, _}, {_, _}, {{0xc0544c4270, 0x9}, {0xc0544c4280, 0xc}, ...}, ...)", - "\t/home/runner/go/pkg/mod/github.com/smartcontractkit/chainlink-solana@v1.1.2-0.20250319030827-8e2f4d76eb79/pkg/solana/chainreader/chain_reader.go:605 +0x13d", - "github.com/smartcontractkit/chainlink-solana/pkg/solana/chainreader.(*ContractReaderService).initNamespace(0xc054472540, 0xc01c37d440?)", - "\t/home/runner/go/pkg/mod/github.com/smartcontractkit/chainlink-solana@v1.1.2-0.20250319030827-8e2f4d76eb79/pkg/solana/chainreader/chain_reader.go:443 +0x28b", - "github.com/smartcontractkit/chainlink-solana/pkg/solana/chainreader.NewContractReaderService({0x7fcf8b532040?, 0xc015b223e0?}, {0xc6ac960, 0xc05464e470}, {0xc0544384e0?, {0xc01c37d440?, 0xc054163b84?, 0xc054163b80?}}, {0x7fcf8071c7a0, 0xc0157928c0})", - "\t/home/runner/go/pkg/mod/github.com/smartcontractkit/chainlink-solana@v1.1.2-0.20250319030827-8e2f4d76eb79/pkg/solana/chainreader/chain_reader.go:97 +0x287", - 
"github.com/smartcontractkit/chainlink-solana/pkg/solana.(*Relayer).NewContractReader(0xc015b2e150, {0x4d0102030cb384f5?, 0xb938300b5ca1aa13?}, {0xc05469c000, 0x1eedf, 0x20000})", - "\t/home/runner/go/pkg/mod/github.com/smartcontractkit/chainlink-solana@v1.1.2-0.20250319030827-8e2f4d76eb79/pkg/solana/relay.go:160 +0x205", - "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/oraclecreator.(*pluginOracleCreator).createReadersAndWriters(_, {_, _}, {_, _}, _, {0x3, {0x0, 0xa, 0x93, ...}, ...}, ...)", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/ccip/oraclecreator/plugin.go:446 +0x338", - "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/oraclecreator.(*pluginOracleCreator).Create(0xc033a69ad0, {0xc6f5a10, 0xc02e4f9a40}, 0x3, {0x3, {0x0, 0xa, 0x93, 0x8f, 0x67, ...}, ...})", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/ccip/oraclecreator/plugin.go:215 +0xc0c", - "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/launcher.createDON({0xc6f5a10, 0xc02e4f9a40}, {0x7fcf8b533ad0, 0xc015b97340}, {0xb6, 0x5e, 0x31, 0xd0, 0x35, 0xef, ...}, ...)", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/ccip/launcher/launcher.go:367 +0x451", - "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/launcher.(*launcher).processAdded(0xc015723080, {0xc6f5a10, 0xc02e4f9a40}, 0xc053de2ff0)", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/ccip/launcher/launcher.go:254 +0x239", - "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/launcher.(*launcher).processDiff(0xc015723080, {0xc6f5a10, 0xc02e4f9a40}, {0xc053de2ff0?, 0xc053de3020?, 0xc053de3050?})", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/ccip/launcher/launcher.go:192 +0x68", - "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/launcher.(*launcher).tick(0xc015723080, {0xc6f5a10, 0xc02e4f9a40})", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/ccip/launcher/launcher.go:178 +0x20b", - "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/launcher.(*launcher).monitor(0xc015723080)", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/ccip/launcher/launcher.go:152 +0x112", - "created by github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/launcher.(*launcher).Start.func1 in goroutine 1335", - "\t/home/runner/work/chainlink/chainlink/core/capabilities/ccip/launcher/launcher.go:134 +0xa5", - "FAIL\tgithub.com/smartcontractkit/chainlink/deployment/ccip/changeset/solana\t184.801s", - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - testName, timeout, err := attributePanicToTest(tc.outputs) - require.Error(t, err) - assert.Empty(t, testName, "test name should be empty") - assert.False(t, timeout, "timeout flag should be false") - }) - } -} - -func TestAttributeRaceToTest(t *testing.T) { - t.Parallel() - - testCases := []struct { - name string - packageName string - expectedTestName string - raceEntries []entry - }{ - { - name: "properly attributed race", - expectedTestName: "TestRace", - packageName: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package", - raceEntries: properlyAttributedRaceEntries, - }, - { - name: "improperly attributed race", - expectedTestName: "TestRace", - packageName: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package", - raceEntries: improperlyAttributedRaceEntries, - }, - { - name: "empty", - packageName: 
"github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/example_test_package", - raceEntries: []entry{ - {}, - }, - }, - } - - for _, testCase := range testCases { - tc := testCase - t.Run(tc.name, func(t *testing.T) { - testName, err := attributeRaceToTest(tc.packageName, tc.raceEntries) - if tc.expectedTestName == "" { - require.Error(t, err) - } else { - require.NoError(t, err) - assert.Equal(t, tc.expectedTestName, testName, "test race not attributed correctly") - } - }) - } -} - -var ( - improperlyAttributedRaceEntries = []entry{ - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "==================\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "WARNING: DATA RACE\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "Read at 0x00c000292028 by goroutine 13:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0x94\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "Previous write at 0x00c000292028 by goroutine 12:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0xa4\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " 
github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "Goroutine 13 (running) created at:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " testing.tRunner()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " testing.(*T).Run.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "Goroutine 12 (running) created at:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " testing.tRunner()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " 
/opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " testing.(*T).Run.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "==================\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "==================\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "WARNING: DATA RACE\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "Write at 0x00c000292028 by goroutine 13:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0xa4\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "Previous write at 0x00c000292028 by goroutine 14:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0xa4\n"}, - {Action: "output", Package: 
"github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "Goroutine 13 (running) created at:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " testing.tRunner()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " testing.(*T).Run.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "Goroutine 14 (running) created at:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " testing.tRunner()\n"}, - {Action: "output", Package: 
"github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " testing.(*T).Run.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "==================\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "==================\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "WARNING: DATA RACE\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "Read at 0x00c000292028 by goroutine 19:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:68 +0xb8\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "Previous write at 0x00c000292028 by goroutine 13:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 
+0xa4\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "Goroutine 19 (running) created at:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " testing.tRunner()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " testing.(*T).Run.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "Goroutine 13 (running) created at:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " testing.tRunner()\n"}, - 
{Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " testing.(*T).Run.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: "==================\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestFlaky", Output: " testing.go:1399: race detected during execution of test\n"}, - } - properlyAttributedRaceEntries = []entry{ - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "==================\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "WARNING: DATA RACE\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "Read at 0x00c000292028 by goroutine 13:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0x94\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "Previous write at 0x00c000292028 by goroutine 12:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()\n"}, - 
{Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0xa4\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "Goroutine 13 (running) created at:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " testing.tRunner()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " testing.(*T).Run.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "Goroutine 12 (running) created at:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " 
/Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " testing.tRunner()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " testing.(*T).Run.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "==================\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "==================\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "WARNING: DATA RACE\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "Write at 0x00c000292028 by goroutine 13:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0xa4\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "Previous write at 0x00c000292028 by goroutine 14:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " 
github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.func1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:67 +0xa4\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x44\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "Goroutine 13 (running) created at:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " testing.tRunner()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " testing.(*T).Run.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "Goroutine 14 (running) created at:\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package.TestRace()\n"}, - {Action: "output", Package: 
"github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /Users/adamhamrick/Projects/chainlink-testing-framework/tools/flakeguard/runner/example_test_package/example_tests_test.go:74 +0x158\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " testing.tRunner()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1690 +0x184\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " testing.(*T).Run.gowrap1()\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " /opt/homebrew/Cellar/go/1.23.2/libexec/src/testing/testing.go:1743 +0x40\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: "==================\n"}, - {Action: "output", Package: "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/runner/example_test_package", Test: "TestRace", Output: " testing.go:1399: race detected during execution of test\n"}, - } -)