|
| 1 | +package runner |
| 2 | + |
| 3 | +import ( |
| 4 | + "encoding/json" |
| 5 | + "fmt" |
| 6 | + "os" |
| 7 | + "testing" |
| 8 | + |
| 9 | + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" |
| 10 | + "github.com/stretchr/testify/assert" |
| 11 | + "github.com/stretchr/testify/require" |
| 12 | +) |
| 13 | + |
// MockGoRunner is a test double for the Go command runner: every call to
// RunCommand is delegated to the caller-supplied RunFunc, letting tests
// script arbitrary command output, pass/fail status, and errors.
type MockGoRunner struct {
	// RunFunc receives the same arguments as RunCommand and supplies the
	// mocked output string, pass flag, and error.
	RunFunc func(dir string, args []string) (string, bool, error)
}

// RunCommand forwards the call to the configured RunFunc unchanged, so the
// mock can stand in wherever the production runner's signature is expected.
func (m MockGoRunner) RunCommand(dir string, args []string) (string, bool, error) {
	output, passed, err := m.RunFunc(dir, args)
	return output, passed, err
}
| 21 | + |
| 22 | +func TestRun(t *testing.T) { |
| 23 | + t.Parallel() |
| 24 | + |
| 25 | + runs := 5 |
| 26 | + |
| 27 | + runner := Runner{ |
| 28 | + ProjectPath: "./", |
| 29 | + Verbose: true, |
| 30 | + RunCount: runs, |
| 31 | + UseRace: false, |
| 32 | + SkipTests: []string{}, |
| 33 | + FailFast: false, |
| 34 | + SelectedTestPackages: []string{"./flaky_test_package"}, |
| 35 | + } |
| 36 | + |
| 37 | + expectedResults := map[string]*struct { |
| 38 | + *reports.TestResult |
| 39 | + seen bool |
| 40 | + }{ |
| 41 | + "TestFlaky": { |
| 42 | + TestResult: &reports.TestResult{ |
| 43 | + TestName: "TestFlaky", |
| 44 | + Panicked: false, |
| 45 | + Skipped: false, |
| 46 | + }, |
| 47 | + }, |
| 48 | + "TestFail": { |
| 49 | + TestResult: &reports.TestResult{ |
| 50 | + TestName: "TestFail", |
| 51 | + Panicked: false, |
| 52 | + Skipped: false, |
| 53 | + PassRatio: 0, |
| 54 | + Failures: runs, |
| 55 | + }, |
| 56 | + }, |
| 57 | + "TestPass": { |
| 58 | + TestResult: &reports.TestResult{ |
| 59 | + TestName: "TestPass", |
| 60 | + Panicked: false, |
| 61 | + Skipped: false, |
| 62 | + PassRatio: 1, |
| 63 | + Successes: runs, |
| 64 | + }, |
| 65 | + }, |
| 66 | + // "TestPanic": { |
| 67 | + // TestResult: &reports.TestResult{ |
| 68 | + // TestName: "TestPanic", |
| 69 | + // Panicked: true, |
| 70 | + // Skipped: false, |
| 71 | + // PassRatio: 0, |
| 72 | + // }, |
| 73 | + // }, |
| 74 | + "TestSkipped": { |
| 75 | + TestResult: &reports.TestResult{ |
| 76 | + TestName: "TestSkipped", |
| 77 | + Panicked: false, |
| 78 | + Skipped: true, |
| 79 | + PassRatio: 0, |
| 80 | + }, |
| 81 | + }, |
| 82 | + } |
| 83 | + |
| 84 | + testResults, err := runner.RunTests() |
| 85 | + require.NoError(t, err) |
| 86 | + t.Cleanup(func() { |
| 87 | + if t.Failed() { |
| 88 | + t.Log("Writing test results to flaky_test_results.json") |
| 89 | + jsonResults, err := json.Marshal(testResults) |
| 90 | + require.NoError(t, err) |
| 91 | + err = os.WriteFile("flaky_test_results.json", jsonResults, 0644) //nolint:gosec |
| 92 | + require.NoError(t, err) |
| 93 | + } |
| 94 | + }) |
| 95 | + for _, result := range testResults { |
| 96 | + t.Run(fmt.Sprintf("checking results of %s", result.TestName), func(t *testing.T) { |
| 97 | + expected, ok := expectedResults[result.TestName] |
| 98 | + // Sanity checks |
| 99 | + require.True(t, ok, "unexpected test result: %s", result.TestName) |
| 100 | + require.False(t, expected.seen, "test '%s' was seen multiple times", result.TestName) |
| 101 | + expected.seen = true |
| 102 | + |
| 103 | + assert.Equal(t, runs, result.Runs, "test '%s' had an unexpected number of runs", result.TestName) |
| 104 | + assert.Len(t, result.Durations, runs, "test '%s' had an unexpected number of durations as it was run %d times", result.TestName, runs) |
| 105 | + if result.TestName == "TestSlow" { |
| 106 | + for _, duration := range result.Durations { |
| 107 | + assert.GreaterOrEqual(t, duration, float64(1), "slow test '%s' should have a duration of at least 2s", result.TestName) |
| 108 | + } |
| 109 | + } |
| 110 | + assert.Equal(t, expected.TestResult.Panicked, result.Panicked, "test '%s' had an unexpected panic result", result.TestName) |
| 111 | + assert.Equal(t, expected.TestResult.Skipped, result.Skipped, "test '%s' had an unexpected skipped result", result.TestName) |
| 112 | + |
| 113 | + if result.TestName == "TestFlaky" { |
| 114 | + assert.Greater(t, result.Successes, 0, "flaky test '%s' should have passed some", result.TestName) |
| 115 | + assert.Greater(t, result.Failures, 0, "flaky test '%s' should have failed some", result.TestName) |
| 116 | + assert.Greater(t, result.PassRatio, float64(0), "flaky test '%s' should have a flaky pass ratio", result.TestName) |
| 117 | + assert.Less(t, result.PassRatio, float64(1), "flaky test '%s' should have a flaky pass ratio", result.TestName) |
| 118 | + } else { |
| 119 | + assert.Equal(t, expected.TestResult.PassRatio, result.PassRatio, "test '%s' had an unexpected pass ratio", result.TestName) |
| 120 | + assert.Equal(t, expected.TestResult.Successes, result.Successes, "test '%s' had an unexpected number of successes", result.TestName) |
| 121 | + assert.Equal(t, expected.TestResult.Failures, result.Failures, "test '%s' had an unexpected number of failures", result.TestName) |
| 122 | + } |
| 123 | + }) |
| 124 | + } |
| 125 | + |
| 126 | + for _, expected := range expectedResults { |
| 127 | + assert.True(t, expected.seen, "expected test '%s' not found in test runs", expected.TestResult.TestName) |
| 128 | + } |
| 129 | +} |
0 commit comments