|
14 | 14 | using Garnet.server.Auth.Settings; |
15 | 15 | using Microsoft.Extensions.Logging; |
16 | 16 | using NUnit.Framework; |
| 17 | +using NUnit.Framework.Interfaces; |
17 | 18 | using NUnit.Framework.Legacy; |
18 | 19 | using StackExchange.Redis; |
19 | 20 | using Tsavorite.core; |
@@ -120,25 +121,71 @@ public void RestartNode(int nodeIndex) |
120 | 121 |
|
/// <summary>
/// Tears down a cluster test: cancels outstanding work, disposes cluster nodes and
/// supporting resources, deletes the test folder, and only then reports any teardown
/// failure via <see cref="Assert.Fail(string)"/> so that cleanup always runs to completion.
/// </summary>
public void TearDown()
{
    // Capture the test outcome before any teardown work to distinguish
    // primary teardown failures from secondary ones caused by a hung/failed test.
    var testOutcome = TestContext.CurrentContext.Result.Outcome;
    var testAlreadyFailed = testOutcome.Status == TestStatus.Failed;

    if (testAlreadyFailed)
    {
        logger?.LogError(
            "TearDown: test already failed ({label}): {message}",
            testOutcome.Label,
            TestContext.CurrentContext.Result.Message);
    }

    cts.Cancel();
    cts.Dispose();
    waiter?.Dispose();
    clusterTestUtils?.Dispose();

    var timeout = TimeSpan.FromSeconds(30);

    // Aggregate every failure instead of keeping only the first, so a DisposeCluster
    // timeout does not mask a subsequent DeleteDirectory failure (or vice versa).
    string failureReason = null;
    void AddFailure(string reason) =>
        failureReason = failureReason is null ? reason : $"{failureReason}; {reason}";

    // Task.Wait wraps exceptions in AggregateException, whose Message is the unhelpful
    // "One or more errors occurred" wrapper — unwrap to report the root cause instead.
    static Exception RootCause(Exception ex) =>
        ex is AggregateException ? ex.GetBaseException() : ex;

    // Phase 1: Dispose cluster nodes (may time out if handlers are stuck).
    try
    {
        if (!Task.Run(() => DisposeCluster()).Wait(timeout))
        {
            AddFailure("Timed out waiting for DisposeCluster");
            logger?.LogError("Timed out waiting for DisposeCluster");
        }
    }
    catch (Exception ex)
    {
        AddFailure($"DisposeCluster threw: {RootCause(ex).Message}");
        logger?.LogError(ex, "DisposeCluster failed");
    }

    // Phase 2: Dispose logger factory (always, even after a timeout) — but only after
    // the servers are disposed, so their final log output is not lost.
    loggerFactory?.Dispose();

    // Phase 3: Delete the test directory (may time out if files stayed locked after a
    // Phase 1 timeout).
    try
    {
        if (!Task.Run(() => TestUtils.DeleteDirectory(TestFolder, true)).Wait(timeout))
        {
            AddFailure("Timed out DeleteDirectory");
            logger?.LogError("Timed out DeleteDirectory");
        }
    }
    catch (Exception ex)
    {
        AddFailure($"DeleteDirectory threw: {RootCause(ex).Message}");
        logger?.LogError(ex, "DeleteDirectory failed");
    }

    // Phase 4: Always runs. Presumably resets LightEpoch instances to prevent
    // cross-test contamination — NOTE(review): confirm against TestUtils.OnTearDown.
    TestUtils.OnTearDown();

    // Fail the test at the end, after all cleanup has been attempted, labeling the
    // failure as primary (test passed) or secondary (test had already failed).
    if (failureReason != null)
    {
        var context = testAlreadyFailed
            ? $" (secondary failure — test already failed with '{testOutcome.Label}')"
            : " (primary failure — test itself passed)";
        Assert.Fail(failureReason + context);
    }
}
143 | 190 |
|
144 | 191 | public void RegisterCustomTxn(string name, Func<CustomTransactionProcedure> proc, RespCommandsInfo commandInfo = null, RespCommandDocs commandDocs = null) |
|
0 commit comments