@@ -43,25 +43,25 @@ class _TestMetrics:
     "Represents the duration of the initial execution of the test."
 
     # NOTE(remyduthu): We need this flag because we may have processed a test
-    # without scheduling retries for it (e.g., because it was too slow).
+    # without scheduling reruns for it (e.g., because it was too slow).
     is_processed: bool = dataclasses.field(default=False)
 
-    retry_count: int = dataclasses.field(default=0)
-    "Represents the number of times the test has been retried so far."
+    rerun_count: int = dataclasses.field(default=0)
+    "Represents the number of times the test has been rerun so far."
 
-    scheduled_retry_count: int = dataclasses.field(default=0)
-    "Represents the number of retries that have been scheduled for this test depending on the budget."
+    scheduled_rerun_count: int = dataclasses.field(default=0)
+    "Represents the number of reruns that have been scheduled for this test depending on the budget."
 
     total_duration: datetime.timedelta = dataclasses.field(
         default_factory=datetime.timedelta
     )
-    "Represents the total duration spent executing this test, including retries."
+    "Represents the total duration spent executing this test, including reruns."
 
     def add_duration(self, duration: datetime.timedelta) -> None:
         if not self.initial_duration:
             self.initial_duration = duration
 
-        self.retry_count += 1
+        self.rerun_count += 1
         self.total_duration += duration
 
 
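For context on the renamed fields, here is a minimal, self-contained sketch of how `_TestMetrics.add_duration` behaves. The field names come from the hunk above; the `initial_duration` default is an assumption, since its definition sits just outside this hunk.

```python
import dataclasses
import datetime


@dataclasses.dataclass
class _TestMetrics:
    # Assumed default: the field definition is just above the diffed hunk.
    initial_duration: datetime.timedelta = dataclasses.field(
        default_factory=datetime.timedelta
    )
    is_processed: bool = dataclasses.field(default=False)
    rerun_count: int = dataclasses.field(default=0)
    scheduled_rerun_count: int = dataclasses.field(default=0)
    total_duration: datetime.timedelta = dataclasses.field(
        default_factory=datetime.timedelta
    )

    def add_duration(self, duration: datetime.timedelta) -> None:
        # The first recorded duration doubles as the initial execution time
        # (a zero timedelta is falsy).
        if not self.initial_duration:
            self.initial_duration = duration
        # The counter increments on every execution, including the initial
        # one, which is why is_last_rerun_for_test adds 1 to the schedule.
        self.rerun_count += 1
        self.total_duration += duration


metrics = _TestMetrics()
metrics.add_duration(datetime.timedelta(seconds=2))  # Initial execution.
metrics.add_duration(datetime.timedelta(seconds=3))  # First rerun.
assert metrics.initial_duration == datetime.timedelta(seconds=2)
assert metrics.rerun_count == 2
assert metrics.total_duration == datetime.timedelta(seconds=5)
```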
@@ -102,10 +102,10 @@ class FlakyDetector:
         <session>: [(finalizer_fn, ...), exception_info] # Session scope.
     }
 
-    When retrying a test, we want to:
+    When rerunning a test, we want to:
 
-    - Tear down and re-setup function-scoped fixtures for each retry.
-    - Keep higher-scoped fixtures alive across all retries.
+    - Tear down and re-setup function-scoped fixtures for each rerun.
+    - Keep higher-scoped fixtures alive across all reruns.
 
     This approach is inspired by pytest-rerunfailures:
     https://github.com/pytest-dev/pytest-rerunfailures/blob/master/src/pytest_rerunfailures.py#L503-L542
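The suspend/restore mechanics referenced in that docstring are not part of this diff. The following is an illustrative sketch of the idea only, assuming the documented `{node: [finalizers, exception_info]}` layout; the function names here are hypothetical, not the plugin's real API.

```python
from typing import Any, Callable

Finalizers = list[Callable[[], None]]


def suspend_non_function_finalizers(
    stack: dict[Any, list], item: Any
) -> dict[Any, Finalizers]:
    """Detach higher-scoped finalizers so a rerun only tears down the item."""
    suspended: dict[Any, Finalizers] = {}
    for node, (finalizers, _exc_info) in stack.items():
        if node is item:
            continue  # Function scope: let teardown run normally.
        suspended[node] = list(finalizers)
        finalizers.clear()  # Higher scopes: keep fixtures alive.
    return suspended


def restore_finalizers(
    stack: dict[Any, list], suspended: dict[Any, Finalizers]
) -> None:
    """Re-attach the stashed finalizers once reruns are done."""
    for node, finalizers in suspended.items():
        stack[node][0].extend(finalizers)


# Usage sketch with a toy stack shaped like the docstring's example:
item = object()
session = object()
stack = {
    item: [[lambda: print("function teardown")], None],
    session: [[lambda: print("session teardown")], None],
}
suspended = suspend_non_function_finalizers(stack, item)
# Only the item's finalizer remains runnable; session fixtures stay alive.
restore_finalizers(stack, suspended)
```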
@@ -169,7 +169,7 @@ def filter_context_tests_with_session(self, session: _pytest.main.Session) -> None:
             test for test in self._context.unhealthy_test_names if test in session_tests
         ]
 
-    def get_retry_count_for_test(self, test: str) -> int:
+    def get_rerun_count_for_test(self, test: str) -> int:
         metrics = self._test_metrics.get(test)
         if not metrics:
             return 0
@@ -186,7 +186,7 @@ def get_retry_count_for_test(self, test: str) -> int:
         if result < self._context.min_test_execution_count:
             return 0
 
-        metrics.scheduled_retry_count = result
+        metrics.scheduled_rerun_count = result
 
         return result
 
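The derivation of `result` lives above this hunk and is not shown. A plausible reconstruction of the budget logic, assuming the count is simply how many executions of the test's initial duration fit into the budget:

```python
import datetime


def get_rerun_count_for_test(
    budget: datetime.timedelta,
    initial_duration: datetime.timedelta,
    min_test_execution_count: int,
) -> int:
    # Hypothetical formula: the actual computation is outside the diff.
    if not initial_duration:
        return 0
    # How many executions of this test fit in the budget.
    result = int(budget.total_seconds() // initial_duration.total_seconds())
    # Too slow to run the minimum number of times: schedule nothing, which
    # make_report later flags as "too slow to be tested ... within the budget".
    if result < min_test_execution_count:
        return 0
    return result
```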
@@ -216,42 +216,42 @@ def make_report(self) -> str:
 
             return result
 
-        total_retry_duration_seconds = sum(
+        total_rerun_duration_seconds = sum(
            metrics.total_duration.total_seconds()
            for metrics in self._test_metrics.values()
        )
         budget_duration_seconds = self._get_budget_duration().total_seconds()
         result += (
-            f"{os.linesep}- Used {total_retry_duration_seconds / budget_duration_seconds * 100:.2f}% of the budget "
-            f"({total_retry_duration_seconds:.2f}s/{budget_duration_seconds:.2f}s)"
+            f"{os.linesep}- Used {total_rerun_duration_seconds / budget_duration_seconds * 100:.2f}% of the budget "
+            f"({total_rerun_duration_seconds:.2f}s/{budget_duration_seconds:.2f}s)"
         )
 
         result += (
             f"{os.linesep}- Active for {len(self._test_metrics)} {self.mode} "
             f"test{'s' if len(self._test_metrics) > 1 else ''}:"
         )
         for test, metrics in self._test_metrics.items():
-            if metrics.scheduled_retry_count == 0:
+            if metrics.scheduled_rerun_count == 0:
                 result += (
                     f"{os.linesep}• '{test}' is too slow to be tested at least "
                     f"{self._context.min_test_execution_count} times within the budget"
                 )
                 continue
 
-            if metrics.retry_count < metrics.scheduled_retry_count:
+            if metrics.rerun_count < metrics.scheduled_rerun_count:
                 result += (
-                    f"{os.linesep}• '{test}' has been tested only {metrics.retry_count} "
-                    f"time{'s' if metrics.retry_count > 1 else ''} instead of {metrics.scheduled_retry_count} "
-                    f"time{'s' if metrics.scheduled_retry_count > 1 else ''} to avoid exceeding the budget"
+                    f"{os.linesep}• '{test}' has been tested only {metrics.rerun_count} "
+                    f"time{'s' if metrics.rerun_count > 1 else ''} instead of {metrics.scheduled_rerun_count} "
+                    f"time{'s' if metrics.scheduled_rerun_count > 1 else ''} to avoid exceeding the budget"
                 )
                 continue
 
-            retry_duration_seconds = metrics.total_duration.total_seconds()
+            rerun_duration_seconds = metrics.total_duration.total_seconds()
             result += (
-                f"{os.linesep}• '{test}' has been tested {metrics.retry_count} "
-                f"time{'s' if metrics.retry_count > 1 else ''} using approx. "
-                f"{retry_duration_seconds / budget_duration_seconds * 100:.2f}% of the budget "
-                f"({retry_duration_seconds:.2f}s/{budget_duration_seconds:.2f}s)"
+                f"{os.linesep}• '{test}' has been tested {metrics.rerun_count} "
+                f"time{'s' if metrics.rerun_count > 1 else ''} using approx. "
+                f"{rerun_duration_seconds / budget_duration_seconds * 100:.2f}% of the budget "
+                f"({rerun_duration_seconds:.2f}s/{budget_duration_seconds:.2f}s)"
             )
 
         return result
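With the renames applied, the report renders roughly as follows. All values are illustrative, and `self.mode` is assumed to be "flaky" here:

```
- Used 41.67% of the budget (25.00s/60.00s)
- Active for 2 flaky tests:
• 'test_checkout' has been tested 5 times using approx. 33.33% of the budget (20.00s/60.00s)
• 'test_login' has been tested only 2 times instead of 4 times to avoid exceeding the budget
```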
@@ -263,17 +263,17 @@ def set_deadline(self) -> None:
             + self._get_budget_duration()
         )
 
-    def is_last_retry_for_test(self, test: str) -> bool:
-        "Returns true if the given test exists and this is its last retry."
+    def is_last_rerun_for_test(self, test: str) -> bool:
+        "Returns true if the given test exists and this is its last rerun."
 
         metrics = self._test_metrics.get(test)
         if not metrics:
             return False
 
         return (
-            metrics.scheduled_retry_count != 0
-            and metrics.scheduled_retry_count + 1  # Add the initial execution.
-            == metrics.retry_count
+            metrics.scheduled_rerun_count != 0
+            and metrics.scheduled_rerun_count + 1  # Add the initial execution.
+            == metrics.rerun_count
         )
 
     def suspend_item_finalizers(self, item: _pytest.nodes.Item) -> None:
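One subtlety worth noting in `is_last_rerun_for_test`: `add_duration` increments `rerun_count` on every execution, including the initial one, hence the `+ 1`. A quick self-contained check of that arithmetic:

```python
# With 3 scheduled reruns, the predicate fires on the 4th recorded
# execution (1 initial run + 3 reruns).
scheduled_rerun_count = 3
rerun_count = 0
for _ in range(4):
    rerun_count += 1  # What add_duration does on each execution.

is_last = scheduled_rerun_count != 0 and scheduled_rerun_count + 1 == rerun_count
assert is_last
```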