@@ -84,7 +84,7 @@ def supervised_training_step(
         trainer = Engine(update_fn)

     .. versionadded:: 0.4.5
-    .. versionchanged:: 0.5.0
+    .. versionchanged:: 0.4.7
         Added Gradient Accumulation.
     """

@@ -157,7 +157,7 @@ def supervised_training_step_amp(
         trainer = Engine(update_fn)

     .. versionadded:: 0.4.5
-    .. versionchanged:: 0.5.0
+    .. versionchanged:: 0.4.7
         Added Gradient Accumulation.
     """

@@ -240,7 +240,7 @@ def supervised_training_step_apex(
         trainer = Engine(update_fn)

     .. versionadded:: 0.4.5
-    .. versionchanged:: 0.5.0
+    .. versionchanged:: 0.4.7
         Added Gradient Accumulation.
     """

@@ -316,7 +316,7 @@ def supervised_training_step_tpu(
         trainer = Engine(update_fn)

     .. versionadded:: 0.4.5
-    .. versionchanged:: 0.5.0
+    .. versionchanged:: 0.4.7
         Added Gradient Accumulation argument for all supervised training methods.
     """
     try:
@@ -491,7 +491,7 @@ def output_transform_fn(x, y, y_pred, loss):
         - Added ``amp_mode`` argument for automatic mixed precision.
         - Added ``scaler`` argument for gradient scaling.

-    .. versionchanged:: 0.5.0
+    .. versionchanged:: 0.4.7
         Added Gradient Accumulation argument for all supervised training methods.
     """

0 commit comments