1 parent 9eb3492 commit d4e95fc
src/forge/observability/perf_tracker.py
@@ -186,7 +186,7 @@ def _start_memory_tracking(self) -> None:

         if should_track:
             _set_memory_active(True)
-            torch.cuda.reset_max_memory_allocated()
+            torch.cuda.reset_peak_memory_stats()
             self._start_mem = torch.cuda.memory_allocated()
             self._memory_started = True

@@ -202,7 +202,7 @@ def _stop_memory_tracking(self) -> None:
             )
             record_metric(f"{self.prefix}/memory_peak_max_gb", peak_mem, Reduce.MAX)
             _set_memory_active(False)

         self._memory_started = False

     def _record_timing_metrics(
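
The change swaps the deprecated torch.cuda.reset_max_memory_allocated() for torch.cuda.reset_peak_memory_stats(): in current PyTorch the old function is only a thin, warning-emitting wrapper around the new one, which resets all peak memory counters that max_memory_allocated() later reads. The sketch below illustrates the start/stop pattern this diff touches. It is not the repository's tracker class; the PeakMemoryTracker name, the prefix argument, and the print-based reporting are illustrative stand-ins for the repo's record_metric/Reduce machinery, and a CUDA device is assumed to be available.

import torch


class PeakMemoryTracker:
    """Minimal sketch: track GPU memory growth and peak allocation over a region."""

    def __init__(self, prefix: str = "example") -> None:
        self.prefix = prefix
        self._start_mem = 0

    def start(self) -> None:
        # Reset the peak counters that max_memory_allocated() reports.
        # reset_peak_memory_stats() is the current API; the deprecated
        # reset_max_memory_allocated() now just delegates to it.
        torch.cuda.reset_peak_memory_stats()
        self._start_mem = torch.cuda.memory_allocated()

    def stop(self) -> None:
        # Peak allocation since start(), plus net growth over the region, in GiB.
        peak_gb = torch.cuda.max_memory_allocated() / 1024**3
        delta_gb = (torch.cuda.memory_allocated() - self._start_mem) / 1024**3
        # Stand-in for record_metric(f"{prefix}/memory_peak_max_gb", ..., Reduce.MAX).
        print(f"{self.prefix}/memory_peak_max_gb={peak_gb:.3f} "
              f"{self.prefix}/memory_delta_gb={delta_gb:.3f}")


if torch.cuda.is_available():
    tracker = PeakMemoryTracker("forward")
    tracker.start()
    x = torch.randn(1024, 1024, device="cuda")
    y = x @ x
    tracker.stop()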