1 parent 8fc300b commit 139824a
src/llama_recipes/utils/train_utils.py
@@ -428,7 +428,7 @@ def setup_environ_flags(rank):
     os.environ["NCCL_ASYNC_ERROR_HANDLING"] = str(1)
     # os.environ["TORCH_DISTRIBUTED_DEBUG"] = "DETAIL"
     # This flag will help with CUDA memory fragmentations that can lead into OOM in some cases.
-    # Note this is only availble in PyTorch Nighlies (as of July 30 2023)
+    # Note this is only available in PyTorch Nighlies (as of July 30 2023)
     # os.environ['PYTORCH_CUDA_ALLOC_CONF']='expandable_segments:True'
     if rank == 0:
         print(f"--> Running with torch dist debug set to detail")
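For context on the commented-out flag: PYTORCH_CUDA_ALLOC_CONF is read by PyTorch's CUDA caching allocator, so it only takes effect if it is in the environment before the first CUDA allocation. Below is a minimal sketch, not part of this commit, showing one way the expandable-segments option might be enabled at the top of a training entry point; the script structure and main() name are assumptions for illustration.

import os

# Set the allocator option before any CUDA memory is allocated.
# Requires a PyTorch build that supports expandable segments
# (nightlies as of July 30, 2023, and later releases).
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

import torch  # imported after the env var is set


def main():
    # The first CUDA allocation initializes the caching allocator
    # with the configuration exported above.
    x = torch.zeros(1, device="cuda")
    print(torch.cuda.memory_allocated())


if __name__ == "__main__":
    main()

Exporting the variable in the launch shell (e.g. via torchrun's environment) achieves the same effect without touching the script.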