2 parents ad5ce80 + 139824a commit 5fdeb55
src/llama_recipes/utils/train_utils.py
@@ -432,7 +432,7 @@ def setup_environ_flags(rank):
     os.environ["NCCL_ASYNC_ERROR_HANDLING"] = str(1)
     # os.environ["TORCH_DISTRIBUTED_DEBUG"] = "DETAIL"
     # This flag will help with CUDA memory fragmentations that can lead into OOM in some cases.
-    # Note this is only availble in PyTorch Nighlies (as of July 30 2023)
+    # Note this is only available in PyTorch Nighlies (as of July 30 2023)
     # os.environ['PYTORCH_CUDA_ALLOC_CONF']='expandable_segments:True'
     if rank == 0:
         print(f"--> Running with torch dist debug set to detail")
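Below is a minimal sketch of how the commented-out allocator flag could be enabled in practice. It is illustrative only and not part of this commit; it assumes a PyTorch build that supports expandable segments, and the variable must be set before the first CUDA allocation to take effect.

import os

# Must be set before any CUDA memory is allocated for it to take effect.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

import torch

# The caching allocator now grows memory in expandable segments, which can
# reduce fragmentation-related OOMs during long training runs.
x = torch.zeros(1, device="cuda")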