1 parent e23393f · commit 987663c
megatron/initialize.py
@@ -290,9 +290,7 @@ def _initialize_distributed():
                 args.local_rank = device
             torch.cuda.set_device(device)
         # Call the init process
-        torch.distributed.init_process_group(
-            backend=args.distributed_backend,
-            world_size=args.world_size, rank=args.rank)
+        deepspeed.init_distributed(args.distributed_backend)
 
     # Set the tensor model-parallel, pipeline model-parallel, and
     # data-parallel communicators.
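
For context, a minimal standalone sketch of what the new call does, assuming a standard torch.distributed launcher environment (RANK, WORLD_SIZE, MASTER_ADDR, MASTER_PORT set by the launcher): deepspeed.init_distributed() reads those variables and initializes torch.distributed with the requested backend, which is why the explicit init_process_group() call can be dropped. The snippet below is illustrative only and not part of this commit.

import os
import torch
import deepspeed

# Illustrative sketch (not Megatron code): deepspeed.init_distributed()
# picks up RANK, WORLD_SIZE, MASTER_ADDR, and MASTER_PORT from the
# environment and initializes torch.distributed with the given backend.
deepspeed.init_distributed(dist_backend="nccl")

# The usual torch.distributed API is available afterwards.
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()

# Bind this process to its GPU; LOCAL_RANK is set by the launcher.
local_rank = int(os.environ.get("LOCAL_RANK", 0))
torch.cuda.set_device(local_rank)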