"vscode:/vscode.git/clone" did not exist on "d1fcb208cc67b9cbd7423bc58a37c9eef3741539"
Commit b462d254 authored by Jared Casper

Merge branch 'slym/allreduce-overlap' into 'main'

use low-priority stream for nccl overlapping all-reduce and gemm

See merge request ADLR/megatron-lm!319
parents 6e1bde1e f3e57f6f
@@ -176,22 +176,11 @@ def _initialize_distributed():
             else:
                 args.local_rank = device
             torch.cuda.set_device(device)
-        # Increase cuda stream priority of NCCL ops when overlapping with other ops
-        if (not args.no_async_tensor_model_parallel_allreduce and
-                args.tensor_model_parallel_size > 1):
-            from torch._C._distributed_c10d import ProcessGroupNCCL
-            pg_options = ProcessGroupNCCL.Options()
-            pg_options.is_high_priority_stream = True
-            pg_options._timeout = timedelta(days=7)
-        else:
-            pg_options = None
-        # Call the init process
-        torch.distributed.init_process_group(
-            backend=args.distributed_backend,
-            world_size=args.world_size, rank=args.rank,
-            timeout=timedelta(days=7),
-            pg_options=pg_options)
+        # Call the init process
+        torch.distributed.init_process_group(
+            backend=args.distributed_backend,
+            world_size=args.world_size, rank=args.rank,
+            timeout=timedelta(days=7))
     # Set the tensor model-parallel, pipeline model-parallel, and
     # data-parallel communicators.
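For context, the knob being removed from the default process group here is PyTorch's per-communicator NCCL stream priority: a ProcessGroupNCCL.Options object with is_high_priority_stream set to True makes that communicator launch its NCCL kernels on a high-priority CUDA stream. The sketch below is not code from this merge; it only illustrates how such options can be attached to one specific communicator (for example a tensor-model-parallel group) through the pg_options argument of torch.distributed.new_group. The helper name and the ranks are purely hypothetical.

from datetime import timedelta

import torch


def _nccl_high_priority_options():
    # Hypothetical helper: build NCCL options that request a high-priority
    # CUDA stream for a communicator's collectives (the same API the removed
    # code used for the default group).
    from torch._C._distributed_c10d import ProcessGroupNCCL
    opts = ProcessGroupNCCL.Options()
    opts.is_high_priority_stream = True
    return opts


def build_tensor_model_parallel_group(tp_ranks):
    # Assumes torch.distributed.init_process_group(...) has already been
    # called (as in the hunk above) and that every rank calls this with the
    # same tp_ranks list.
    return torch.distributed.new_group(
        ranks=tp_ranks,
        timeout=timedelta(days=7),
        pg_options=_nccl_high_priority_options())

Groups created without pg_options fall back to NCCL's default stream priority, which is what the simplified init_process_group call above now does for the global group.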