Commit f90f3671 authored by taokong

Revert "Revert "add 'find_unused_parameters' in dist train""

This reverts commit eda7242b.
parent eda7242b
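
For context on the change being restored: `find_unused_parameters=True` makes `torch.nn.parallel.DistributedDataParallel` traverse the autograd graph after each forward pass and mark parameters that received no gradient, so gradient synchronization does not stall when part of the model (for example, a branch or head that is skipped for some inputs) goes unused in an iteration. Below is a minimal sketch of the wrapping pattern this commit restores, assuming the usual single-process-per-GPU setup and an already initialized process group; the `wrap_model` helper is illustrative only and not part of this repository.

```python
import torch
import torch.nn as nn


def wrap_model(model: nn.Module) -> nn.parallel.DistributedDataParallel:
    """Wrap a model for distributed training, tolerating unused parameters.

    Assumes torch.distributed.init_process_group() has already been called
    and that each process owns exactly one GPU.
    """
    return nn.parallel.DistributedDataParallel(
        model.cuda(),
        device_ids=[torch.cuda.current_device()],
        # do not re-broadcast buffers (e.g. BN statistics) on every forward
        broadcast_buffers=False,
        # detect parameters that receive no gradient this iteration instead
        # of hanging in the reducer while waiting for them
        find_unused_parameters=True)
```

The test-time hunks below only add `device_ids` and `broadcast_buffers=False`: no backward pass runs during evaluation, so unused-parameter detection is not needed there.
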
@@ -206,9 +206,14 @@ def _dist_train(model,
     ]
     # put model on gpus
     # model = MMDistributedDataParallel(model.cuda())
-    model = MMDistributedDataParallel(model.cuda(),
-                                      device_ids=[torch.cuda.current_device()],
-                                      broadcast_buffers=False)
+    find_unused_parameters = True
+    # Sets the `find_unused_parameters` parameter in
+    # torch.nn.parallel.DistributedDataParallel
+    model = MMDistributedDataParallel(
+        model.cuda(),
+        device_ids=[torch.cuda.current_device()],
+        broadcast_buffers=False,
+        find_unused_parameters=find_unused_parameters)
     # build runner
     optimizer = build_optimizer(model, cfg.optimizer)

@@ -240,7 +240,11 @@ def main():
         model = MMDataParallel(model, device_ids=[0])
         outputs = single_gpu_test(model, data_loader, args.show)
     else:
-        model = MMDistributedDataParallel(model.cuda())
+        # model = MMDistributedDataParallel(model.cuda())
+        model = MMDistributedDataParallel(
+            model.cuda(),
+            device_ids=[torch.cuda.current_device()],
+            broadcast_buffers=False)
         outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                  args.gpu_collect)

@@ -375,7 +375,11 @@ def main():
         model = MMDataParallel(model, device_ids=[0])
         outputs = single_gpu_test(model, data_loader, args.show)
     else:
-        model = MMDistributedDataParallel(model.cuda())
+        # model = MMDistributedDataParallel(model.cuda())
+        model = MMDistributedDataParallel(
+            model.cuda(),
+            device_ids=[torch.cuda.current_device()],
+            broadcast_buffers=False)
         outputs = multi_gpu_test(model, data_loader, args.tmpdir)
         rank, _ = get_dist_info()