Commit 7b4cc7bf authored by taokong

using mmcv==0.2.16

parent f90f3671
@@ -8,7 +8,7 @@
 - CUDA 9.0 or higher
 - NCCL 2
 - GCC 4.9 or higher
-- [mmcv](https://github.com/open-mmlab/mmcv)
+- [mmcv 0.2.16](https://github.com/open-mmlab/mmcv/tree/v0.2.16)
 
 We have tested the following versions of OS and softwares:
...
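The hunk above updates the dependency list in the install guide to point at the pinned mmcv release. A quick runtime sanity check (a sketch, assuming mmcv is installed) can confirm the interpreter actually imports the pinned version:

```python
# Sketch: verify the imported mmcv matches the version pinned by this
# commit; mmcv exposes its release string as mmcv.__version__.
import mmcv

assert mmcv.__version__ == '0.2.16', (
    'expected mmcv 0.2.16, got {}'.format(mmcv.__version__))
```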
@@ -205,15 +205,7 @@ def _dist_train(model,
         for ds in dataset
     ]
     # put model on gpus
-    # model = MMDistributedDataParallel(model.cuda())
-    find_unused_parameters = True
-    # Sets the `find_unused_parameters` parameter in
-    # torch.nn.parallel.DistributedDataParallel
-    model = MMDistributedDataParallel(
-        model.cuda(),
-        device_ids=[torch.cuda.current_device()],
-        broadcast_buffers=False,
-        find_unused_parameters=find_unused_parameters)
+    model = MMDistributedDataParallel(model.cuda())
 
     # build runner
     optimizer = build_optimizer(model, cfg.optimizer)
...
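This hunk reverts the training-side wrapping to the single-argument form that mmcv 0.2.16 provides. A minimal sketch of the reverted call, assuming mmcv==0.2.16, a CUDA device, and an already initialized process group (`nn.Linear` stands in for the detector, which is not built here):

```python
# Sketch of the reverted wrapping under mmcv==0.2.16. The wrapper takes
# the module alone and infers the device from its parameters; the extra
# keyword arguments removed above (device_ids, broadcast_buffers,
# find_unused_parameters) are what the newer code forwarded to
# torch.nn.parallel.DistributedDataParallel.
import torch.nn as nn
from mmcv.parallel import MMDistributedDataParallel

net = nn.Linear(4, 2)  # placeholder model, not part of the repo
model = MMDistributedDataParallel(net.cuda())
```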
 matplotlib
-mmcv>=0.3.1
+mmcv==0.2.16
 numpy
 scipy
 # need older pillow until torchvision is fixed
...
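The requirements change swaps the `>=` range for an exact `==` pin, so pip resolves exactly the release the code was reverted to. A sketch of an environment check via setuptools' pkg_resources (assuming setuptools is available, which pip installs by default):

```python
# Sketch: raise VersionConflict (or DistributionNotFound) if the
# installed mmcv does not satisfy the pinned requirement.
import pkg_resources

pkg_resources.require('mmcv==0.2.16')
```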
@@ -240,11 +240,7 @@ def main():
         model = MMDataParallel(model, device_ids=[0])
         outputs = single_gpu_test(model, data_loader, args.show)
     else:
-        # model = MMDistributedDataParallel(model.cuda())
-        model = MMDistributedDataParallel(
-            model.cuda(),
-            device_ids=[torch.cuda.current_device()],
-            broadcast_buffers=False)
+        model = MMDistributedDataParallel(model.cuda())
         outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                  args.gpu_collect)
...
@@ -375,11 +375,7 @@ def main():
         model = MMDataParallel(model, device_ids=[0])
         outputs = single_gpu_test(model, data_loader, args.show)
     else:
-        # model = MMDistributedDataParallel(model.cuda())
-        model = MMDistributedDataParallel(
-            model.cuda(),
-            device_ids=[torch.cuda.current_device()],
-            broadcast_buffers=False)
+        model = MMDistributedDataParallel(model.cuda())
         outputs = multi_gpu_test(model, data_loader, args.tmpdir)
 
     rank, _ = get_dist_info()
...
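Both test-script hunks revert the same branch: single-GPU runs keep `MMDataParallel`, while distributed runs go back to the one-argument `MMDistributedDataParallel`. A sketch of that shared dispatch under mmcv==0.2.16 (`wrap_for_test` is an illustrative helper, not a function in the repository):

```python
# Sketch of the single- vs multi-GPU dispatch used by both test scripts;
# wrap_for_test is hypothetical and exists only for illustration.
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel

def wrap_for_test(model, distributed):
    if not distributed:
        # single-GPU path: plain data parallel pinned to GPU 0
        return MMDataParallel(model, device_ids=[0])
    # multi-GPU path: the reverted one-argument distributed wrapper
    return MMDistributedDataParallel(model.cuda())
```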