Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
SOLOv2-pytorch
Commits
10830bc7
Commit
10830bc7
authored
Apr 02, 2020
by
taokong
Browse files
add 'find_unused_parameters' in dist train
parent
83363a9a
Changes
3
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
18 additions
and
5 deletions
+18
-5
mmdet/apis/train.py
mmdet/apis/train.py
+8
-3
tools/test.py
tools/test.py
+5
-1
tools/test_robustness.py
tools/test_robustness.py
+5
-1
No files found.
mmdet/apis/train.py
View file @
10830bc7
...
@@ -206,9 +206,14 @@ def _dist_train(model,
     ]
     # put model on gpus
     # model = MMDistributedDataParallel(model.cuda())
-    model = MMDistributedDataParallel(model.cuda(),
-                                      device_ids=[torch.cuda.current_device()],
-                                      broadcast_buffers=False)
+    find_unused_parameters = True
+    # Sets the `find_unused_parameters` parameter in
+    # torch.nn.parallel.DistributedDataParallel
+    model = MMDistributedDataParallel(
+        model.cuda(),
+        device_ids=[torch.cuda.current_device()],
+        broadcast_buffers=False,
+        find_unused_parameters=find_unused_parameters)
     # build runner
     optimizer = build_optimizer(model, cfg.optimizer)
...
tools/test.py
View file @
10830bc7
...
@@ -240,7 +240,11 @@ def main():
         model = MMDataParallel(model, device_ids=[0])
         outputs = single_gpu_test(model, data_loader, args.show)
     else:
-        model = MMDistributedDataParallel(model.cuda())
+        # model = MMDistributedDataParallel(model.cuda())
+        model = MMDistributedDataParallel(
+            model.cuda(),
+            device_ids=[torch.cuda.current_device()],
+            broadcast_buffers=False)
         outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                  args.gpu_collect)
...
...
tools/test_robustness.py
View file @
10830bc7
...
@@ -375,7 +375,11 @@ def main():
         model = MMDataParallel(model, device_ids=[0])
         outputs = single_gpu_test(model, data_loader, args.show)
     else:
-        model = MMDistributedDataParallel(model.cuda())
+        # model = MMDistributedDataParallel(model.cuda())
+        model = MMDistributedDataParallel(
+            model.cuda(),
+            device_ids=[torch.cuda.current_device()],
+            broadcast_buffers=False)
         outputs = multi_gpu_test(model, data_loader, args.tmpdir)
     rank, _ = get_dist_info()
...
...
bailuo
@bailuo
mentioned in commit
eda7242b
·
Nov 19, 2025
mentioned in commit
eda7242b
mentioned in commit eda7242bd08f8c24e1781934b5b535565ea9d679
Toggle commit list
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment