ModelZoo / SOLOv2-pytorch · Commits

Commit eda7242b
authored Apr 02, 2020 by taokong

Revert "add 'find_unused_parameters' in dist train"

This reverts commit 10830bc7.

parent 10830bc7
Showing 3 changed files with 5 additions and 18 deletions (+5 −18)
mmdet/apis/train.py        +3 −8
tools/test.py              +1 −5
tools/test_robustness.py   +1 −5
mmdet/apis/train.py

@@ -206,14 +206,9 @@ def _dist_train(model,
     ]
     # put model on gpus
     # model = MMDistributedDataParallel(model.cuda())
-    find_unused_parameters = True
-    # Sets the `find_unused_parameters` parameter in
-    # torch.nn.parallel.DistributedDataParallel
-    model = MMDistributedDataParallel(
-        model.cuda(),
-        device_ids=[torch.cuda.current_device()],
-        broadcast_buffers=False,
-        find_unused_parameters=find_unused_parameters)
+    model = MMDistributedDataParallel(model.cuda(),
+                                      device_ids=[torch.cuda.current_device()],
+                                      broadcast_buffers=False)

     # build runner
     optimizer = build_optimizer(model, cfg.optimizer)
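For context on the reverted flag: find_unused_parameters=True tells torch.nn.parallel.DistributedDataParallel to walk the autograd graph after each forward pass and mark parameters that will receive no gradient, so gradient reduction can finish even when the forward pass skips part of the model. That extra traversal runs on every iteration, which is presumably why the hard-coded True is being reverted here. A minimal, self-contained sketch in plain PyTorch (illustration only, not code from this repository; the single-process gloo group and the PartialNet module are assumptions):

import os

import torch
import torch.distributed as dist
import torch.nn as nn


class PartialNet(nn.Module):
    """Toy module whose forward pass never touches one of its layers."""

    def __init__(self):
        super().__init__()
        self.used = nn.Linear(8, 8)
        self.skipped = nn.Linear(8, 8)  # receives no gradient below

    def forward(self, x):
        return self.used(x)


def main():
    # Single-process group, just enough to construct DDP; a real run would
    # launch one process per GPU.
    os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
    os.environ.setdefault('MASTER_PORT', '29500')
    dist.init_process_group('gloo', rank=0, world_size=1)

    model = nn.parallel.DistributedDataParallel(
        PartialNet(), find_unused_parameters=True)
    loss = model(torch.randn(4, 8)).sum()
    # Succeeds here; with the default find_unused_parameters=False, DDP's
    # reducer fails because `skipped` never produced a gradient.
    loss.backward()
    dist.destroy_process_group()


if __name__ == '__main__':
    main()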
tools/test.py

@@ -240,11 +240,7 @@ def main():
         model = MMDataParallel(model, device_ids=[0])
         outputs = single_gpu_test(model, data_loader, args.show)
     else:
-        # model = MMDistributedDataParallel(model.cuda())
-        model = MMDistributedDataParallel(
-            model.cuda(),
-            device_ids=[torch.cuda.current_device()],
-            broadcast_buffers=False)
+        model = MMDistributedDataParallel(model.cuda())
         outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                  args.gpu_collect)
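The test-time change is safe for a different reason: find_unused_parameters only affects DDP's gradient reduction, and evaluation is forward-only, so the bare MMDistributedDataParallel(model.cuda()) wrapper being restored loses nothing. A minimal sketch of the forward-only pattern (generic illustration, not this repo's multi_gpu_test; the run_eval helper and its handling of each batch are assumptions):

import torch


def run_eval(model, data_loader):
    """Forward-only loop; `model` is assumed to be DDP-wrapped already."""
    model.eval()
    results = []
    # Under no_grad there is no backward pass, so the gradient-reduction
    # machinery that find_unused_parameters configures never runs.
    with torch.no_grad():
        for data in data_loader:
            results.append(model(data))
    return results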
tools/test_robustness.py

@@ -375,11 +375,7 @@ def main():
         model = MMDataParallel(model, device_ids=[0])
         outputs = single_gpu_test(model, data_loader, args.show)
     else:
-        # model = MMDistributedDataParallel(model.cuda())
-        model = MMDistributedDataParallel(
-            model.cuda(),
-            device_ids=[torch.cuda.current_device()],
-            broadcast_buffers=False)
+        model = MMDistributedDataParallel(model.cuda())
         outputs = multi_gpu_test(model, data_loader, args.tmpdir)

     rank, _ = get_dist_info()
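All three files go back to hard-coded wrappers. A middle ground, and the pattern later versions of upstream mmdetection use in train.py, is to read the flag from the experiment config with a conservative default; a sketch along those lines (the 'find_unused_parameters' config key is an assumption for this repository):

import torch
from mmcv.parallel import MMDistributedDataParallel


def build_dist_model(model, cfg):
    # Assumed config key: lets individual configs opt in when their forward
    # pass really does skip parameters, while everyone else keeps the
    # cheaper default that this revert restores.
    find_unused_parameters = cfg.get('find_unused_parameters', False)
    return MMDistributedDataParallel(
        model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False,
        find_unused_parameters=find_unused_parameters)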
bailuo (@bailuo) mentioned in commit f90f36717b78db2718d660a091742a926eaa4837 · Nov 19, 2025