ModelZoo / SOLOv2-pytorch · Commits
"vscode:/vscode.git/clone" did not exist on "4a7e5609a9db54a098cfd7c958866635f685d3d3"
Commit 5b9a200e, authored Dec 27, 2018 by ThangVu
add group norm config and minor fix
parent f9a1c196
Showing 2 changed files with 188 additions and 6 deletions (+188 −6)
configs/mask_rcnn_r50_fpn_gn_2x.py  +183 −0
mmdet/models/backbones/resnet.py    +5 −6
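The new config threads normalize=dict(type='GN', num_groups=32) through the backbone, the FPN neck, the bbox head and the mask head, while the fix in resnet.py concerns BasicBlock's default normalize type. As a rough illustration of what such a dict usually resolves to (a minimal sketch only; the build_norm_layer name and signature below are made up for the example and are not necessarily the helper this repository actually uses):

import torch.nn as nn


def build_norm_layer(cfg, num_channels):
    # Illustrative only: map a `normalize` dict from the config to a layer.
    cfg = dict(cfg)                # copy so the config dict is not mutated
    layer_type = cfg.pop('type')
    assert layer_type in ['BN', 'GN']
    if layer_type == 'GN':
        # dict(type='GN', num_groups=32) -> GroupNorm over 32 channel groups
        return nn.GroupNorm(num_channels=num_channels, **cfg)
    return nn.BatchNorm2d(num_channels, **cfg)


gn = build_norm_layer(dict(type='GN', num_groups=32), 256)  # as in the neck and heads
bn = build_norm_layer(dict(type='BN'), 64)                  # the BasicBlock default after this fix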
configs/mask_rcnn_r50_fpn_gn_2x.py  (new file, 0 → 100644)
# model settings
model = dict(
    type='MaskRCNN',
    pretrained='tools/resnet50-GN.path',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch',
        normalize=dict(type='GN', num_groups=32)),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5,
        normalize=dict(type='GN', num_groups=32)),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_scales=[8],
        anchor_ratios=[0.5, 1.0, 2.0],
        anchor_strides=[4, 8, 16, 32, 64],
        target_means=[.0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0],
        use_sigmoid_cls=True),
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    bbox_head=dict(
        type='ConvFCBBoxHead',
        num_shared_convs=4,
        num_shared_fcs=1,
        in_channels=256,
        conv_out_channels=256,
        fc_out_channels=1024,
        roi_feat_size=7,
        num_classes=81,
        target_means=[0., 0., 0., 0.],
        target_stds=[0.1, 0.1, 0.2, 0.2],
        reg_class_agnostic=False,
        normalize=dict(type='GN', num_groups=32)),
    mask_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    mask_head=dict(
        type='FCNMaskHead',
        num_convs=4,
        in_channels=256,
        conv_out_channels=256,
        num_classes=81,
        normalize=dict(type='GN', num_groups=32)))
# model training and testing settings
train_cfg = dict(
    rpn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=0,
        pos_weight=-1,
        smoothl1_beta=1 / 9.0,
        debug=False),
    rcnn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.5,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=512,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True),
        mask_size=28,
        pos_weight=-1,
        debug=False))
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=2000,
        max_num=2000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        score_thr=0.05,
        nms=dict(type='nms', iou_thr=0.5),
        max_per_img=100,
        mask_thr_binary=0.5))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        img_scale=(1333, 800),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0.5,
        with_mask=True,
        with_crowd=True,
        with_label=True),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        img_scale=(1333, 800),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=True,
        with_crowd=True,
        with_label=True),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        img_scale=(1333, 800),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=False,
        with_label=False,
        test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[16, 22])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/mask_rcnn_r50_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
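The tail of the config is the usual 2x schedule: SGD at lr 0.02 with a 500-iteration linear warmup starting from one third of that value, step decays at epochs 16 and 22, and 24 epochs in total. A rough sketch of the resulting learning rate (the training hook's exact warmup formula and epoch indexing may differ slightly from this simplification):

def lr_at(epoch, cur_iter, base_lr=0.02, warmup_iters=500,
          warmup_ratio=1.0 / 3, steps=(16, 22), gamma=0.1):
    # Step policy: divide by 10 for every milestone already passed.
    lr = base_lr * gamma ** sum(epoch >= s for s in steps)
    # Linear warmup over the first `warmup_iters` iterations of training.
    if cur_iter < warmup_iters:
        alpha = cur_iter / warmup_iters
        lr = lr * (warmup_ratio * (1 - alpha) + alpha)
    return lr


print(lr_at(0, 0))        # ~0.0067 at the very first iteration
print(lr_at(10, 20000))   # 0.02 between warmup and the first decay step
print(lr_at(23, 90000))   # 0.0002 after both decay steps

Assuming the usual mmdetection-style entry point exists in this repository, the config would be launched with something like python tools/train.py configs/mask_rcnn_r50_fpn_gn_2x.py.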
mmdet/models/backbones/resnet.py
@@ -33,7 +33,7 @@ class BasicBlock(nn.Module):
                  downsample=None,
                  style='pytorch',
                  with_cp=False,
-                 normalize=dict(type='GN')):
+                 normalize=dict(type='BN')):
         super(BasicBlock, self).__init__()
         self.conv1 = conv3x3(inplanes, planes, stride, dilation)
@@ -252,16 +252,14 @@ class ResNet(nn.Module):
         self.depth = depth
         self.num_stages = num_stages
         assert num_stages >= 1 and num_stages <= 4
-        assert len(strides) == len(dilations) == num_stages
-        assert max(out_indices) < num_stages
         self.strides = strides
         self.dilations = dilations
         assert len(strides) == len(dilations) == num_stages
         self.out_indices = out_indices
         assert max(out_indices) < num_stages
         self.style = style
-        self.with_cp = with_cp
         self.frozen_stages = frozen_stages
+        self.with_cp = with_cp
         assert isinstance(normalize, dict) and 'type' in normalize
         assert normalize['type'] in ['BN', 'GN']
@@ -447,7 +445,8 @@ class ResNetClassifier(ResNet):
         if 'blobs' in cf_state:
             cf_state = cf_state['blobs']
         for i, (py_k, cf_k) in enumerate(mapping.items(), 1):
             print('[{}/{}] Loading {} to {}'.format(i, len(mapping), cf_k, py_k))
             assert py_k in py_state and cf_k in cf_state
             py_state[py_k] = torch.Tensor(cf_state[cf_k])
         self.load_state_dict(py_state)
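Taken together with the config, the backbone still validates that normalize is a dict whose type is 'BN' or 'GN'; group norm now has to be requested explicitly rather than being BasicBlock's default. A small sketch of how the backbone section of the new config maps onto the constructor touched above, assuming the model builder simply forwards the config keys as keyword arguments:

from mmdet.models.backbones.resnet import ResNet

# Mirrors the `backbone` dict in configs/mask_rcnn_r50_fpn_gn_2x.py.
backbone = ResNet(
    depth=50,
    num_stages=4,
    out_indices=(0, 1, 2, 3),
    frozen_stages=1,
    style='pytorch',
    normalize=dict(type='GN', num_groups=32))

# The asserts kept in the second hunk reject anything other than BN/GN:
# ResNet(depth=50, normalize=dict(type='LN'))  # would raise AssertionError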