Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
SOLOv2-pytorch
Commits
56db9d2e
Commit
56db9d2e
authored
Apr 06, 2020
by
WXinlong
Browse files
add R101 configs
parent
89005895
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
260 additions
and
0 deletions
+260
-0
configs/solo/decoupled_solo_r101_fpn_8gpu_3x.py
configs/solo/decoupled_solo_r101_fpn_8gpu_3x.py
+130
-0
configs/solo/solo_r101_fpn_8gpu_3x.py
configs/solo/solo_r101_fpn_8gpu_3x.py
+130
-0
No files found.
configs/solo/decoupled_solo_r101_fpn_8gpu_3x.py
0 → 100644
View file @
56db9d2e
# mmdetection-style config: Decoupled SOLO, ResNet-101 + FPN, 8 GPUs, 3x schedule.

# model settings
model = dict(
    type='SOLO',
    pretrained='torchvision://resnet101',
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),  # C2, C3, C4, C5
        frozen_stages=1,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=0,
        num_outs=5),
    bbox_head=dict(
        type='DecoupledSOLOHead',
        num_classes=81,  # 80 COCO classes + background
        in_channels=256,
        stacked_convs=7,
        seg_feat_channels=256,
        strides=[8, 8, 16, 32, 32],
        scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
        sigma=0.2,
        num_grids=[40, 36, 24, 16, 12],
        cate_down_pos=0,
        with_deform=False,
        loss_ins=dict(
            type='DiceLoss',
            use_sigmoid=True,
            loss_weight=3.0),
        loss_cate=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
    ))
# training and testing settings
train_cfg = dict()
test_cfg = dict(
    nms_pre=500,
    score_thr=0.1,
    mask_thr=0.5,
    update_thr=0.05,
    kernel='gaussian',  # gaussian/linear
    sigma=2.0,
    max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True)
# multi-scale training: height jittered over six values at fixed 1333 width
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 800), (1333, 768), (1333, 736),
                   (1333, 704), (1333, 672), (1333, 640)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[27, 33])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 36
device_ids = range(8)
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/decoupled_solo_release_r101_fpn_8gpu_3x'
load_from = None
resume_from = None
workflow = [('train', 1)]
configs/solo/solo_r101_fpn_8gpu_3x.py
0 → 100644
View file @
56db9d2e
# mmdetection-style config: SOLO, ResNet-101 + FPN, 8 GPUs, 3x schedule.

# model settings
model = dict(
    type='SOLO',
    pretrained='torchvision://resnet101',
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),  # C2, C3, C4, C5
        frozen_stages=1,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=0,
        num_outs=5),
    bbox_head=dict(
        type='SOLOHead',
        num_classes=81,  # 80 COCO classes + background
        in_channels=256,
        stacked_convs=7,
        seg_feat_channels=256,
        strides=[8, 8, 16, 32, 32],
        scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
        sigma=0.2,
        num_grids=[40, 36, 24, 16, 12],
        cate_down_pos=0,
        with_deform=False,
        loss_ins=dict(
            type='DiceLoss',
            use_sigmoid=True,
            loss_weight=3.0),
        loss_cate=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
    ))
# training and testing settings
train_cfg = dict()
test_cfg = dict(
    nms_pre=500,
    score_thr=0.1,
    mask_thr=0.5,
    update_thr=0.05,
    kernel='gaussian',  # gaussian/linear
    sigma=2.0,
    max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True)
# multi-scale training: height jittered over six values at fixed 1333 width
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 800), (1333, 768), (1333, 736),
                   (1333, 704), (1333, 672), (1333, 640)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[27, 33])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 36
device_ids = range(8)
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/solo_release_r101_fpn_8gpu_3x'
load_from = None
resume_from = None
workflow = [('train', 1)]
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment