Unverified Commit 64928acc authored by Kai Chen's avatar Kai Chen Committed by GitHub
Browse files

Rename normalize to norm_cfg (#637)

* rename normalize to norm_cfg

* update configs

* Update resnet.py
parent 960e614c
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='CascadeRCNN',
num_stages=3,
......@@ -11,7 +12,7 @@ model = dict(
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
normalize=dict(type='BN', requires_grad=False),
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe'),
shared_head=dict(
......@@ -21,7 +22,7 @@ model = dict(
stride=2,
dilation=1,
style='caffe',
normalize=dict(type='BN', requires_grad=False),
norm_cfg=norm_cfg,
norm_eval=True),
rpn_head=dict(
type='RPNHead',
......
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='CascadeRCNN',
num_stages=3,
......@@ -11,7 +12,7 @@ model = dict(
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
normalize=dict(type='BN', requires_grad=False),
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe'),
shared_head=dict(
......@@ -21,7 +22,7 @@ model = dict(
stride=2,
dilation=1,
style='caffe',
normalize=dict(type='BN', requires_grad=False),
norm_cfg=norm_cfg,
norm_eval=True),
rpn_head=dict(
type='RPNHead',
......
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FastRCNN',
pretrained='open-mmlab://resnet50_caffe',
......@@ -10,7 +11,7 @@ model = dict(
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
normalize=dict(type='BN', requires_grad=False),
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe'),
shared_head=dict(
......@@ -20,7 +21,7 @@ model = dict(
stride=2,
dilation=1,
style='caffe',
normalize=dict(type='BN', requires_grad=False),
norm_cfg=norm_cfg,
norm_eval=True),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
......
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
pretrained='open-mmlab://resnet50_caffe',
......@@ -10,7 +11,7 @@ model = dict(
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
normalize=dict(type='BN', requires_grad=False),
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe'),
shared_head=dict(
......@@ -20,7 +21,7 @@ model = dict(
stride=2,
dilation=1,
style='caffe',
normalize=dict(type='BN', requires_grad=False),
norm_cfg=norm_cfg,
norm_eval=True),
rpn_head=dict(
type='RPNHead',
......
# model settings
conv_cfg = dict(type='ConvWS')
normalize = dict(type='GN', num_groups=32, requires_grad=True)
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
type='FasterRCNN',
pretrained='open-mmlab://jhu/resnet50_gn_ws',
......@@ -12,14 +12,14 @@ model = dict(
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
normalize=normalize),
norm_cfg=norm_cfg),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
conv_cfg=conv_cfg,
normalize=normalize),
norm_cfg=norm_cfg),
rpn_head=dict(
type='RPNHead',
in_channels=256,
......@@ -48,7 +48,7 @@ model = dict(
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
conv_cfg=conv_cfg,
normalize=normalize))
norm_cfg=norm_cfg))
# model training and testing settings
train_cfg = dict(
rpn=dict(
......
# model settings
conv_cfg = dict(type='ConvWS')
normalize = dict(type='GN', num_groups=32, requires_grad=True)
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
type='MaskRCNN',
pretrained='open-mmlab://jhu/resnet50_gn_ws',
......@@ -12,14 +12,14 @@ model = dict(
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
normalize=normalize),
norm_cfg=norm_cfg),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
conv_cfg=conv_cfg,
normalize=normalize),
norm_cfg=norm_cfg),
rpn_head=dict(
type='RPNHead',
in_channels=256,
......@@ -48,7 +48,7 @@ model = dict(
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
conv_cfg=conv_cfg,
normalize=normalize),
norm_cfg=norm_cfg),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
......@@ -61,7 +61,7 @@ model = dict(
conv_out_channels=256,
num_classes=81,
conv_cfg=conv_cfg,
normalize=normalize))
norm_cfg=norm_cfg))
# model training and testing settings
train_cfg = dict(
rpn=dict(
......
# model settings
conv_cfg = dict(type='ConvWS')
normalize = dict(type='GN', num_groups=32, requires_grad=True)
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
type='MaskRCNN',
pretrained='open-mmlab://jhu/resnet50_gn_ws',
......@@ -12,14 +12,14 @@ model = dict(
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
normalize=normalize),
norm_cfg=norm_cfg),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
conv_cfg=conv_cfg,
normalize=normalize),
norm_cfg=norm_cfg),
rpn_head=dict(
type='RPNHead',
in_channels=256,
......@@ -48,7 +48,7 @@ model = dict(
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
conv_cfg=conv_cfg,
normalize=normalize),
norm_cfg=norm_cfg),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
......@@ -61,7 +61,7 @@ model = dict(
conv_out_channels=256,
num_classes=81,
conv_cfg=conv_cfg,
normalize=normalize))
norm_cfg=norm_cfg))
# model training and testing settings
train_cfg = dict(
rpn=dict(
......
# model settings
conv_cfg = dict(type='ConvWS')
normalize = dict(type='GN', num_groups=32, requires_grad=True)
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
type='MaskRCNN',
pretrained='open-mmlab://jhu/resnext101_32x4d_gn_ws',
......@@ -14,14 +14,14 @@ model = dict(
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
normalize=normalize),
norm_cfg=norm_cfg),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
conv_cfg=conv_cfg,
normalize=normalize),
norm_cfg=norm_cfg),
rpn_head=dict(
type='RPNHead',
in_channels=256,
......@@ -50,7 +50,7 @@ model = dict(
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
conv_cfg=conv_cfg,
normalize=normalize),
norm_cfg=norm_cfg),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
......@@ -63,7 +63,7 @@ model = dict(
conv_out_channels=256,
num_classes=81,
conv_cfg=conv_cfg,
normalize=normalize))
norm_cfg=norm_cfg))
# model training and testing settings
train_cfg = dict(
rpn=dict(
......
# model settings
normalize = dict(type='GN', num_groups=32, requires_grad=True)
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
type='MaskRCNN',
......@@ -11,13 +11,13 @@ model = dict(
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
normalize=normalize),
norm_cfg=norm_cfg),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
normalize=normalize),
norm_cfg=norm_cfg),
rpn_head=dict(
type='RPNHead',
in_channels=256,
......@@ -45,7 +45,7 @@ model = dict(
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
normalize=normalize),
norm_cfg=norm_cfg),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
......@@ -57,7 +57,7 @@ model = dict(
in_channels=256,
conv_out_channels=256,
num_classes=81,
normalize=normalize))
norm_cfg=norm_cfg))
# model training and testing settings
train_cfg = dict(
......
# model settings
normalize = dict(type='GN', num_groups=32, requires_grad=True)
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
type='MaskRCNN',
......@@ -11,13 +11,13 @@ model = dict(
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
normalize=normalize),
norm_cfg=norm_cfg),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
normalize=normalize),
norm_cfg=norm_cfg),
rpn_head=dict(
type='RPNHead',
in_channels=256,
......@@ -45,7 +45,7 @@ model = dict(
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
normalize=normalize),
norm_cfg=norm_cfg),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
......@@ -57,7 +57,7 @@ model = dict(
in_channels=256,
conv_out_channels=256,
num_classes=81,
normalize=normalize))
norm_cfg=norm_cfg))
# model training and testing settings
train_cfg = dict(
......
# model settings
normalize = dict(type='GN', num_groups=32, requires_grad=True)
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
type='MaskRCNN',
......@@ -11,13 +11,13 @@ model = dict(
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
normalize=normalize),
norm_cfg=norm_cfg),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
normalize=normalize),
norm_cfg=norm_cfg),
rpn_head=dict(
type='RPNHead',
in_channels=256,
......@@ -45,7 +45,7 @@ model = dict(
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
normalize=normalize),
norm_cfg=norm_cfg),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
......@@ -57,7 +57,7 @@ model = dict(
in_channels=256,
conv_out_channels=256,
num_classes=81,
normalize=normalize))
norm_cfg=norm_cfg))
# model training and testing settings
train_cfg = dict(
......
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='MaskRCNN',
pretrained='open-mmlab://resnet50_caffe',
......@@ -10,7 +11,7 @@ model = dict(
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
normalize=dict(type='BN', requires_grad=False),
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe'),
shared_head=dict(
......@@ -20,7 +21,7 @@ model = dict(
stride=2,
dilation=1,
style='caffe',
normalize=dict(type='BN', requires_grad=False),
norm_cfg=norm_cfg,
norm_eval=True),
rpn_head=dict(
type='RPNHead',
......
......@@ -10,7 +10,7 @@ model = dict(
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
normalize=dict(type='BN', requires_grad=False),
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe'),
neck=None,
......
......@@ -17,13 +17,13 @@ class RetinaHead(AnchorHead):
octave_base_scale=4,
scales_per_octave=3,
conv_cfg=None,
normalize=None,
norm_cfg=None,
**kwargs):
self.stacked_convs = stacked_convs
self.octave_base_scale = octave_base_scale
self.scales_per_octave = scales_per_octave
self.conv_cfg = conv_cfg
self.normalize = normalize
self.norm_cfg = norm_cfg
octave_scales = np.array(
[2**(i / scales_per_octave) for i in range(scales_per_octave)])
anchor_scales = octave_scales * octave_base_scale
......@@ -49,8 +49,7 @@ class RetinaHead(AnchorHead):
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
normalize=self.normalize,
bias=self.normalize is None))
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
......@@ -59,8 +58,7 @@ class RetinaHead(AnchorHead):
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
normalize=self.normalize,
bias=self.normalize is None))
norm_cfg=self.norm_cfg))
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_anchors * self.cls_out_channels,
......
......@@ -23,13 +23,13 @@ class BasicBlock(nn.Module):
style='pytorch',
with_cp=False,
conv_cfg=None,
normalize=dict(type='BN'),
norm_cfg=dict(type='BN'),
dcn=None):
super(BasicBlock, self).__init__()
assert dcn is None, "Not implemented yet."
self.norm1_name, norm1 = build_norm_layer(normalize, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(normalize, planes, postfix=2)
self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
self.conv1 = build_conv_layer(
conv_cfg,
......@@ -95,7 +95,7 @@ class Bottleneck(nn.Module):
style='pytorch',
with_cp=False,
conv_cfg=None,
normalize=dict(type='BN'),
norm_cfg=dict(type='BN'),
dcn=None):
"""Bottleneck block for ResNet.
If style is "pytorch", the stride-two layer is the 3x3 conv layer,
......@@ -106,21 +106,26 @@ class Bottleneck(nn.Module):
assert dcn is None or isinstance(dcn, dict)
self.inplanes = inplanes
self.planes = planes
self.stride = stride
self.dilation = dilation
self.downsample = downsample
self.style = style
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.normalize = normalize
self.norm_cfg = norm_cfg
self.dcn = dcn
self.with_dcn = dcn is not None
if style == 'pytorch':
if self.style == 'pytorch':
self.conv1_stride = 1
self.conv2_stride = stride
else:
self.conv1_stride = stride
self.conv2_stride = 1
self.norm1_name, norm1 = build_norm_layer(normalize, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(normalize, planes, postfix=2)
self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
normalize, planes * self.expansion, postfix=3)
norm_cfg, planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
conv_cfg,
......@@ -180,11 +185,6 @@ class Bottleneck(nn.Module):
self.add_module(self.norm3_name, norm3)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
self.normalize = normalize
@property
def norm1(self):
......@@ -249,7 +249,7 @@ def make_res_layer(block,
style='pytorch',
with_cp=False,
conv_cfg=None,
normalize=dict(type='BN'),
norm_cfg=dict(type='BN'),
dcn=None):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
......@@ -261,7 +261,7 @@ def make_res_layer(block,
kernel_size=1,
stride=stride,
bias=False),
build_norm_layer(normalize, planes * block.expansion)[1],
build_norm_layer(norm_cfg, planes * block.expansion)[1],
)
layers = []
......@@ -275,7 +275,7 @@ def make_res_layer(block,
style=style,
with_cp=with_cp,
conv_cfg=conv_cfg,
normalize=normalize,
norm_cfg=norm_cfg,
dcn=dcn))
inplanes = planes * block.expansion
for i in range(1, blocks):
......@@ -288,7 +288,7 @@ def make_res_layer(block,
style=style,
with_cp=with_cp,
conv_cfg=conv_cfg,
normalize=normalize,
norm_cfg=norm_cfg,
dcn=dcn))
return nn.Sequential(*layers)
......@@ -309,7 +309,7 @@ class ResNet(nn.Module):
the first 1x1 conv layer.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
normalize (dict): dictionary to construct and config norm layer.
norm_cfg (dict): dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
......@@ -336,7 +336,7 @@ class ResNet(nn.Module):
style='pytorch',
frozen_stages=-1,
conv_cfg=None,
normalize=dict(type='BN', requires_grad=True),
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
dcn=None,
stage_with_dcn=(False, False, False, False),
......@@ -356,7 +356,7 @@ class ResNet(nn.Module):
self.style = style
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.normalize = normalize
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.norm_eval = norm_eval
self.dcn = dcn
......@@ -386,7 +386,7 @@ class ResNet(nn.Module):
style=self.style,
with_cp=with_cp,
conv_cfg=conv_cfg,
normalize=normalize,
norm_cfg=norm_cfg,
dcn=dcn)
self.inplanes = planes * self.block.expansion
layer_name = 'layer{}'.format(i + 1)
......@@ -411,8 +411,7 @@ class ResNet(nn.Module):
stride=2,
padding=3,
bias=False)
self.norm1_name, norm1 = build_norm_layer(
self.normalize, 64, postfix=1)
self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
self.add_module(self.norm1_name, norm1)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
......
......@@ -24,11 +24,11 @@ class Bottleneck(_Bottleneck):
width = math.floor(self.planes * (base_width / 64)) * groups
self.norm1_name, norm1 = build_norm_layer(
self.normalize, width, postfix=1)
self.norm_cfg, width, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
self.normalize, width, postfix=2)
self.norm_cfg, width, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
self.normalize, self.planes * self.expansion, postfix=3)
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
......@@ -102,7 +102,7 @@ def make_res_layer(block,
style='pytorch',
with_cp=False,
conv_cfg=None,
normalize=dict(type='BN'),
norm_cfg=dict(type='BN'),
dcn=None):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
......@@ -114,7 +114,7 @@ def make_res_layer(block,
kernel_size=1,
stride=stride,
bias=False),
build_norm_layer(normalize, planes * block.expansion)[1],
build_norm_layer(norm_cfg, planes * block.expansion)[1],
)
layers = []
......@@ -130,7 +130,7 @@ def make_res_layer(block,
style=style,
with_cp=with_cp,
conv_cfg=conv_cfg,
normalize=normalize,
norm_cfg=norm_cfg,
dcn=dcn))
inplanes = planes * block.expansion
for i in range(1, blocks):
......@@ -145,7 +145,7 @@ def make_res_layer(block,
style=style,
with_cp=with_cp,
conv_cfg=conv_cfg,
normalize=normalize,
norm_cfg=norm_cfg,
dcn=dcn))
return nn.Sequential(*layers)
......@@ -168,7 +168,7 @@ class ResNeXt(ResNet):
the first 1x1 conv layer.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
normalize (dict): dictionary to construct and config norm layer.
norm_cfg (dict): dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
......@@ -208,7 +208,7 @@ class ResNeXt(ResNet):
style=self.style,
with_cp=self.with_cp,
conv_cfg=self.conv_cfg,
normalize=self.normalize,
norm_cfg=self.norm_cfg,
dcn=dcn)
self.inplanes = planes * self.block.expansion
layer_name = 'layer{}'.format(i + 1)
......
......@@ -25,7 +25,7 @@ class ConvFCBBoxHead(BBoxHead):
conv_out_channels=256,
fc_out_channels=1024,
conv_cfg=None,
normalize=None,
norm_cfg=None,
*args,
**kwargs):
super(ConvFCBBoxHead, self).__init__(*args, **kwargs)
......@@ -46,8 +46,7 @@ class ConvFCBBoxHead(BBoxHead):
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.normalize = normalize
self.with_bias = normalize is None
self.norm_cfg = norm_cfg
# add shared convs and fcs
self.shared_convs, self.shared_fcs, last_layer_dim = \
......@@ -104,8 +103,7 @@ class ConvFCBBoxHead(BBoxHead):
3,
padding=1,
conv_cfg=self.conv_cfg,
normalize=self.normalize,
bias=self.with_bias))
norm_cfg=self.norm_cfg))
last_layer_dim = self.conv_out_channels
# add branch specific fc layers
branch_fcs = nn.ModuleList()
......
......@@ -23,7 +23,7 @@ class FCNMaskHead(nn.Module):
num_classes=81,
class_agnostic=False,
conv_cfg=None,
normalize=None):
norm_cfg=None):
super(FCNMaskHead, self).__init__()
if upsample_method not in [None, 'deconv', 'nearest', 'bilinear']:
raise ValueError(
......@@ -39,8 +39,7 @@ class FCNMaskHead(nn.Module):
self.num_classes = num_classes
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.normalize = normalize
self.with_bias = normalize is None
self.norm_cfg = norm_cfg
self.convs = nn.ModuleList()
for i in range(self.num_convs):
......@@ -54,8 +53,7 @@ class FCNMaskHead(nn.Module):
self.conv_kernel_size,
padding=padding,
conv_cfg=conv_cfg,
normalize=normalize,
bias=self.with_bias))
norm_cfg=norm_cfg))
upsample_in_channels = (self.conv_out_channels
if self.num_convs > 0 else in_channels)
if self.upsample_method is None:
......
......@@ -31,7 +31,7 @@ class FusedSemanticHead(nn.Module):
ignore_label=255,
loss_weight=0.2,
conv_cfg=None,
normalize=None):
norm_cfg=None):
super(FusedSemanticHead, self).__init__()
self.num_ins = num_ins
self.fusion_level = fusion_level
......@@ -42,8 +42,7 @@ class FusedSemanticHead(nn.Module):
self.ignore_label = ignore_label
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.normalize = normalize
self.with_bias = normalize is None
self.norm_cfg = norm_cfg
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
......@@ -53,8 +52,7 @@ class FusedSemanticHead(nn.Module):
self.in_channels,
1,
conv_cfg=self.conv_cfg,
normalize=self.normalize,
bias=self.with_bias,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
......@@ -67,15 +65,13 @@ class FusedSemanticHead(nn.Module):
3,
padding=1,
conv_cfg=self.conv_cfg,
normalize=self.normalize,
bias=self.with_bias))
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
normalize=self.normalize,
bias=self.with_bias)
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label)
......
......@@ -13,8 +13,7 @@ class HTCMaskHead(FCNMaskHead):
self.conv_out_channels,
1,
conv_cfg=self.conv_cfg,
normalize=self.normalize,
bias=self.with_bias)
norm_cfg=self.norm_cfg)
def init_weights(self):
super(HTCMaskHead, self).init_weights()
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment