Unverified commit 64928acc authored by Kai Chen, committed by GitHub

Rename normalize to norm_cfg (#637)

* rename normalize to norm_cfg

* update configs

* Update resnet.py
parent 960e614c
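For readers updating their own configs, the sketch below shows what the rename means in practice for an FPN neck definition. It is illustrative only: the channel numbers and the BN settings are placeholder values in the style of typical mmdetection configs of this era, not taken from this commit.

# Hypothetical FPN neck config; field values are placeholders.

# Before this commit: the normalization layer was configured via `normalize`.
neck_old = dict(
    type='FPN',
    in_channels=[256, 512, 1024, 2048],
    out_channels=256,
    num_outs=5,
    normalize=dict(type='BN', requires_grad=True))

# After this commit: the same option is spelled `norm_cfg`.
neck_new = dict(
    type='FPN',
    in_channels=[256, 512, 1024, 2048],
    out_channels=256,
    num_outs=5,
    norm_cfg=dict(type='BN', requires_grad=True))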
@@ -18,7 +18,7 @@ class FPN(nn.Module):
                  add_extra_convs=False,
                  extra_convs_on_inputs=True,
                  conv_cfg=None,
-                 normalize=None,
+                 norm_cfg=None,
                  activation=None):
         super(FPN, self).__init__()
         assert isinstance(in_channels, list)
@@ -27,7 +27,6 @@ class FPN(nn.Module):
         self.num_ins = len(in_channels)
         self.num_outs = num_outs
         self.activation = activation
-        self.with_bias = normalize is None
         if end_level == -1:
             self.backbone_end_level = self.num_ins
@@ -51,8 +50,7 @@ class FPN(nn.Module):
                 out_channels,
                 1,
                 conv_cfg=conv_cfg,
-                normalize=normalize,
-                bias=self.with_bias,
+                norm_cfg=norm_cfg,
                 activation=self.activation,
                 inplace=False)
             fpn_conv = ConvModule(
@@ -61,8 +59,7 @@ class FPN(nn.Module):
                 3,
                 padding=1,
                 conv_cfg=conv_cfg,
-                normalize=normalize,
-                bias=self.with_bias,
+                norm_cfg=norm_cfg,
                 activation=self.activation,
                 inplace=False)
@@ -83,8 +80,8 @@ class FPN(nn.Module):
                     3,
                     stride=2,
                     padding=1,
-                    normalize=normalize,
-                    bias=self.with_bias,
+                    conv_cfg=conv_cfg,
+                    norm_cfg=norm_cfg,
                     activation=self.activation,
                     inplace=False)
                 self.fpn_convs.append(extra_fpn_conv)
@@ -17,13 +17,13 @@ class ResLayer(nn.Module):
                  stride=2,
                  dilation=1,
                  style='pytorch',
-                 normalize=dict(type='BN', requires_grad=True),
+                 norm_cfg=dict(type='BN', requires_grad=True),
                  norm_eval=True,
                  with_cp=False,
                  dcn=None):
         super(ResLayer, self).__init__()
         self.norm_eval = norm_eval
-        self.normalize = normalize
+        self.norm_cfg = norm_cfg
         self.stage = stage
         block, stage_blocks = ResNet.arch_settings[depth]
         stage_block = stage_blocks[stage]
@@ -39,7 +39,7 @@ class ResLayer(nn.Module):
             dilation=dilation,
             style=style,
             with_cp=with_cp,
-            normalize=self.normalize,
+            norm_cfg=self.norm_cfg,
             dcn=dcn)
         self.add_module('layer{}'.format(stage + 1), res_layer)
@@ -42,6 +42,27 @@ def build_conv_layer(cfg, *args, **kwargs):
 class ConvModule(nn.Module):
     """Conv-Norm-Activation block.
+
+    Args:
+        in_channels (int): Same as nn.Conv2d.
+        out_channels (int): Same as nn.Conv2d.
+        kernel_size (int or tuple[int]): Same as nn.Conv2d.
+        stride (int or tuple[int]): Same as nn.Conv2d.
+        padding (int or tuple[int]): Same as nn.Conv2d.
+        dilation (int or tuple[int]): Same as nn.Conv2d.
+        groups (int): Same as nn.Conv2d.
+        bias (bool or str): If specified as `auto`, it will be decided by the
+            norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
+            False.
+        conv_cfg (dict): Config dict for convolution layer.
+        norm_cfg (dict): Config dict for normalization layer.
+        activation (str or None): Activation type, "ReLU" by default.
+        inplace (bool): Whether to use inplace mode for activation.
+        activate_last (bool): Whether to apply the activation layer in the
+            last. (Do not use this flag since the behavior and api may be
+            changed in the future.)
     """

     def __init__(self,
                  in_channels,
@@ -51,35 +72,42 @@ class ConvModule(nn.Module):
                  padding=0,
                  dilation=1,
                  groups=1,
-                 bias=True,
+                 bias='auto',
                  conv_cfg=None,
-                 normalize=None,
+                 norm_cfg=None,
                  activation='relu',
                  inplace=True,
                  activate_last=True):
         super(ConvModule, self).__init__()
         assert conv_cfg is None or isinstance(conv_cfg, dict)
-        assert normalize is None or isinstance(normalize, dict)
-        self.with_norm = normalize is not None
-        self.with_activatation = activation is not None
-        self.with_bias = bias
+        assert norm_cfg is None or isinstance(norm_cfg, dict)
+        self.conv_cfg = conv_cfg
+        self.norm_cfg = norm_cfg
+        self.activation = activation
+        self.inplace = inplace
+        self.activate_last = activate_last
+        self.with_norm = norm_cfg is not None
+        self.with_activatation = activation is not None
+        # if the conv layer is before a norm layer, bias is unnecessary.
+        if bias == 'auto':
+            bias = False if self.with_norm else True
+        self.with_bias = bias
         if self.with_norm and self.with_bias:
             warnings.warn('ConvModule has norm and bias at the same time')
-        self.conv = build_conv_layer(
-            conv_cfg,
-            in_channels,
-            out_channels,
-            kernel_size,
-            stride,
-            padding,
-            dilation,
-            groups,
-            bias=bias)
+        # build convolution layer
+        self.conv = build_conv_layer(conv_cfg,
+                                     in_channels,
+                                     out_channels,
+                                     kernel_size,
+                                     stride=stride,
+                                     padding=padding,
+                                     dilation=dilation,
+                                     groups=groups,
+                                     bias=bias)
+        # export the attributes of self.conv to a higher level for convenience
         self.in_channels = self.conv.in_channels
         self.out_channels = self.conv.out_channels
         self.kernel_size = self.conv.kernel_size
@@ -90,17 +118,21 @@ class ConvModule(nn.Module):
         self.output_padding = self.conv.output_padding
         self.groups = self.conv.groups
+        # build normalization layers
         if self.with_norm:
             norm_channels = out_channels if self.activate_last else in_channels
-            self.norm_name, norm = build_norm_layer(normalize, norm_channels)
+            self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
             self.add_module(self.norm_name, norm)
+        # build activation layer
         if self.with_activatation:
-            assert activation in ['relu'], 'Only ReLU supported.'
+            if self.activation not in ['relu']:
+                raise ValueError('{} is currently not supported.'.format(
+                    self.activation))
             if self.activation == 'relu':
                 self.activate = nn.ReLU(inplace=inplace)
-        # Default using msra init
+        # Use msra init by default
         self.init_weights()

     @property
@@ -121,6 +153,7 @@ class ConvModule(nn.Module):
             if activate and self.with_activatation:
                 x = self.activate(x)
         else:
+            # WARN: this may be removed or modified
             if norm and self.with_norm:
                 x = self.norm(x)
             if activate and self.with_activatation:
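To illustrate the `bias='auto'` behavior documented in the new ConvModule docstring above, here is a rough usage sketch. The import path is an assumption for mmdetection of this era and may differ in your checkout; the assertions simply restate the logic visible in the diff.

import torch

# Assumed import location; adjust to wherever ConvModule lives in your checkout.
from mmdet.models.utils import ConvModule

# With a norm_cfg, bias='auto' resolves to False: the norm layer already applies
# an affine shift, so a conv bias would be redundant (forcing bias=True alongside
# a norm layer triggers the warning added in this commit).
conv_bn = ConvModule(3, 16, 3, padding=1, norm_cfg=dict(type='BN'))
assert conv_bn.with_norm and not conv_bn.with_bias

# Without a norm_cfg, bias='auto' resolves to True, matching a plain nn.Conv2d.
conv_plain = ConvModule(3, 16, 3, padding=1)
assert not conv_plain.with_norm and conv_plain.with_bias

x = torch.rand(1, 3, 32, 32)
print(conv_bn(x).shape)     # torch.Size([1, 16, 32, 32])
print(conv_plain(x).shape)  # torch.Size([1, 16, 32, 32])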