Commit 97243508 authored by sunxx1

Add DBNet code

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from torch import nn
import torch.nn.functional as F


class HSwish(nn.Module):
    """h-swish activation from MobileNetV3: x * ReLU6(x + 3) / 6."""

    def forward(self, x):
        out = x * F.relu6(x + 3, inplace=True) / 6
        return out
class HardSigmoid(nn.Module):
    """Piecewise-linear approximation of sigmoid: clamp(slope * x + offset, 0, 1)."""

    def __init__(self, slope=0.2, offset=0.5):
        super().__init__()
        self.slope = slope
        self.offset = offset

    def forward(self, x):
        x = (self.slope * x) + self.offset
        # The two threshold calls together clamp x to [0, 1].
        x = F.threshold(-x, -1, -1)
        x = F.threshold(-x, 0, 0)
        return x
class ConvBNACT(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, act=None):
        super().__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, groups=groups,
                              bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        if act == 'relu':
            self.act = nn.ReLU()
        elif act == 'hard_swish':
            self.act = HSwish()
        elif act is None:
            self.act = None
        else:
            # Fail fast instead of leaving self.act undefined for unknown values.
            raise NotImplementedError('activation {} is not supported'.format(act))

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        if self.act is not None:
            x = self.act(x)
        return x
class SEBlock(nn.Module):
    def __init__(self, in_channels, out_channels, ratio=4):
        super().__init__()
        num_mid_filter = out_channels // ratio
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=num_mid_filter, kernel_size=1, bias=True)
        self.relu1 = nn.ReLU()
        self.conv2 = nn.Conv2d(in_channels=num_mid_filter, out_channels=out_channels, kernel_size=1, bias=True)
        self.relu2 = HardSigmoid()

    def forward(self, x):
        # Squeeze-and-excitation: global pool -> bottleneck 1x1 convs -> channel-wise gate.
        attn = self.pool(x)
        attn = self.conv1(attn)
        attn = self.relu1(attn)
        attn = self.conv2(attn)
        attn = self.relu2(attn)
        return x * attn
class ResidualUnit(nn.Module):
    """MobileNetV3 inverted-residual block: 1x1 expand -> depthwise kxk conv ->
    optional SE -> 1x1 project, with an identity shortcut when shapes match."""

    def __init__(self, num_in_filter, num_mid_filter, num_out_filter, stride, kernel_size, act=None, use_se=False):
        super().__init__()
        self.conv0 = ConvBNACT(in_channels=num_in_filter, out_channels=num_mid_filter, kernel_size=1, stride=1,
                               padding=0, act=act)
        self.conv1 = ConvBNACT(in_channels=num_mid_filter, out_channels=num_mid_filter, kernel_size=kernel_size,
                               stride=stride,
                               padding=int((kernel_size - 1) // 2), act=act, groups=num_mid_filter)
        if use_se:
            self.se = SEBlock(in_channels=num_mid_filter, out_channels=num_mid_filter)
        else:
            self.se = None
        self.conv2 = ConvBNACT(in_channels=num_mid_filter, out_channels=num_out_filter, kernel_size=1, stride=1,
                               padding=0)
        self.not_add = num_in_filter != num_out_filter or stride != 1

    def forward(self, x):
        y = self.conv0(x)
        y = self.conv1(y)
        if self.se is not None:
            y = self.se(y)
        y = self.conv2(y)
        if not self.not_add:
            y = x + y
        return y
class MobileNetV3(nn.Module):
    def __init__(self, in_channels=3, **kwargs):
        """
        The MobileNetV3 backbone network for the detection module.
        Args:
            in_channels: number of input image channels
            kwargs: hyperparameters for building the network
                ('scale', default 0.5, and 'model_name', default 'large')
        """
        super().__init__()
        self.scale = kwargs.get('scale', 0.5)
        model_name = kwargs.get('model_name', 'large')
        self.inplanes = 16
        if model_name == "large":
            self.cfg = [
                # kernel, expansion, out channels, use SE, nonlinearity, stride
                [3, 16, 16, False, 'relu', 1],
                [3, 64, 24, False, 'relu', 2],
                [3, 72, 24, False, 'relu', 1],
                [5, 72, 40, True, 'relu', 2],
                [5, 120, 40, True, 'relu', 1],
                [5, 120, 40, True, 'relu', 1],
                [3, 240, 80, False, 'hard_swish', 2],
                [3, 200, 80, False, 'hard_swish', 1],
                [3, 184, 80, False, 'hard_swish', 1],
                [3, 184, 80, False, 'hard_swish', 1],
                [3, 480, 112, True, 'hard_swish', 1],
                [3, 672, 112, True, 'hard_swish', 1],
                [5, 672, 160, True, 'hard_swish', 2],
                [5, 960, 160, True, 'hard_swish', 1],
                [5, 960, 160, True, 'hard_swish', 1],
            ]
            self.cls_ch_squeeze = 960
            self.cls_ch_expand = 1280
        elif model_name == "small":
            self.cfg = [
                # kernel, expansion, out channels, use SE, nonlinearity, stride
                [3, 16, 16, True, 'relu', 2],
                [3, 72, 24, False, 'relu', 2],
                [3, 88, 24, False, 'relu', 1],
                [5, 96, 40, True, 'hard_swish', 2],
                [5, 240, 40, True, 'hard_swish', 1],
                [5, 240, 40, True, 'hard_swish', 1],
                [5, 120, 48, True, 'hard_swish', 1],
                [5, 144, 48, True, 'hard_swish', 1],
                [5, 288, 96, True, 'hard_swish', 2],
                [5, 576, 96, True, 'hard_swish', 1],
                [5, 576, 96, True, 'hard_swish', 1],
            ]
            self.cls_ch_squeeze = 576
            self.cls_ch_expand = 1280
        else:
            raise NotImplementedError("model_name [" + model_name + "] is not implemented!")
        supported_scale = [0.35, 0.5, 0.75, 1.0, 1.25]
        assert self.scale in supported_scale, \
            "supported scales are {} but input scale is {}".format(supported_scale, self.scale)
        scale = self.scale
        inplanes = self.inplanes
        cfg = self.cfg
        cls_ch_squeeze = self.cls_ch_squeeze
        # conv1: stem convolution, downsamples by 2
        self.conv1 = ConvBNACT(in_channels=in_channels,
                               out_channels=self.make_divisible(inplanes * scale),
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               groups=1,
                               act='hard_swish')
        i = 0
        inplanes = self.make_divisible(inplanes * scale)
        self.stages = nn.ModuleList()
        block_list = []
        self.out_channels = []
        for layer_cfg in cfg:
            # Close the current stage at every stride-2 block after the first few
            # layers, so each stage yields one map of the multi-scale output.
            if layer_cfg[5] == 2 and i > 2:
                self.out_channels.append(inplanes)
                self.stages.append(nn.Sequential(*block_list))
                block_list = []
            block = ResidualUnit(num_in_filter=inplanes,
                                 num_mid_filter=self.make_divisible(scale * layer_cfg[1]),
                                 num_out_filter=self.make_divisible(scale * layer_cfg[2]),
                                 act=layer_cfg[4],
                                 stride=layer_cfg[5],
                                 kernel_size=layer_cfg[0],
                                 use_se=layer_cfg[3])
            block_list.append(block)
            inplanes = self.make_divisible(scale * layer_cfg[2])
            i += 1
        self.stages.append(nn.Sequential(*block_list))
        self.conv2 = ConvBNACT(
            in_channels=inplanes,
            out_channels=self.make_divisible(scale * cls_ch_squeeze),
            kernel_size=1,
            stride=1,
            padding=0,
            groups=1,
            act='hard_swish')
        self.out_channels.append(self.make_divisible(scale * cls_ch_squeeze))
    def make_divisible(self, v, divisor=8, min_value=None):
        # Round channel counts to a multiple of `divisor`, never dropping more
        # than 10% below the original value (the usual MobileNet convention).
        if min_value is None:
            min_value = divisor
        new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
        if new_v < 0.9 * v:
            new_v += divisor
        return new_v
    def forward(self, x):
        x = self.conv1(x)
        out = []
        for stage in self.stages:
            x = stage(x)
            out.append(x)
        out[-1] = self.conv2(out[-1])
        return out
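
# A minimal smoke-test sketch, not part of the original file: it assumes the
# default constructor arguments above and an arbitrary 640x640 input, and
# prints one feature map per stage (channel counts match net.out_channels).
if __name__ == '__main__':
    import torch
    net = MobileNetV3(in_channels=3, model_name='large', scale=0.5)
    features = net(torch.randn(1, 3, 640, 640))
    for feat, c in zip(features, net.out_channels):
        print(feat.shape, c)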
# -*- coding: utf-8 -*-
# @Time    : 2019/8/23 21:54
# @Author  : zhoujun
from .resnet import *
from .resnest import *
from .shufflenetv2 import *
from .MobilenetV3 import MobileNetV3

__all__ = ['build_backbone']

support_backbone = ['resnet18', 'deformable_resnet18', 'deformable_resnet50',
                    'resnet50', 'resnet34', 'resnet101', 'resnet152',
                    'resnest50', 'resnest101', 'resnest200', 'resnest269',
                    'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0',
                    'MobileNetV3']


def build_backbone(backbone_name, **kwargs):
    assert backbone_name in support_backbone, f'supported backbones are {support_backbone}'
    # Look up the backbone class/function by name and instantiate it.
    backbone = eval(backbone_name)(**kwargs)
    return backbone
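
# A hedged usage sketch, not part of the original file: selecting a backbone
# by name the way a detection config would. The kwargs are forwarded to the
# MobileNetV3 constructor defined in MobilenetV3.py.
if __name__ == '__main__':
    backbone = build_backbone('MobileNetV3', in_channels=3, model_name='large', scale=0.5)
    print(backbone.out_channels)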
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## Email: zhanghang0704@gmail.com
## Copyright (c) 2020
##
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""ResNeSt ablation study models"""
import torch

from .resnet import ResNet, Bottleneck

__all__ = ['resnest50_fast_1s1x64d', 'resnest50_fast_2s1x64d', 'resnest50_fast_4s1x64d',
           'resnest50_fast_1s2x40d', 'resnest50_fast_2s2x40d', 'resnest50_fast_4s2x40d',
           'resnest50_fast_1s4x24d']
_url_format = 'https://hangzh.s3.amazonaws.com/encoding/models/{}-{}.pth'

_model_sha256 = {name: checksum for checksum, name in [
    ('d8fbf808', 'resnest50_fast_1s1x64d'),
    ('44938639', 'resnest50_fast_2s1x64d'),
    ('f74f3fc3', 'resnest50_fast_4s1x64d'),
    ('32830b84', 'resnest50_fast_1s2x40d'),
    ('9d126481', 'resnest50_fast_2s2x40d'),
    ('41d14ed0', 'resnest50_fast_4s2x40d'),
    ('d4a4f76f', 'resnest50_fast_1s4x24d'),
]}


def short_hash(name):
    if name not in _model_sha256:
        raise ValueError('Pretrained model for {name} is not available.'.format(name=name))
    return _model_sha256[name][:8]


resnest_model_urls = {name: _url_format.format(name, short_hash(name)) for
                      name in _model_sha256.keys()}
# The name suffix encodes the variant's hyperparameters: e.g. '2s1x64d' means
# radix=2, groups=1, bottleneck_width=64.
def resnest50_fast_1s1x64d(pretrained=False, root='~/.encoding/models', **kwargs):
    model = ResNet(Bottleneck, [3, 4, 6, 3],
                   radix=1, groups=1, bottleneck_width=64,
                   deep_stem=True, stem_width=32, avg_down=True,
                   avd=True, avd_first=True, **kwargs)
    if pretrained:
        model.load_state_dict(torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest50_fast_1s1x64d'], progress=True, check_hash=True))
    return model


def resnest50_fast_2s1x64d(pretrained=False, root='~/.encoding/models', **kwargs):
    model = ResNet(Bottleneck, [3, 4, 6, 3],
                   radix=2, groups=1, bottleneck_width=64,
                   deep_stem=True, stem_width=32, avg_down=True,
                   avd=True, avd_first=True, **kwargs)
    if pretrained:
        model.load_state_dict(torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest50_fast_2s1x64d'], progress=True, check_hash=True))
    return model


def resnest50_fast_4s1x64d(pretrained=False, root='~/.encoding/models', **kwargs):
    model = ResNet(Bottleneck, [3, 4, 6, 3],
                   radix=4, groups=1, bottleneck_width=64,
                   deep_stem=True, stem_width=32, avg_down=True,
                   avd=True, avd_first=True, **kwargs)
    if pretrained:
        model.load_state_dict(torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest50_fast_4s1x64d'], progress=True, check_hash=True))
    return model


def resnest50_fast_1s2x40d(pretrained=False, root='~/.encoding/models', **kwargs):
    model = ResNet(Bottleneck, [3, 4, 6, 3],
                   radix=1, groups=2, bottleneck_width=40,
                   deep_stem=True, stem_width=32, avg_down=True,
                   avd=True, avd_first=True, **kwargs)
    if pretrained:
        model.load_state_dict(torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest50_fast_1s2x40d'], progress=True, check_hash=True))
    return model


def resnest50_fast_2s2x40d(pretrained=False, root='~/.encoding/models', **kwargs):
    model = ResNet(Bottleneck, [3, 4, 6, 3],
                   radix=2, groups=2, bottleneck_width=40,
                   deep_stem=True, stem_width=32, avg_down=True,
                   avd=True, avd_first=True, **kwargs)
    if pretrained:
        model.load_state_dict(torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest50_fast_2s2x40d'], progress=True, check_hash=True))
    return model


def resnest50_fast_4s2x40d(pretrained=False, root='~/.encoding/models', **kwargs):
    model = ResNet(Bottleneck, [3, 4, 6, 3],
                   radix=4, groups=2, bottleneck_width=40,
                   deep_stem=True, stem_width=32, avg_down=True,
                   avd=True, avd_first=True, **kwargs)
    if pretrained:
        model.load_state_dict(torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest50_fast_4s2x40d'], progress=True, check_hash=True))
    return model


def resnest50_fast_1s4x24d(pretrained=False, root='~/.encoding/models', **kwargs):
    model = ResNet(Bottleneck, [3, 4, 6, 3],
                   radix=1, groups=4, bottleneck_width=24,
                   deep_stem=True, stem_width=32, avg_down=True,
                   avd=True, avd_first=True, **kwargs)
    if pretrained:
        model.load_state_dict(torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest50_fast_1s4x24d'], progress=True, check_hash=True))
    return model
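
# A hedged usage sketch, not part of the original file: constructing one of the
# ablation models and running a dummy forward pass. The output format depends
# on the local ResNet implementation imported above; the 224x224 input size is
# an arbitrary assumption.
if __name__ == '__main__':
    net = resnest50_fast_2s1x64d(pretrained=False)
    out = net(torch.randn(1, 3, 224, 224))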