Commit 85529f35 authored by unknown
Browse files

添加openmmlab测试用例 (Add OpenMMLab test cases)

parent b21b0c01
import mmcv
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
class SELayer(BaseModule):
    """Squeeze-and-Excitation Module.

    Args:
        channels (int): The input (and output) channels of the SE layer.
        ratio (int): Squeeze ratio in SELayer, the intermediate channel will
            be ``int(channels/ratio)``. Default: 16.
        conv_cfg (None or dict): Config dict for convolution layer.
            Default: None, which means using conv2d.
        act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If a single dict is given, both activation layers are configured
            by it. If a sequence of two dicts is given, the first dict
            configures the first (squeeze) activation and the second dict
            configures the second (excite) activation.
            Default: (dict(type='ReLU'), dict(type='Sigmoid')).
        init_cfg (dict, optional): Initialization config passed through to
            ``BaseModule``. Default: None.
    """

    def __init__(self,
                 channels,
                 ratio=16,
                 conv_cfg=None,
                 act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')),
                 init_cfg=None):
        super(SELayer, self).__init__(init_cfg)
        # A single act_cfg dict is shared by both conv layers.
        if isinstance(act_cfg, dict):
            act_cfg = (act_cfg, act_cfg)
        assert len(act_cfg) == 2
        assert mmcv.is_tuple_of(act_cfg, dict)
        squeeze_channels = int(channels / ratio)
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        # 1x1 conv that squeezes the channel dimension by ``ratio``.
        self.conv1 = ConvModule(
            in_channels=channels,
            out_channels=squeeze_channels,
            kernel_size=1,
            stride=1,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[0])
        # 1x1 conv that restores the original channel dimension.
        self.conv2 = ConvModule(
            in_channels=squeeze_channels,
            out_channels=channels,
            kernel_size=1,
            stride=1,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[1])

    def forward(self, x):
        """Scale ``x`` channel-wise by the learned attention weights."""
        weight = self.global_avgpool(x)
        weight = self.conv1(weight)
        weight = self.conv2(weight)
        return x * weight
from .collect_env import collect_env
from .logger import get_root_logger
__all__ = ['collect_env', 'get_root_logger']
from mmcv.utils import collect_env as collect_base_env
from mmcv.utils import get_git_hash
import mmcls
def collect_env():
    """Collect the information of the running environments.

    Returns:
        dict: Environment info collected by mmcv, extended with the
        MMClassification version tagged with the short git hash.
    """
    env_info = collect_base_env()
    env_info['MMClassification'] = f'{mmcls.__version__}+{get_git_hash()[:7]}'
    return env_info
# When run as a script, print each collected environment entry.
if __name__ == '__main__':
    for name, val in collect_env().items():
        print(f'{name}: {val}')
import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Return the root logger named ``mmcls``.

    Args:
        log_file (str, optional): Path of a file to also write logs to.
            Default: None.
        log_level (int): Logging level. Default: ``logging.INFO``.

    Returns:
        logging.Logger: The logger obtained via mmcv's ``get_logger``.
    """
    return get_logger('mmcls', log_file, log_level)
# Copyright (c) Open-MMLab. All rights reserved.
__version__ = '0.12.0'
def parse_version_info(version_str):
    """Parse a version string into a tuple.

    Args:
        version_str (str): The version string.

    Returns:
        tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
        (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
    """
    parts = []
    for token in version_str.split('.'):
        if token.isdigit():
            # Plain numeric component, e.g. the "3" in "1.3.0".
            parts.append(int(token))
        elif 'rc' in token:
            # Release-candidate component, e.g. "0rc1" -> 0, 'rc1'.
            pieces = token.split('rc')
            parts.append(int(pieces[0]))
            parts.append(f'rc{pieces[1]}')
    return tuple(parts)
version_info = parse_version_info(__version__)
__all__ = ['__version__', 'version_info', 'parse_version_info']
Import:
- configs/fp16/metafile.yml
- configs/mobilenet_v2/metafile.yml
- configs/resnet/metafile.yml
- configs/resnext/metafile.yml
- configs/seresnet/metafile.yml
- configs/seresnext/metafile.yml
- configs/shufflenet_v1/metafile.yml
- configs/shufflenet_v2/metafile.yml
- configs/vgg/metafile.yml
#!/bin/bash
# Distributed training launcher for a 2-GPU ROCm machine.
# Usage: ./<script> <config-file>
export HIP_VISIBLE_DEVICES=0,1
export MIOPEN_FIND_MODE=3
export HSA_FORCE_FINE_GRAIN_PCIE=1
my_config=$1
# NOTE(review): binds CPU and memory to NUMA node 2 — presumably the node
# closest to the selected GPUs; confirm for the target machine.
numactl --cpunodebind=2 --membind=2 python3 -m torch.distributed.launch --nproc_per_node=2 --master_port=29500 tools/train.py $my_config --launcher pytorch
-r requirements/optional.txt
-r requirements/runtime.txt
-r requirements/tests.txt
recommonmark
sphinx
sphinx_markdown_tables
sphinx_rtd_theme
codecov
flake8
interrogate
isort==4.3.21
pytest
xdoctest >= 0.10.0
yapf
[bdist_wheel]
universal=1
[aliases]
test=pytest
[yapf]
based_on_style = pep8
blank_line_before_nested_class_or_def = true
split_before_expression_after_opening_paren = true
[isort]
line_length = 79
multi_line_output = 0
known_standard_library = pkg_resources,setuptools
known_first_party = mmcls
known_third_party = PIL,cv2,matplotlib,mmcv,numpy,onnxruntime,pytest,torch,torchvision,ts
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY
from setuptools import find_packages, setup
def readme():
    """Return the contents of README.md decoded as UTF-8."""
    with open('README.md', encoding='utf-8') as readme_file:
        return readme_file.read()
def get_version():
    """Read ``__version__`` from mmcls/version.py without importing mmcls.

    The version file is executed in an isolated namespace dict rather than
    into the function's local scope: ``exec`` writing into ``locals()`` of a
    function frame is not guaranteed to be visible (and is explicitly
    unreliable under PEP 667 / Python 3.13+).

    Returns:
        str: The version string, e.g. ``'0.12.0'``.
    """
    version_file = 'mmcls/version.py'
    namespace = {}
    with open(version_file, 'r', encoding='utf-8') as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strips
    specific versioning information.

    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if True include version specs

    Returns:
        List[str]: list of requirements items

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import re
    import sys
    from os.path import exists
    require_fpath = fname

    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                # Editable install: package name follows the '#egg=' tag.
                info['package'] = line.split('#egg=')[1]
            else:
                # Remove versioning from the package
                # (only '>=', '==' and '>' operators are recognized here).
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip,
                                                     rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest  # NOQA
                    info['version'] = (op, version)
            yield info

    def parse_require_file(fpath):
        # Yield parsed info dicts for every non-empty, non-comment line.
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info

    def gen_packages_items():
        # Reassemble each requirement item, optionally re-attaching its
        # version spec and platform marker.
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item

    packages = list(gen_packages_items())
    return packages
# Package metadata and build configuration for the mmcls distribution.
setup(
    name='mmcls',
    version=get_version(),
    description='OpenMMLab Image Classification Toolbox and Benchmark',
    long_description=readme(),
    long_description_content_type='text/markdown',
    author='OpenMMLab',
    author_email='openmmlab@gmail.com',
    keywords='computer vision, image classification',
    url='https://github.com/open-mmlab/mmclassification',
    # Ship only the library; configs, tools and demos stay in the repo.
    packages=find_packages(exclude=('configs', 'tools', 'demo')),
    include_package_data=True,
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    license='Apache License 2.0',
    # Dependency lists are parsed from the requirements/ directory.
    tests_require=parse_requirements('requirements/tests.txt'),
    install_requires=parse_requirements('requirements/runtime.txt'),
    zip_safe=False)
#!/bin/bash
# Single-GPU training launcher for a ROCm machine.
# Usage: ./<script> <config-file>
export HIP_VISIBLE_DEVICES=3
export MIOPEN_FIND_MODE=3
my_config=$1
python3 tools/train.py $my_config
import pytest
import torch
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmcls.models.backbones import MobileNetV2
from mmcls.models.backbones.mobilenet_v2 import InvertedResidual
def is_block(modules):
    """Return True if ``modules`` is a MobileNetV2 InvertedResidual block."""
    return isinstance(modules, (InvertedResidual, ))
def is_norm(modules):
    """Return True if ``modules`` is a GroupNorm or batch-norm layer."""
    return isinstance(modules, (GroupNorm, _BatchNorm))
def check_norm_state(modules, train_state):
    """Return True if every batch-norm layer matches ``train_state``."""
    return all(mod.training == train_state for mod in modules
               if isinstance(mod, _BatchNorm))
def test_mobilenetv2_invertedresidual():
    """Test the InvertedResidual block used by MobileNetV2: stride
    validation, output shapes, residual connection and checkpointing."""
    with pytest.raises(AssertionError):
        # stride must be in [1, 2]
        InvertedResidual(16, 24, stride=3, expand_ratio=6)

    # Test InvertedResidual with checkpoint forward, stride=1
    block = InvertedResidual(16, 24, stride=1, expand_ratio=6)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size((1, 24, 56, 56))

    # Test InvertedResidual with expand_ratio=1
    # (no expansion conv, so the block only has two conv layers).
    block = InvertedResidual(16, 16, stride=1, expand_ratio=1)
    assert len(block.conv) == 2

    # Test InvertedResidual with use_res_connect
    # (same in/out channels and stride 1 enable the skip connection).
    block = InvertedResidual(16, 16, stride=1, expand_ratio=6)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert block.use_res_connect is True
    assert x_out.shape == torch.Size((1, 16, 56, 56))

    # Test InvertedResidual with checkpoint forward, stride=2
    block = InvertedResidual(16, 24, stride=2, expand_ratio=6)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size((1, 24, 28, 28))

    # Test InvertedResidual with checkpoint forward
    block = InvertedResidual(16, 24, stride=1, expand_ratio=6, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size((1, 24, 56, 56))

    # Test InvertedResidual with act_cfg=dict(type='ReLU')
    block = InvertedResidual(
        16, 24, stride=1, expand_ratio=6, act_cfg=dict(type='ReLU'))
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size((1, 24, 56, 56))
def test_mobilenetv2_backbone():
    """Test the MobileNetV2 backbone: argument validation, frozen stages,
    norm_eval, widen factors, out_indices, norm layer choices and
    checkpoint forward."""
    with pytest.raises(TypeError):
        # pretrained must be a string path
        model = MobileNetV2()
        model.init_weights(pretrained=0)

    with pytest.raises(ValueError):
        # frozen_stages must in range(-1, 8)
        MobileNetV2(frozen_stages=8)

    with pytest.raises(ValueError):
        # out_indices in range(0, 8)
        MobileNetV2(out_indices=[8])

    # Test MobileNetV2 with first stage frozen
    # (frozen params must not require grad; frozen BN stays in eval mode).
    frozen_stages = 1
    model = MobileNetV2(frozen_stages=frozen_stages)
    model.init_weights()
    model.train()
    for mod in model.conv1.modules():
        for param in mod.parameters():
            assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # Test MobileNetV2 with norm_eval=True
    model = MobileNetV2(norm_eval=True)
    model.init_weights()
    model.train()
    assert check_norm_state(model.modules(), False)

    # Test MobileNetV2 forward with widen_factor=1.0
    model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 8))
    model.init_weights()
    model.train()
    assert check_norm_state(model.modules(), True)
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 8
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
    assert feat[7].shape == torch.Size((1, 1280, 7, 7))

    # Test MobileNetV2 forward with widen_factor=0.5
    model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7))
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 8, 112, 112))
    assert feat[1].shape == torch.Size((1, 16, 56, 56))
    assert feat[2].shape == torch.Size((1, 16, 28, 28))
    assert feat[3].shape == torch.Size((1, 32, 14, 14))
    assert feat[4].shape == torch.Size((1, 48, 14, 14))
    assert feat[5].shape == torch.Size((1, 80, 7, 7))
    assert feat[6].shape == torch.Size((1, 160, 7, 7))

    # Test MobileNetV2 forward with widen_factor=2.0
    model = MobileNetV2(widen_factor=2.0)
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert feat.shape == torch.Size((1, 2560, 7, 7))

    # Test MobileNetV2 forward with out_indices=None
    # (default: single tensor output from the last stage).
    model = MobileNetV2(widen_factor=1.0)
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert feat.shape == torch.Size((1, 1280, 7, 7))

    # Test MobileNetV2 forward with dict(type='ReLU')
    model = MobileNetV2(
        widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 7))
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))

    # Test MobileNetV2 with BatchNorm forward
    model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 7))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, _BatchNorm)
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))

    # Test MobileNetV2 with GroupNorm forward
    model = MobileNetV2(
        widen_factor=1.0,
        norm_cfg=dict(type='GN', num_groups=2, requires_grad=True),
        out_indices=range(0, 7))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))

    # Test MobileNetV2 with layers 1, 3, 5 out forward
    model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4))
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 3
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 32, 28, 28))
    assert feat[2].shape == torch.Size((1, 96, 14, 14))

    # Test MobileNetV2 with checkpoint forward
    model = MobileNetV2(
        widen_factor=1.0, with_cp=True, out_indices=range(0, 7))
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
import pytest
import torch
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmcls.models.backbones import MobileNetv3
from mmcls.models.utils import InvertedResidual
def is_norm(modules):
    """Return True if ``modules`` is a GroupNorm or batch-norm layer."""
    return isinstance(modules, (GroupNorm, _BatchNorm))
def check_norm_state(modules, train_state):
    """Return True if every batch-norm layer matches ``train_state``."""
    return all(mod.training == train_state for mod in modules
               if isinstance(mod, _BatchNorm))
def test_mobilenetv3_backbone():
    """Test the MobileNetv3 backbone: argument validation, frozen stages,
    norm_eval, small/big architectures, out_indices, GroupNorm and
    checkpoint forward."""
    with pytest.raises(TypeError):
        # pretrained must be a string path
        model = MobileNetv3()
        model.init_weights(pretrained=0)

    with pytest.raises(AssertionError):
        # arch must in [small, big]
        MobileNetv3(arch='others')

    with pytest.raises(ValueError):
        # frozen_stages must less than 12 when arch is small
        MobileNetv3(arch='small', frozen_stages=12)

    with pytest.raises(ValueError):
        # frozen_stages must less than 16 when arch is big
        MobileNetv3(arch='big', frozen_stages=16)

    with pytest.raises(ValueError):
        # max out_indices must less than 11 when arch is small
        MobileNetv3(arch='small', out_indices=(11, ))

    with pytest.raises(ValueError):
        # max out_indices must less than 15 when arch is big
        MobileNetv3(arch='big', out_indices=(15, ))

    # Test MobileNetv3
    model = MobileNetv3()
    model.init_weights()
    model.train()

    # Test MobileNetv3 with first stage frozen
    # (frozen params must not require grad; frozen BN stays in eval mode).
    frozen_stages = 1
    model = MobileNetv3(frozen_stages=frozen_stages)
    model.init_weights()
    model.train()
    for param in model.conv1.parameters():
        assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # Test MobileNetv3 with norm eval
    model = MobileNetv3(norm_eval=True, out_indices=range(0, 11))
    model.init_weights()
    model.train()
    assert check_norm_state(model.modules(), False)

    # Test MobileNetv3 forward with small arch
    model = MobileNetv3(out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 11
    assert feat[0].shape == torch.Size([1, 16, 56, 56])
    assert feat[1].shape == torch.Size([1, 24, 28, 28])
    assert feat[2].shape == torch.Size([1, 24, 28, 28])
    assert feat[3].shape == torch.Size([1, 40, 14, 14])
    assert feat[4].shape == torch.Size([1, 40, 14, 14])
    assert feat[5].shape == torch.Size([1, 40, 14, 14])
    assert feat[6].shape == torch.Size([1, 48, 14, 14])
    assert feat[7].shape == torch.Size([1, 48, 14, 14])
    assert feat[8].shape == torch.Size([1, 96, 7, 7])
    assert feat[9].shape == torch.Size([1, 96, 7, 7])
    assert feat[10].shape == torch.Size([1, 96, 7, 7])

    # Test MobileNetv3 forward with small arch and GroupNorm
    model = MobileNetv3(
        out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
        norm_cfg=dict(type='GN', num_groups=2, requires_grad=True))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 11
    assert feat[0].shape == torch.Size([1, 16, 56, 56])
    assert feat[1].shape == torch.Size([1, 24, 28, 28])
    assert feat[2].shape == torch.Size([1, 24, 28, 28])
    assert feat[3].shape == torch.Size([1, 40, 14, 14])
    assert feat[4].shape == torch.Size([1, 40, 14, 14])
    assert feat[5].shape == torch.Size([1, 40, 14, 14])
    assert feat[6].shape == torch.Size([1, 48, 14, 14])
    assert feat[7].shape == torch.Size([1, 48, 14, 14])
    assert feat[8].shape == torch.Size([1, 96, 7, 7])
    assert feat[9].shape == torch.Size([1, 96, 7, 7])
    assert feat[10].shape == torch.Size([1, 96, 7, 7])

    # Test MobileNetv3 forward with big arch
    model = MobileNetv3(
        arch='big',
        out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 15
    assert feat[0].shape == torch.Size([1, 16, 112, 112])
    assert feat[1].shape == torch.Size([1, 24, 56, 56])
    assert feat[2].shape == torch.Size([1, 24, 56, 56])
    assert feat[3].shape == torch.Size([1, 40, 28, 28])
    assert feat[4].shape == torch.Size([1, 40, 28, 28])
    assert feat[5].shape == torch.Size([1, 40, 28, 28])
    assert feat[6].shape == torch.Size([1, 80, 14, 14])
    assert feat[7].shape == torch.Size([1, 80, 14, 14])
    assert feat[8].shape == torch.Size([1, 80, 14, 14])
    assert feat[9].shape == torch.Size([1, 80, 14, 14])
    assert feat[10].shape == torch.Size([1, 112, 14, 14])
    assert feat[11].shape == torch.Size([1, 112, 14, 14])
    assert feat[12].shape == torch.Size([1, 160, 14, 14])
    assert feat[13].shape == torch.Size([1, 160, 7, 7])
    assert feat[14].shape == torch.Size([1, 160, 7, 7])

    # Test MobileNetv3 forward with big arch
    # (single out index yields a single tensor, not a tuple).
    model = MobileNetv3(arch='big', out_indices=(0, ))
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert feat.shape == torch.Size([1, 16, 112, 112])

    # Test MobileNetv3 with checkpoint forward
    model = MobileNetv3(with_cp=True)
    for m in model.modules():
        if isinstance(m, InvertedResidual):
            assert m.with_cp
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert feat.shape == torch.Size([1, 96, 7, 7])
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment