Commit 0fd8347d authored by unknown

Add mmclassification-0.24.1 code, remove mmclassification-speed-benchmark

parent cc567e9e
# Copyright (c) OpenMMLab. All rights reserved.
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
"""Make divisible function.
......
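The body of make_divisible is collapsed above. For orientation, a minimal usage sketch follows, assuming the conventional OpenMMLab behavior for this helper (round the channel count to the nearest multiple of ``divisor`` without falling below ``min_ratio`` of the original); the expected values in the comments hold only under that assumption.
# Assumes the standard OpenMMLab rounding behavior for make_divisible.
channels = make_divisible(67, 8)  # rounds to the nearest multiple of 8 -> 64
squeeze = make_divisible(4, 8)    # clamped up to the divisor -> 8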
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.runner.base_module import BaseModule
class ConditionalPositionEncoding(BaseModule):
    """The Conditional Position Encoding (CPE) module.

    The CPE is the implementation of `Conditional Positional Encodings
    for Vision Transformers <https://arxiv.org/abs/2102.10882>`_.

    Args:
        in_channels (int): Number of input channels.
        embed_dims (int): The feature dimension. Default: 768.
        stride (int): Stride of conv layer. Default: 1.
    """

    def __init__(self, in_channels, embed_dims=768, stride=1, init_cfg=None):
        super(ConditionalPositionEncoding, self).__init__(init_cfg=init_cfg)
        # depthwise conv (groups == embed_dims) produces the position encoding
        self.proj = nn.Conv2d(
            in_channels,
            embed_dims,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=True,
            groups=embed_dims)
        self.stride = stride

    def forward(self, x, hw_shape):
        B, N, C = x.shape
        H, W = hw_shape
        feat_token = x
        # convert (B, N, C) to (B, C, H, W)
        cnn_feat = feat_token.transpose(1, 2).view(B, C, H, W).contiguous()
        if self.stride == 1:
            # residual connection keeps the original tokens when stride is 1
            x = self.proj(cnn_feat) + cnn_feat
        else:
            x = self.proj(cnn_feat)
        # convert (B, C, H, W) back to (B, N, C)
        x = x.flatten(2).transpose(1, 2)
        return x
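A quick shape check clarifies the token/feature-map round trip in the forward pass above. This is a minimal sketch, assuming mmcv is installed so the module imports as defined; all sizes below are illustrative.
import torch

cpe = ConditionalPositionEncoding(in_channels=64, embed_dims=64)
x = torch.randn(2, 14 * 14, 64)   # (B, N, C) tokens for a 14x14 feature map
out = cpe(x, hw_shape=(14, 14))   # depthwise conv adds positional information
assert out.shape == x.shape       # stride=1 preserves the token count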
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from .make_divisible import make_divisible
class SELayer(BaseModule):
    """Squeeze-and-Excitation Module.

    Args:
        channels (int): The input (and output) channels of the SE layer.
        squeeze_channels (None or int): The intermediate channel number of
            SELayer. Default: None, means the value of ``squeeze_channels``
            is ``make_divisible(channels // ratio, divisor)``.
        ratio (int): Squeeze ratio in SELayer, the intermediate channel will
            be ``make_divisible(channels // ratio, divisor)``. Only used when
            ``squeeze_channels`` is None. Default: 16.
        divisor (int): The divisor used by ``make_divisible`` to round the
            intermediate channel number. Only used when ``squeeze_channels``
            is None. Default: 8.
        bias (bool or str): Whether the conv layers use a bias term. If
            'auto', the bias is decided by ``ConvModule``. Default: 'auto'.
        conv_cfg (None or dict): Config dict for convolution layer. Default:
            None, which means using conv2d.
        return_weight (bool): Whether to return the channel attention weights
            instead of the reweighted input. Default: False.
        act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configured
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configured by the first dict and the
            second activation layer will be configured by the second dict.
            Default: (dict(type='ReLU'), dict(type='Sigmoid'))
    """

    def __init__(self,
                 channels,
                 squeeze_channels=None,
                 ratio=16,
                 divisor=8,
                 bias='auto',
                 conv_cfg=None,
                 act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')),
                 return_weight=False,
                 init_cfg=None):
        super(SELayer, self).__init__(init_cfg)
        if isinstance(act_cfg, dict):
            act_cfg = (act_cfg, act_cfg)
        assert len(act_cfg) == 2
        assert mmcv.is_tuple_of(act_cfg, dict)
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        if squeeze_channels is None:
            squeeze_channels = make_divisible(channels // ratio, divisor)
        assert isinstance(squeeze_channels, int) and squeeze_channels > 0, \
            '"squeeze_channels" should be a positive integer, but got ' + \
            f'{squeeze_channels} instead.'
        self.return_weight = return_weight
        self.conv1 = ConvModule(
            in_channels=channels,
            out_channels=squeeze_channels,
            kernel_size=1,
            stride=1,
            bias=bias,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[0])
        self.conv2 = ConvModule(
            in_channels=squeeze_channels,
            out_channels=channels,
            kernel_size=1,
            stride=1,
            bias=bias,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[1])

    def forward(self, x):
        out = self.global_avgpool(x)
        out = self.conv1(out)
        out = self.conv2(out)
        if self.return_weight:
            return out
        else:
            return x * out
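For reference, a minimal sketch of the SE forward pass (assuming mmcv is installed so the class above imports): with the defaults, 64 channels are squeezed down to ``make_divisible(64 // 16, 8) = 8`` and the input is reweighted per channel.
import torch

se = SELayer(channels=64)          # ratio=16 -> squeeze_channels = 8
x = torch.randn(2, 64, 32, 32)
out = se(x)                        # sigmoid-gated channel reweighting of x
assert out.shape == x.shape
gates = SELayer(channels=64, return_weight=True)(x)  # (2, 64, 1, 1) weights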
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .device import auto_select_device
from .distribution import wrap_distributed_model, wrap_non_distributed_model
from .logger import get_root_logger, load_json_log
from .setup_env import setup_multi_processes
__all__ = [
    'collect_env', 'get_root_logger', 'load_json_log',
    'setup_multi_processes', 'wrap_non_distributed_model',
    'wrap_distributed_model', 'auto_select_device'
]
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import collect_env as collect_base_env
from mmcv.utils import get_git_hash
......
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.utils import digit_version
def auto_select_device() -> str:
    mmcv_version = digit_version(mmcv.__version__)
    if mmcv_version >= digit_version('1.6.0'):
        # mmcv >= 1.6.0 ships a unified device helper
        from mmcv.device import get_device
        return get_device()
    elif torch.cuda.is_available():
        return 'cuda'
    else:
        return 'cpu'
# Copyright (c) OpenMMLab. All rights reserved.
def wrap_non_distributed_model(model, device='cuda', dim=0, *args, **kwargs):
    """Wrap module in non-distributed environment by device type.

    - For CUDA, wrap as :obj:`mmcv.parallel.MMDataParallel`.
    - For MPS, wrap as :obj:`mmcv.device.mps.MPSDataParallel`.
    - For CPU & IPU, the model is not wrapped.

    Args:
        model (:class:`nn.Module`): model to be parallelized.
        device (str): device type, one of 'npu', 'cuda', 'cpu', 'ipu' or
            'mps'. Defaults to 'cuda'.
        dim (int): Dimension used to scatter the data. Defaults to 0.

    Returns:
        model (nn.Module): the wrapped model.
    """
    if device == 'npu':
        from mmcv.device.npu import NPUDataParallel
        model = NPUDataParallel(model.npu(), dim=dim, *args, **kwargs)
    elif device == 'cuda':
        from mmcv.parallel import MMDataParallel
        model = MMDataParallel(model.cuda(), dim=dim, *args, **kwargs)
    elif device == 'cpu':
        model = model.cpu()
    elif device == 'ipu':
        model = model.cpu()
    elif device == 'mps':
        from mmcv.device import mps
        model = mps.MPSDataParallel(model.to('mps'), dim=dim, *args, **kwargs)
    else:
        raise RuntimeError(f'Unavailable device "{device}"')
    return model


def wrap_distributed_model(model, device='cuda', *args, **kwargs):
    """Build DistributedDataParallel module by device type.

    - For CUDA, wrap as :obj:`mmcv.parallel.MMDistributedDataParallel`.
    - Other device types are not supported yet.

    Args:
        model (:class:`nn.Module`): module to be parallelized.
        device (str): device type, 'npu' or 'cuda'.

    Returns:
        model (:class:`nn.Module`): the wrapped module.

    References:
        .. [1] https://pytorch.org/docs/stable/generated/torch.nn.parallel.
               DistributedDataParallel.html
    """
    if device == 'npu':
        from mmcv.device.npu import NPUDistributedDataParallel
        from torch.npu import current_device
        model = NPUDistributedDataParallel(
            model.npu(), *args, device_ids=[current_device()], **kwargs)
    elif device == 'cuda':
        from mmcv.parallel import MMDistributedDataParallel
        from torch.cuda import current_device
        model = MMDistributedDataParallel(
            model.cuda(), *args, device_ids=[current_device()], **kwargs)
    else:
        raise RuntimeError(f'Unavailable device "{device}"')
    return model
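A minimal single-process sketch tying these helpers together, assuming they are exported from ``mmcls.utils`` as the ``__init__`` above suggests; the toy model is illustrative only. ``wrap_distributed_model`` additionally requires an initialized process group (e.g. from a distributed launcher), so it is omitted here.
import torch.nn as nn
from mmcls.utils import auto_select_device, wrap_non_distributed_model

model = nn.Conv2d(3, 8, kernel_size=3)
device = auto_select_device()      # 'cuda' when available, otherwise 'cpu'
model = wrap_non_distributed_model(model, device=device)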
# Copyright (c) OpenMMLab. All rights reserved.
import json
import logging
from collections import defaultdict
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Get root logger.

    Args:
        log_file (str, optional): File path of log. Defaults to None.
        log_level (int, optional): The level of logger.
            Defaults to :obj:`logging.INFO`.

    Returns:
        :obj:`logging.Logger`: The obtained logger.
    """
    return get_logger('mmcls', log_file, log_level)


def load_json_log(json_log):
    """Load and convert a json log file to a log dict.

    Args:
        json_log (str): The path of the json log file.

    Returns:
        dict[int, dict[str, list]]:
            Key is the epoch, value is a sub dict. The keys in each sub dict
            are different metrics, e.g. memory, bbox_mAP, and the value is a
            list of corresponding values in all iterations in this epoch.

            .. code-block:: python

                # An example output
                {
                    1: {'iter': [100, 200, 300], 'loss': [6.94, 6.73, 6.53]},
                    2: {'iter': [100, 200, 300], 'loss': [6.33, 6.20, 6.07]},
                    ...
                }
    """
    log_dict = dict()
    with open(json_log, 'r') as log_file:
        for line in log_file:
            log = json.loads(line.strip())
            # skip lines without `epoch` field
            if 'epoch' not in log:
                continue
            epoch = log.pop('epoch')
            if epoch not in log_dict:
                log_dict[epoch] = defaultdict(list)
            for k, v in log.items():
                log_dict[epoch][k].append(v)
    return log_dict
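A hypothetical round trip for ``load_json_log``: write two iteration records to a temporary file, then parse them into the ``{epoch: {key: [values]}}`` layout described in the docstring. The field values below are made up for illustration.
import json
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.log.json', delete=False) as f:
    f.write(json.dumps({'epoch': 1, 'iter': 100, 'loss': 6.94}) + '\n')
    f.write(json.dumps({'epoch': 1, 'iter': 200, 'loss': 6.73}) + '\n')
    path = f.name

log_dict = load_json_log(path)
assert log_dict[1]['loss'] == [6.94, 6.73]
assert log_dict[1]['iter'] == [100, 200]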
# Copyright (c) OpenMMLab. All rights reserved.
import os
import platform
import warnings
import cv2
import torch.multiprocessing as mp
def setup_multi_processes(cfg):
    """Setup multi-processing environment variables."""
    # set multi-process start method as `fork` to speed up the training
    if platform.system() != 'Windows':
        mp_start_method = cfg.get('mp_start_method', 'fork')
        current_method = mp.get_start_method(allow_none=True)
        if current_method is not None and current_method != mp_start_method:
            warnings.warn(
                f'Multi-processing start method `{mp_start_method}` is '
                f'different from the previous setting `{current_method}`. '
                f'It will be forcibly set to `{mp_start_method}`. You can '
                f'change this behavior by changing `mp_start_method` in '
                f'your config.')
        mp.set_start_method(mp_start_method, force=True)

    # disable opencv multithreading to avoid system being overloaded
    opencv_num_threads = cfg.get('opencv_num_threads', 0)
    cv2.setNumThreads(opencv_num_threads)

    # setup OMP threads
    # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py  # noqa
    if 'OMP_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1:
        omp_num_threads = 1
        warnings.warn(
            f'Setting OMP_NUM_THREADS environment variable for each process '
            f'to be {omp_num_threads} by default, to avoid your system being '
            f'overloaded, please further tune the variable for optimal '
            f'performance in your application as needed.')
        os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)

    # setup MKL threads
    if 'MKL_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1:
        mkl_num_threads = 1
        warnings.warn(
            f'Setting MKL_NUM_THREADS environment variable for each process '
            f'to be {mkl_num_threads} by default, to avoid your system being '
            f'overloaded, please further tune the variable for optimal '
            f'performance in your application as needed.')
        os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
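A minimal sketch of a config that exercises the branches of ``setup_multi_processes`` (assumes mmcv is installed); the keys mirror exactly the fields the function reads above, and the values are illustrative.
from mmcv import Config

cfg = Config(
    dict(
        mp_start_method='fork',        # worker start method (non-Windows)
        opencv_num_threads=0,          # disable OpenCV's own threading
        data=dict(workers_per_gpu=2),  # >1 triggers the OMP/MKL thread caps
    ))
setup_multi_processes(cfg)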
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.24.1'
def parse_version_info(version_str):
......
Import:
- configs/mobilenet_v2/metafile.yml
- configs/resnet/metafile.yml
- configs/res2net/metafile.yml
- configs/resnext/metafile.yml
- configs/seresnet/metafile.yml
- configs/shufflenet_v1/metafile.yml
- configs/shufflenet_v2/metafile.yml
- configs/swin_transformer/metafile.yml
- configs/swin_transformer_v2/metafile.yml
- configs/vgg/metafile.yml
- configs/repvgg/metafile.yml
- configs/tnt/metafile.yml
- configs/vision_transformer/metafile.yml
- configs/t2t_vit/metafile.yml
- configs/mlp_mixer/metafile.yml
- configs/conformer/metafile.yml
- configs/regnet/metafile.yml
- configs/deit/metafile.yml
- configs/twins/metafile.yml
- configs/efficientnet/metafile.yml
- configs/convnext/metafile.yml
- configs/hrnet/metafile.yml
- configs/repmlp/metafile.yml
- configs/wrn/metafile.yml
- configs/van/metafile.yml
- configs/cspnet/metafile.yml
- configs/convmixer/metafile.yml
- configs/densenet/metafile.yml
- configs/poolformer/metafile.yml
- configs/csra/metafile.yml
- configs/mvit/metafile.yml
- configs/efficientformer/metafile.yml
- configs/hornet/metafile.yml
export MIOPEN_FIND_MODE=1
export MIOPEN_USE_APPROXIMATE_PERFORMANCE=0
export HSA_FORCE_FINE_GRAIN_PCIE=1
./tools/dist_test.sh configs/vgg/vgg16_8xb32_in1k.py models/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth 4 --metrics=accuracy --metric-options=topk=5 2>&1 | tee fp16_vgg16.log
./tools/dist_test.sh configs/resnet/resnet50_8xb32_in1k.py models/resnet50_8xb32_in1k_20210831-ea4938fc.pth 4 --metrics=accuracy --metric-options=topk=5 2>&1 | tee fp16_resnet50.log
./tools/dist_test.sh configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py models/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth 4 --metrics=accuracy --metric-options=topk=5 2>&1 | tee fp16_shufflenet_v2.log
./tools/dist_test.sh configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py models/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth 4 --metrics=accuracy --metric-options=topk=5 2>&1 | tee fp16_mobilenet_v2.log
docutils==0.17.1
myst-parser
-e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
sphinx==4.5.0
sphinx-copybutton
sphinx_markdown_tables
albumentations>=0.3.2 --no-binary qudida,albumentations
colorama
requests
rich
scipy