dcuai / dlexamples · Commits

Commit 0fd8347d, authored Jan 08, 2023 by unknown
Add mmclassification-0.24.1 code; remove mmclassification-speed-benchmark
Parent: cc567e9e
Changes: 838 · Showing 20 changed files with 383 additions and 2 deletions (+383, -2)
openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/make_divisible.py (+1, -0)
openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/position_encoding.py (+41, -0)
openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/se_layer.py (+80, -0)
openmmlab_test/mmclassification-0.24.1/mmcls/utils/__init__.py (+12, -0)
openmmlab_test/mmclassification-0.24.1/mmcls/utils/collect_env.py (+1, -0)
openmmlab_test/mmclassification-0.24.1/mmcls/utils/device.py (+15, -0)
openmmlab_test/mmclassification-0.24.1/mmcls/utils/distribution.py (+68, -0)
openmmlab_test/mmclassification-0.24.1/mmcls/utils/logger.py (+56, -0)
openmmlab_test/mmclassification-0.24.1/mmcls/utils/setup_env.py (+47, -0)
openmmlab_test/mmclassification-0.24.1/mmcls/version.py (+2, -2)
openmmlab_test/mmclassification-0.24.1/model-index.yml (+34, -0)
openmmlab_test/mmclassification-0.24.1/mult_test.sh (+7, -0)
openmmlab_test/mmclassification-0.24.1/requirements.txt (+0, -0)
openmmlab_test/mmclassification-0.24.1/requirements/docs.txt (+6, -0)
openmmlab_test/mmclassification-0.24.1/requirements/mminstall.txt (+1, -0)
openmmlab_test/mmclassification-0.24.1/requirements/optional.txt (+5, -0)
openmmlab_test/mmclassification-0.24.1/requirements/readthedocs.txt (+3, -0)
openmmlab_test/mmclassification-0.24.1/requirements/runtime.txt (+3, -0)
openmmlab_test/mmclassification-0.24.1/requirements/tests.txt (+1, -0)
openmmlab_test/mmclassification-0.24.1/resources/mmcls-logo.png (+0, -0)
Too many changes to show. To preserve performance, only 838 of 838+ files are displayed.
openmmlab_test/mmclassification-speed-benchmark/mmcls/models/utils/make_divisible.py → openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/make_divisible.py
View file @ 0fd8347d
# Copyright (c) OpenMMLab. All rights reserved.
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
    """Make divisible function.
...
...
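The body of make_divisible is truncated in this diff. A hedged usage sketch (not part of this commit, import path assumed from how mmclassification re-exports its utils):

# Illustrative only: make_divisible rounds a channel count to a multiple of
# `divisor` without dropping below `min_ratio` of the original value.
from mmcls.models.utils import make_divisible  # export path assumed

print(make_divisible(37, 8))         # expected: 40 (nearest multiple of 8)
print(make_divisible(512 // 16, 8))  # typical SELayer squeeze width, expected: 32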
openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/position_encoding.py
0 → 100644
View file @ 0fd8347d
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.runner.base_module import BaseModule


class ConditionalPositionEncoding(BaseModule):
    """The Conditional Position Encoding (CPE) module.

    The CPE is the implementation of 'Conditional Positional Encodings
    for Vision Transformers <https://arxiv.org/abs/2102.10882>'_.

    Args:
        in_channels (int): Number of input channels.
        embed_dims (int): The feature dimension. Default: 768.
        stride (int): Stride of conv layer. Default: 1.
    """

    def __init__(self, in_channels, embed_dims=768, stride=1, init_cfg=None):
        super(ConditionalPositionEncoding, self).__init__(init_cfg=init_cfg)
        self.proj = nn.Conv2d(
            in_channels,
            embed_dims,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=True,
            groups=embed_dims)
        self.stride = stride

    def forward(self, x, hw_shape):
        B, N, C = x.shape
        H, W = hw_shape
        feat_token = x
        # convert (B, N, C) to (B, C, H, W)
        cnn_feat = feat_token.transpose(1, 2).view(B, C, H, W).contiguous()
        if self.stride == 1:
            x = self.proj(cnn_feat) + cnn_feat
        else:
            x = self.proj(cnn_feat)
        x = x.flatten(2).transpose(1, 2)
        return x
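A hedged usage sketch of the module added above; the example shapes and import path are assumptions, not part of this commit:

import torch
from mmcls.models.utils import ConditionalPositionEncoding  # export path assumed

cpe = ConditionalPositionEncoding(in_channels=768, embed_dims=768, stride=1)
tokens = torch.randn(2, 14 * 14, 768)   # (B, N, C) token sequence with N = H * W
out = cpe(tokens, hw_shape=(14, 14))    # (2, 196, 768), same layout as the input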
openmmlab_test/mmclassification-0.24.1/mmcls/models/utils/se_layer.py
0 → 100644
View file @ 0fd8347d
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule

from .make_divisible import make_divisible


class SELayer(BaseModule):
    """Squeeze-and-Excitation Module.

    Args:
        channels (int): The input (and output) channels of the SE layer.
        squeeze_channels (None or int): The intermediate channel number of
            SElayer. Default: None, means the value of ``squeeze_channels``
            is ``make_divisible(channels // ratio, divisor)``.
        ratio (int): Squeeze ratio in SELayer, the intermediate channel will
            be ``make_divisible(channels // ratio, divisor)``. Only used when
            ``squeeze_channels`` is None. Default: 16.
        divisor(int): The divisor to true divide the channel number. Only
            used when ``squeeze_channels`` is None. Default: 8.
        conv_cfg (None or dict): Config dict for convolution layer. Default:
            None, which means using conv2d.
        return_weight(bool): Whether to return the weight. Default: False.
        act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configurated
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configurated by the first dict and the
            second activation layer will be configurated by the second dict.
            Default: (dict(type='ReLU'), dict(type='Sigmoid'))
    """

    def __init__(self,
                 channels,
                 squeeze_channels=None,
                 ratio=16,
                 divisor=8,
                 bias='auto',
                 conv_cfg=None,
                 act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')),
                 return_weight=False,
                 init_cfg=None):
        super(SELayer, self).__init__(init_cfg)
        if isinstance(act_cfg, dict):
            act_cfg = (act_cfg, act_cfg)
        assert len(act_cfg) == 2
        assert mmcv.is_tuple_of(act_cfg, dict)
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        if squeeze_channels is None:
            squeeze_channels = make_divisible(channels // ratio, divisor)
        assert isinstance(squeeze_channels, int) and squeeze_channels > 0, \
            '"squeeze_channels" should be a positive integer, but get ' + \
            f'{squeeze_channels} instead.'
        self.return_weight = return_weight
        self.conv1 = ConvModule(
            in_channels=channels,
            out_channels=squeeze_channels,
            kernel_size=1,
            stride=1,
            bias=bias,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[0])
        self.conv2 = ConvModule(
            in_channels=squeeze_channels,
            out_channels=channels,
            kernel_size=1,
            stride=1,
            bias=bias,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[1])

    def forward(self, x):
        out = self.global_avgpool(x)
        out = self.conv1(out)
        out = self.conv2(out)
        if self.return_weight:
            return out
        else:
            return x * out
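A hedged usage sketch of SELayer; shapes and the import path are illustrative assumptions:

import torch
from mmcls.models.utils import SELayer  # export path assumed

se = SELayer(channels=64)        # squeeze width defaults to make_divisible(64 // 16, 8)
feat = torch.randn(2, 64, 56, 56)
print(se(feat).shape)            # torch.Size([2, 64, 56, 56]); channel-wise recalibration
print(SELayer(channels=64, return_weight=True)(feat).shape)  # torch.Size([2, 64, 1, 1])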
openmmlab_test/mmclassification-0.24.1/mmcls/utils/__init__.py
0 → 100644
View file @ 0fd8347d
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .device import auto_select_device
from .distribution import wrap_distributed_model, wrap_non_distributed_model
from .logger import get_root_logger, load_json_log
from .setup_env import setup_multi_processes

__all__ = [
    'collect_env', 'get_root_logger', 'load_json_log', 'setup_multi_processes',
    'wrap_non_distributed_model', 'wrap_distributed_model', 'auto_select_device'
]
openmmlab_test/mmclassification-speed-benchmark/mmcls/utils/collect_env.py → openmmlab_test/mmclassification-0.24.1/mmcls/utils/collect_env.py
View file @ 0fd8347d
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import collect_env as collect_base_env
from mmcv.utils import get_git_hash
...
...
openmmlab_test/mmclassification-0.24.1/mmcls/utils/device.py
0 → 100644
View file @ 0fd8347d
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.utils import digit_version


def auto_select_device() -> str:
    mmcv_version = digit_version(mmcv.__version__)
    if mmcv_version >= digit_version('1.6.0'):
        from mmcv.device import get_device
        return get_device()
    elif torch.cuda.is_available():
        return 'cuda'
    else:
        return 'cpu'
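A minimal usage sketch (not part of this commit): pick the runtime device once and reuse the returned string when building dataloaders or wrapping the model.

from mmcls.utils import auto_select_device

device = auto_select_device()
print(device)  # 'cuda' when a CUDA build of torch sees a GPU, otherwise 'cpu';
               # with mmcv >= 1.6.0 the choice is delegated to mmcv.device.get_device()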
openmmlab_test/mmclassification-0.24.1/mmcls/utils/distribution.py
0 → 100644
View file @ 0fd8347d
# Copyright (c) OpenMMLab. All rights reserved.
def wrap_non_distributed_model(model, device='cuda', dim=0, *args, **kwargs):
    """Wrap module in non-distributed environment by device type.

    - For CUDA, wrap as :obj:`mmcv.parallel.MMDataParallel`.
    - For MPS, wrap as :obj:`mmcv.device.mps.MPSDataParallel`.
    - For CPU & IPU, not wrap the model.

    Args:
        model(:class:`nn.Module`): model to be parallelized.
        device(str): device type, cuda, cpu or mlu. Defaults to cuda.
        dim(int): Dimension used to scatter the data. Defaults to 0.

    Returns:
        model(nn.Module): the model to be parallelized.
    """
    if device == 'npu':
        from mmcv.device.npu import NPUDataParallel
        model = NPUDataParallel(model.npu(), dim=dim, *args, **kwargs)
    elif device == 'cuda':
        from mmcv.parallel import MMDataParallel
        model = MMDataParallel(model.cuda(), dim=dim, *args, **kwargs)
    elif device == 'cpu':
        model = model.cpu()
    elif device == 'ipu':
        model = model.cpu()
    elif device == 'mps':
        from mmcv.device import mps
        model = mps.MPSDataParallel(model.to('mps'), dim=dim, *args, **kwargs)
    else:
        raise RuntimeError(f'Unavailable device "{device}"')

    return model


def wrap_distributed_model(model, device='cuda', *args, **kwargs):
    """Build DistributedDataParallel module by device type.

    - For CUDA, wrap as :obj:`mmcv.parallel.MMDistributedDataParallel`.
    - Other device types are not supported by now.

    Args:
        model(:class:`nn.Module`): module to be parallelized.
        device(str): device type, mlu or cuda.

    Returns:
        model(:class:`nn.Module`): the module to be parallelized

    References:
        .. [1] https://pytorch.org/docs/stable/generated/torch.nn.parallel.
               DistributedDataParallel.html
    """
    if device == 'npu':
        from mmcv.device.npu import NPUDistributedDataParallel
        from torch.npu import current_device
        model = NPUDistributedDataParallel(
            model.npu(), *args, device_ids=[current_device()], **kwargs)
    elif device == 'cuda':
        from mmcv.parallel import MMDistributedDataParallel
        from torch.cuda import current_device
        model = MMDistributedDataParallel(
            model.cuda(), *args, device_ids=[current_device()], **kwargs)
    else:
        raise RuntimeError(f'Unavailable device "{device}"')

    return model
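A hedged sketch of how these wrappers are typically combined with auto_select_device in a single-process run; the stand-in model and keyword arguments are illustrative assumptions:

import torch.nn as nn
from mmcls.utils import auto_select_device, wrap_non_distributed_model

model = nn.Conv2d(3, 8, 3)                 # stand-in for a classifier
device = auto_select_device()
model = wrap_non_distributed_model(model, device=device, device_ids=[0])
# In a distributed launch one would call
#   wrap_distributed_model(model, device=device, broadcast_buffers=False)
# instead, which additionally requires an initialized process group.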
openmmlab_test/mmclassification-0.24.1/mmcls/utils/logger.py
0 → 100644
View file @ 0fd8347d
# Copyright (c) OpenMMLab. All rights reserved.
import json
import logging
from collections import defaultdict

from mmcv.utils import get_logger


def get_root_logger(log_file=None, log_level=logging.INFO):
    """Get root logger.

    Args:
        log_file (str, optional): File path of log. Defaults to None.
        log_level (int, optional): The level of logger.
            Defaults to :obj:`logging.INFO`.

    Returns:
        :obj:`logging.Logger`: The obtained logger
    """
    return get_logger('mmcls', log_file, log_level)


def load_json_log(json_log):
    """load and convert json_logs to log_dicts.

    Args:
        json_log (str): The path of the json log file.

    Returns:
        dict[int, dict[str, list]]:
            Key is the epoch, value is a sub dict. The keys in each sub dict
            are different metrics, e.g. memory, bbox_mAP, and the value is a
            list of corresponding values in all iterations in this epoch.

            .. code-block:: python

                # An example output
                {
                    1: {'iter': [100, 200, 300], 'loss': [6.94, 6.73, 6.53]},
                    2: {'iter': [100, 200, 300], 'loss': [6.33, 6.20, 6.07]},
                    ...
                }
    """
    log_dict = dict()
    with open(json_log, 'r') as log_file:
        for line in log_file:
            log = json.loads(line.strip())
            # skip lines without `epoch` field
            if 'epoch' not in log:
                continue
            epoch = log.pop('epoch')
            if epoch not in log_dict:
                log_dict[epoch] = defaultdict(list)
            for k, v in log.items():
                log_dict[epoch][k].append(v)

    return log_dict
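A hedged usage sketch; the log file path and the 'loss' key are hypothetical and depend on what the training run actually logged:

from mmcls.utils import get_root_logger, load_json_log

logger = get_root_logger()                                   # logger named 'mmcls', level INFO
log_dict = load_json_log('work_dirs/exp/20230108.log.json')  # hypothetical path
for epoch, metrics in log_dict.items():
    logger.info(f"epoch {epoch}: last loss {metrics['loss'][-1]}")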
openmmlab_test/mmclassification-0.24.1/mmcls/utils/setup_env.py
0 → 100644
View file @ 0fd8347d
# Copyright (c) OpenMMLab. All rights reserved.
import os
import platform
import warnings

import cv2
import torch.multiprocessing as mp


def setup_multi_processes(cfg):
    """Setup multi-processing environment variables."""
    # set multi-process start method as `fork` to speed up the training
    if platform.system() != 'Windows':
        mp_start_method = cfg.get('mp_start_method', 'fork')
        current_method = mp.get_start_method(allow_none=True)
        if current_method is not None and current_method != mp_start_method:
            warnings.warn(
                f'Multi-processing start method `{mp_start_method}` is '
                f'different from the previous setting `{current_method}`.'
                f'It will be force set to `{mp_start_method}`. You can change '
                f'this behavior by changing `mp_start_method` in your config.')
        mp.set_start_method(mp_start_method, force=True)

    # disable opencv multithreading to avoid system being overloaded
    opencv_num_threads = cfg.get('opencv_num_threads', 0)
    cv2.setNumThreads(opencv_num_threads)

    # setup OMP threads
    # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py  # noqa
    if 'OMP_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1:
        omp_num_threads = 1
        warnings.warn(
            f'Setting OMP_NUM_THREADS environment variable for each process '
            f'to be {omp_num_threads} in default, to avoid your system being '
            f'overloaded, please further tune the variable for optimal '
            f'performance in your application as needed.')
        os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)

    # setup MKL threads
    if 'MKL_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1:
        mkl_num_threads = 1
        warnings.warn(
            f'Setting MKL_NUM_THREADS environment variable for each process '
            f'to be {mkl_num_threads} in default, to avoid your system being '
            f'overloaded, please further tune the variable for optimal '
            f'performance in your application as needed.')
        os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
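A hedged usage sketch: the function expects an mmcv Config and is normally called once at the start of a run, before dataloaders are built; the minimal config below is illustrative.

from mmcv import Config
from mmcls.utils import setup_multi_processes

# Hypothetical minimal config; real runs pass the full training Config here.
cfg = Config(dict(mp_start_method='fork', opencv_num_threads=0,
                  data=dict(workers_per_gpu=4)))
setup_multi_processes(cfg)  # sets the start method, OpenCV threads, and OMP/MKL thread caps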
openmmlab_test/mmclassification-speed-benchmark/mmcls/version.py → openmmlab_test/mmclassification-0.24.1/mmcls/version.py
View file @ 0fd8347d
- # Copyright (c) Open-MMLab. All rights reserved.
+ # Copyright (c) OpenMMLab. All rights reserved
- __version__ = '0.12.0'
+ __version__ = '0.24.1'

def parse_version_info(version_str):
...
...
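parse_version_info is truncated in this diff. A hedged usage sketch, with the return format assumed from the usual mmcls convention:

from mmcls.version import __version__, parse_version_info

print(__version__)                    # '0.24.1'
print(parse_version_info('0.24.1'))   # expected: (0, 24, 1)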
openmmlab_test/mmclassification-0.24.1/model-index.yml
0 → 100644
View file @ 0fd8347d
Import:
  - configs/mobilenet_v2/metafile.yml
  - configs/resnet/metafile.yml
  - configs/res2net/metafile.yml
  - configs/resnext/metafile.yml
  - configs/seresnet/metafile.yml
  - configs/shufflenet_v1/metafile.yml
  - configs/shufflenet_v2/metafile.yml
  - configs/swin_transformer/metafile.yml
  - configs/swin_transformer_v2/metafile.yml
  - configs/vgg/metafile.yml
  - configs/repvgg/metafile.yml
  - configs/tnt/metafile.yml
  - configs/vision_transformer/metafile.yml
  - configs/t2t_vit/metafile.yml
  - configs/mlp_mixer/metafile.yml
  - configs/conformer/metafile.yml
  - configs/regnet/metafile.yml
  - configs/deit/metafile.yml
  - configs/twins/metafile.yml
  - configs/efficientnet/metafile.yml
  - configs/convnext/metafile.yml
  - configs/hrnet/metafile.yml
  - configs/repmlp/metafile.yml
  - configs/wrn/metafile.yml
  - configs/van/metafile.yml
  - configs/cspnet/metafile.yml
  - configs/convmixer/metafile.yml
  - configs/densenet/metafile.yml
  - configs/poolformer/metafile.yml
  - configs/csra/metafile.yml
  - configs/mvit/metafile.yml
  - configs/efficientformer/metafile.yml
  - configs/hornet/metafile.yml
openmmlab_test/mmclassification-0.24.1/mult_test.sh
0 → 100644
View file @ 0fd8347d
export MIOPEN_FIND_MODE=1
export MIOPEN_USE_APPROXIMATE_PERFORMANCE=0
export HSA_FORCE_FINE_GRAIN_PCIE=1
./tools/dist_test.sh configs/vgg/vgg16_8xb32_in1k.py models/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth 4 --metrics=accuracy --metric-options=topk=5 2>&1 | tee fp16_vgg16.log
./tools/dist_test.sh configs/resnet/resnet50_8xb32_in1k.py models/resnet50_8xb32_in1k_20210831-ea4938fc.pth 4 --metrics=accuracy --metric-options=topk=5 2>&1 | tee fp16_resnet50.log
./tools/dist_test.sh configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py models/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth 4 --metrics=accuracy --metric-options=topk=5 2>&1 | tee fp16_shufflenet_v2.log
./tools/dist_test.sh configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py models/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth 4 --metrics=accuracy --metric-options=topk=5 2>&1 | tee fp16_mobilenet_v2.log
openmmlab_test/mmclassification-speed-benchmark/requirements.txt → openmmlab_test/mmclassification-0.24.1/requirements.txt
View file @ 0fd8347d
File moved
openmmlab_test/mmclassification-0.24.1/requirements/docs.txt
0 → 100644
View file @ 0fd8347d
docutils==0.17.1
myst-parser
-e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
sphinx==4.5.0
sphinx-copybutton
sphinx_markdown_tables
openmmlab_test/mmclassification-0.24.1/requirements/mminstall.txt
0 → 100644
View file @ 0fd8347d
mmcv-full>=1.4.2,<1.9.0
openmmlab_test/mmclassification-0.24.1/requirements/optional.txt
0 → 100644
View file @ 0fd8347d
albumentations>=0.3.2 --no-binary qudida,albumentations
colorama
requests
rich
scipy
openmmlab_test/mmclassification-0.24.1/requirements/readthedocs.txt
0 → 100644
View file @ 0fd8347d
mmcv>=1.4.2
torch
torchvision
openmmlab_test/mmclassification-0.24.1/requirements/runtime.txt
0 → 100644
View file @ 0fd8347d
matplotlib>=3.1.0
numpy
packaging
openmmlab_test/mmclassification-speed-benchmark/requirements/tests.txt → openmmlab_test/mmclassification-0.24.1/requirements/tests.txt
View file @ 0fd8347d
...
...
@@ -2,6 +2,7 @@ codecov
flake8
interrogate
isort==4.3.21
mmdet
pytest
xdoctest >= 0.10.0
yapf
openmmlab_test/mmclassification-0.24.1/resources/mmcls-logo.png
0 → 100644
View file @ 0fd8347d
32.2 KB