Commit 0fd8347d authored by unknown's avatar unknown
Browse files

添加mmclassification-0.24.1代码,删除mmclassification-speed-benchmark

parent cc567e9e
# Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser

import mmcv

from mmcls.apis import inference_model, init_model, show_result_pyplot


def main():
    """Run single-image classification from the command line.

    Builds a model from a config and checkpoint, runs inference on one
    image, prints the prediction as JSON, and optionally visualizes it.
    """
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--show',
        action='store_true',
        help='Whether to show the predict results by matplotlib.')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    args = parser.parse_args()

    # build the model from a config file and a checkpoint file
    model = init_model(args.config, args.checkpoint, device=args.device)
    # test a single image
    result = inference_model(model, args.img)
    # print the results as pretty JSON; plot only when requested so the
    # script stays usable on headless machines
    print(mmcv.dump(result, file_format='json', indent=4))
    if args.show:
        show_result_pyplot(model, args.img, result)


if __name__ == '__main__':
    main()
# Fine-tune ViT-Base/16 at 224x224 input on IPUs; expected top-1 accuracy is
# 81.2 (SOTA for this setting), matching the reference results at:
# https://github.com/google-research/vision_transformer#available-vit-models
# Config vit-base-p16_ft-4xb544_in1k-224_ipu trains the model in fp16 precision:
# 8 epochs, global batch size 2176, 16 IPUs (4 replicas); throughput is roughly
# 5600 images/s, so training takes about 0.6 hours.
cfg_name=vit-base-p16_ft-4xb544_in1k-224_ipu
python3 tools/train.py configs/vision_transformer/${cfg_name}.py --ipu-replicas 4 --no-validate &&
python3 tools/test.py configs/vision_transformer/${cfg_name}.py work_dirs/${cfg_name}/latest.pth --metrics accuracy --device ipu
# Development image: build MMClassification from source on a CUDA-enabled
# PyTorch base image.
ARG PYTORCH="1.8.1"
ARG CUDA="10.2"
ARG CUDNN="7"
FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel

# Compile CUDA extensions for common GPU architectures and compress fatbins
# to keep the image size down.
ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX"
ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
# NOTE(review): restored the `$` signs lost from the original line
# ("(dirname(which conda))/../" is not valid substitution syntax). Docker ENV
# stores this value literally (no command substitution at build time); it is
# evaluated later by shells that expand it -- confirm downstream usage.
ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../"

# System libraries needed by OpenCV/mmcv; duplicate libsm6/libxext6 entries
# removed from the package list.
RUN apt-get update && apt-get install -y ffmpeg git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Install MMCV via OpenMMLab's package manager (picks a wheel matching the
# base image's CUDA/PyTorch versions).
RUN pip install openmim
RUN mim install mmcv-full

# Install MMClassification from source in editable mode.
RUN conda clean --all
RUN git clone https://github.com/open-mmlab/mmclassification.git
WORKDIR ./mmclassification
RUN pip install --no-cache-dir -e .
# Serving image: run MMClassification models with TorchServe.
ARG PYTORCH="1.8.1"
ARG CUDA="10.2"
ARG CUDNN="7"
FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel

# Pinned OpenMMLab package versions.
ARG MMCV="1.7.0"
ARG MMCLS="0.24.1"

ENV PYTHONUNBUFFERED TRUE

RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \
    ca-certificates \
    g++ \
    openjdk-11-jre-headless \
    # MMCls requirements (duplicate libsm6/libxext6 entries removed)
    ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev \
    && rm -rf /var/lib/apt/lists/*

ENV PATH="/opt/conda/bin:$PATH"
# Must be ENV, not `RUN export`: an export only lives for that layer's shell
# and would be lost before any later pip/build step could see it.
ENV FORCE_CUDA="1"

# TorchServe
RUN pip install torchserve torch-model-archiver

# OpenMMLab packages: mmcv-full wheel matched to the CUDA/PyTorch versions
# (bash is required for the ${CUDA//./} substitution), then mmcls.
ARG PYTORCH
ARG CUDA
RUN ["/bin/bash", "-c", "pip install mmcv-full==${MMCV} -f https://download.openmmlab.com/mmcv/dist/cu${CUDA//./}/torch${PYTORCH}/index.html"]
RUN pip install mmcls==${MMCLS}

# Run TorchServe as an unprivileged user with its own model store.
RUN useradd -m model-server \
    && mkdir -p /home/model-server/tmp
COPY entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh \
    && chown -R model-server /home/model-server
COPY config.properties /home/model-server/config.properties
RUN mkdir /home/model-server/model-store && chown -R model-server /home/model-server/model-store

# 8080: inference API, 8081: management API, 8082: metrics API.
EXPOSE 8080 8081 8082
USER model-server
WORKDIR /home/model-server
ENV TEMP=/home/model-server/tmp
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
CMD ["serve"]
/* Size the docs header logo box to the mmcls banner image dimensions. */
.header-logo {
background-image: url("../image/mmcls-logo.png");
background-size: 204px 40px;
height: 40px;
width: 204px;
}
/* Preserve whitespace in preformatted blocks. */
pre {
white-space: pre;
}
/* Highlight inline code in article body text. */
article.pytorch-article section code {
padding: .2em .4em;
background-color: #f3f4f7;
border-radius: 5px;
}
/* Disable the change in tables */
article.pytorch-article section table code {
padding: unset;
background-color: unset;
border-radius: unset;
}
/* Split autosummary table columns evenly. */
table.autosummary td {
width: 50%
}
.. role:: hidden
:class: hidden-section
.. currentmodule:: {{ module }}
{{ name | underline}}
.. autoclass:: {{ name }}
:members:
..
autogenerated from source/_templates/classtemplate.rst
note it does not have :inherited-members:
.. role:: hidden
:class: hidden-section
mmcls.apis
===================================
These are some high-level APIs for classification tasks.
.. contents:: mmcls.apis
:depth: 2
:local:
:backlinks: top
.. currentmodule:: mmcls.apis
Train
------------------
.. autosummary::
:toctree: generated
:nosignatures:
init_random_seed
set_random_seed
train_model
Test
------------------
.. autosummary::
:toctree: generated
:nosignatures:
single_gpu_test
multi_gpu_test
Inference
------------------
.. autosummary::
:toctree: generated
:nosignatures:
init_model
inference_model
show_result_pyplot
.. role:: hidden
:class: hidden-section
mmcls.core
===================================
This package includes some runtime components. These components are useful in
classification tasks but not supported by MMCV yet.
.. note::
Some components may be moved to MMCV in the future.
.. contents:: mmcls.core
:depth: 2
:local:
:backlinks: top
.. currentmodule:: mmcls.core
Evaluation
------------------
Evaluation metrics calculation functions
.. autosummary::
:toctree: generated
:nosignatures:
precision
recall
f1_score
precision_recall_f1
average_precision
mAP
support
average_performance
calculate_confusion_matrix
Hook
------------------
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
ClassNumCheckHook
PreciseBNHook
CosineAnnealingCooldownLrUpdaterHook
MMClsWandbHook
Optimizers
------------------
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
Lamb
.. role:: hidden
:class: hidden-section
mmcls.datasets
===================================
The ``datasets`` package contains several usual datasets for image classification tasks and some dataset wrappers.
.. currentmodule:: mmcls.datasets
Custom Dataset
--------------
.. autoclass:: CustomDataset
ImageNet
--------
.. autoclass:: ImageNet
.. autoclass:: ImageNet21k
CIFAR
-----
.. autoclass:: CIFAR10
.. autoclass:: CIFAR100
MNIST
-----
.. autoclass:: MNIST
.. autoclass:: FashionMNIST
VOC
---
.. autoclass:: VOC
StanfordCars
------------
.. autoclass:: StanfordCars
Base classes
------------
.. autoclass:: BaseDataset
.. autoclass:: MultiLabelDataset
Dataset Wrappers
----------------
.. autoclass:: ConcatDataset
.. autoclass:: RepeatDataset
.. autoclass:: ClassBalancedDataset
.. role:: hidden
:class: hidden-section
mmcls.models
===================================
The ``models`` package contains several sub-packages for addressing the different components of a model.
- :ref:`classifiers`: The top-level module which defines the whole process of a classification model.
- :ref:`backbones`: Usually a feature extraction network, e.g., ResNet, MobileNet.
- :ref:`necks`: The component between backbones and heads, e.g., GlobalAveragePooling.
- :ref:`heads`: The component for specific tasks. In MMClassification, we provide heads for classification.
- :ref:`losses`: Loss functions.
.. currentmodule:: mmcls.models
.. autosummary::
:toctree: generated
:nosignatures:
build_classifier
build_backbone
build_neck
build_head
build_loss
.. _classifiers:
Classifier
------------------
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
BaseClassifier
ImageClassifier
.. _backbones:
Backbones
------------------
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
AlexNet
CSPDarkNet
CSPNet
CSPResNeXt
CSPResNet
Conformer
ConvMixer
ConvNeXt
DenseNet
DistilledVisionTransformer
EfficientNet
HRNet
LeNet5
MlpMixer
MobileNetV2
MobileNetV3
PCPVT
PoolFormer
RegNet
RepMLPNet
RepVGG
Res2Net
ResNeSt
ResNeXt
ResNet
ResNetV1c
ResNetV1d
ResNet_CIFAR
SEResNeXt
SEResNet
SVT
ShuffleNetV1
ShuffleNetV2
SwinTransformer
T2T_ViT
TIMMBackbone
TNT
VAN
VGG
VisionTransformer
EfficientFormer
HorNet
.. _necks:
Necks
------------------
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
GlobalAveragePooling
GeneralizedMeanPooling
HRFuseScales
.. _heads:
Heads
------------------
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
ClsHead
LinearClsHead
StackedLinearClsHead
MultiLabelClsHead
MultiLabelLinearClsHead
VisionTransformerClsHead
DeiTClsHead
ConformerHead
.. _losses:
Losses
------------------
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
Accuracy
AsymmetricLoss
CrossEntropyLoss
LabelSmoothLoss
FocalLoss
SeesawLoss
.. role:: hidden
:class: hidden-section
Batch Augmentation
===================================
Batch augmentation is an augmentation which involves multiple samples, such as Mixup and CutMix.
In MMClassification, these batch augmentations are used as a part of :ref:`classifiers`. A typical usage is as below:
.. code-block:: python
   model = dict(
       backbone=...,
       neck=...,
       head=...,
       train_cfg=dict(augments=[
           dict(type='BatchMixup', alpha=0.8, prob=0.5, num_classes=num_classes),
           dict(type='BatchCutMix', alpha=1.0, prob=0.5, num_classes=num_classes),
       ]),
   )
.. currentmodule:: mmcls.models.utils.augment
Mixup
-----
.. autoclass:: BatchMixupLayer
CutMix
------
.. autoclass:: BatchCutMixLayer
ResizeMix
---------
.. autoclass:: BatchResizeMixLayer
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment