Commit 7360bb8a authored by limm's avatar limm
Browse files

add demo part

parent fb54db0f
Pipeline #2805 canceled with stages
# Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser
from mmengine.fileio import dump
from rich import print_json
from mmpretrain.apis import ImageClassificationInferencer
def main():
    """Classify a single image and pretty-print the prediction as JSON.

    Builds an ``ImageClassificationInferencer`` from a model name or config
    file path (optionally with an explicit checkpoint), runs it on the given
    image, and prints the prediction with ``pred_scores`` stripped out.
    """
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('model', help='Model name or config file path')
    parser.add_argument('--checkpoint', help='Checkpoint file path.')
    parser.add_argument(
        '--show',
        action='store_true',
        help='Whether to show the prediction result in a window.')
    parser.add_argument(
        '--show-dir',
        type=str,
        help='The directory to save the visualization image.')
    parser.add_argument('--device', help='Device used for inference')
    args = parser.parse_args()

    # build the model from a config file and a checkpoint file
    try:
        # With no explicit checkpoint, ``pretrained=True`` lets the
        # inferencer download the default weights for the model name.
        pretrained = args.checkpoint or True
        # BUG FIX: ``--device`` was parsed but never used; forward it so the
        # option actually selects the inference device.
        inferencer = ImageClassificationInferencer(
            args.model, pretrained=pretrained, device=args.device)
    except ValueError:
        raise ValueError(
            f'Unavailable model "{args.model}", you can specify find a model '
            'name or a config file or find a model name from '
            'https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html#all-checkpoints'  # noqa: E501
        )
    result = inferencer(args.img, show=args.show, show_dir=args.show_dir)[0]
    # show the results
    result.pop('pred_scores')  # pred_scores is too verbose for a demo.
    print_json(dump(result, file_format='json', indent=4))


if __name__ == '__main__':
    main()
# get SOTA accuracy 81.2 for 224 input ViT fine-tuning, reference is below:
# https://github.com/google-research/vision_transformer#available-vit-models
# cfg: vit-base-p16_ft-4xb544_in1k-224_ipu train model in fp16 precision
# 8 epoch, 2176 batch size, 16 IPUs, 4 replicas, model Tput = 5600 images, training time 0.6 hour roughly
# Config stem shared by the training and evaluation commands below.
cfg_name=vit-base-p16_ft-4xb544_in1k-224_ipu
# Train on 4 IPU replicas with validation disabled; only if training
# succeeds (&&), evaluate the latest checkpoint's accuracy on the IPU.
python3 tools/train.py configs/vision_transformer/${cfg_name}.py --ipu-replicas 4 --no-validate &&
python3 tools/test.py configs/vision_transformer/${cfg_name}.py work_dirs/${cfg_name}/latest.pth --metrics accuracy --device ipu
ARG PYTORCH="1.12.1"
ARG CUDA="11.3"
ARG CUDNN="8"

FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel

# fetch the key refer to https://forums.developer.nvidia.com/t/18-04-cuda-docker-image-is-broken/212892/9
# BUG FIX: removed the stray trailing "32" — apt-key would treat it as a
# second key-file argument and fail the build.
RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub

# Restrict CUDA compilation targets and compress fatbins to keep the image small.
ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX"
ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
# BUG FIX: restored the "$" command-substitution markers that were lost;
# the previous value exported the literal string "(dirname(which conda))/../".
ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../"

# System deps for OpenCV/matplotlib rendering and building extensions.
# (Deduplicated libsm6/libxext6, which were listed twice.)
RUN apt-get update && apt-get install -y ffmpeg git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Install MIM
RUN pip install openmim
# Install MMPretrain
RUN conda clean --all
RUN git clone https://github.com/open-mmlab/mmpretrain.git
WORKDIR ./mmpretrain
RUN mim install --no-cache-dir -e .
ARG PYTORCH="2.0.1"
ARG CUDA="11.7"
ARG CUDNN="8"

FROM pytorch/torchserve:latest-gpu

ARG MMPRE="1.2.0"

# Force unbuffered stdout/stderr so container logs stream immediately.
ENV PYTHONUNBUFFERED=TRUE
ENV HOME="/home/model-server"
ENV PATH="/opt/conda/bin:$HOME/.local/bin:$PATH"
# BUG FIX: "RUN export FORCE_CUDA=1" only set the variable inside that one
# RUN layer's shell and was lost immediately; ENV persists it for all
# subsequent build steps and at runtime.
ENV FORCE_CUDA=1

# TORCHSEVER
RUN pip install torchserve torch-model-archiver
RUN pip install nvgpu

# OPEN-MMLAB
# NOTE(review): these ARG redeclarations are currently unused after FROM;
# kept for compatibility with callers that pass --build-arg.
ARG PYTORCH
ARG CUDA
RUN pip install openmim
RUN mim install mmpretrain==${MMPRE}

RUN mkdir -p $HOME/tmp
COPY --chown=model-server entrypoint.sh $HOME/.local/bin/entrypoint.sh
RUN chmod +x $HOME/.local/bin/entrypoint.sh
COPY --chown=model-server config.properties $HOME/config.properties

# Inference, management, and metrics APIs respectively.
EXPOSE 8080 8081 8082

USER model-server
WORKDIR $HOME
ENV TEMP=$HOME/tmp
ENTRYPOINT ["/home/model-server/.local/bin/entrypoint.sh"]
CMD ["serve"]
# TorchServe runtime configuration.
# Bind the three TorchServe APIs on all interfaces (container networking).
inference_address=http://0.0.0.0:8080
management_address=http://0.0.0.0:8081
metrics_address=http://0.0.0.0:8082
# Directory scanned for model archives (.mar files).
model_store=/home/model-server/model-store
# Load every model found in the model store at startup.
load_models=all
#!/bin/bash
# Container entrypoint: "serve" launches TorchServe with the baked-in
# config; any other argument list is executed as a command instead.
set -e

case "$1" in
    serve)
        shift 1
        torchserve --start --ts-config /home/model-server/config.properties
        ;;
    *)
        eval "$@"
        ;;
esac

# prevent docker exit
tail -f /dev/null
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment