Commit 502f4fb9 authored by limm's avatar limm
Browse files

add tools and service module

parent 68661967
Pipeline #2809 canceled with stages
set -e

# Smoke-test the mmdeploy_runtime Python wheel: install the wheel matching
# the current interpreter version, then run a one-shot classification on a
# demo image. Exits non-zero if inference raises.
WORKSPACE="."
MODEL_DIR="/__w/mmdeploy/testmodel/mmcls"
SDK_PYTHON_DIR="mmdeploy_runtime"
if [[ -n "$1" ]]; then
    WORKSPACE="$1"
fi
cd "$WORKSPACE"
cd "$SDK_PYTHON_DIR"
# e.g. "3.8.10" -> "38", used to select the matching cpXX wheel
PY_VERSION=$(python3 -V | awk '{print $2}' | awk '{split($0, a, "."); print a[1]a[2]}')
# FIX: select the wheel with shell globbing directly; the original piped
# `ls` through grep with an unquoted glob pattern, which the shell could
# expand before grep ever saw it.
test_pkg=$(ls mmdeploy_runtime-*cp"${PY_VERSION}"*x86_64.whl)
python3 -m pip install "$test_pkg" --force-reinstall
python3 -m pip install opencv-python
# FIX: restored the indentation of the embedded snippet (it was flattened
# and would not parse) and narrowed the bare `except:` to `except Exception`.
code="
import cv2
from mmdeploy_runtime import Classifier
import sys
handle = Classifier('$MODEL_DIR', 'cpu', 0)
img = cv2.imread('$MODEL_DIR/demo.jpg')
try:
    res = handle(img)
    print(res)
except Exception:
    print('error')
    sys.exit(1)
"
python3 -c "$code"
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
import numpy as np
import torch
from mmengine import DictAction
from prettytable import PrettyTable
from mmdeploy.apis import build_task_processor
from mmdeploy.utils import get_root_logger
from mmdeploy.utils.config_utils import (Backend, get_backend, get_input_shape,
load_config)
from mmdeploy.utils.timer import TimeCounter
def parse_args():
    """Build and parse the command-line arguments for the latency test tool.

    Returns:
        argparse.Namespace: Parsed arguments.
    """
    arg_parser = argparse.ArgumentParser(
        description='MMDeploy Model Latency Test Tool.')
    arg_parser.add_argument('deploy_cfg', help='Deploy config path')
    arg_parser.add_argument('model_cfg', help='Model config path')
    arg_parser.add_argument('image_dir', help='Input directory to image files')
    arg_parser.add_argument(
        '--model', type=str, nargs='+', help='Input model files.')
    arg_parser.add_argument(
        '--device', help='device type for inference', default='cuda:0')
    arg_parser.add_argument(
        '--shape',
        type=str,
        help='Input shape to test in `HxW` format, e.g., `800x1344`',
        default=None)
    arg_parser.add_argument(
        '--warmup',
        type=int,
        help='warmup iterations before counting inference latency.',
        default=10)
    arg_parser.add_argument(
        '--num-iter',
        type=int,
        help='Number of iterations to run the inference.',
        default=100)
    arg_parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    arg_parser.add_argument(
        '--batch-size', type=int, default=1, help='the batch size for test.')
    arg_parser.add_argument(
        '--img-ext',
        type=str,
        nargs='+',
        help='the file extensions for input images from `image_dir`.',
        default=['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif'])
    return arg_parser.parse_args()
def get_images(image_dir, extensions):
    """Recursively collect image files under ``image_dir``.

    Args:
        image_dir (str): Directory to search (recursive).
        extensions (Iterable[str]): Accepted extensions including the leading
            dot, e.g. ``'.jpg'``. Matching is case-insensitive.

    Returns:
        list[str]: Paths of all matching files.
    """
    # Normalize the filter once so '.JPG' works on either side; the original
    # lowercased only the file's extension, so an upper-case entry in
    # `extensions` could never match.
    allowed = {ext.lower() for ext in extensions}
    files = glob.glob(osp.join(image_dir, '**', '*'), recursive=True)
    return [f for f in files if osp.splitext(f)[1].lower() in allowed]
class TorchWrapper(torch.nn.Module):
    """Wrap a PyTorch model so its ``test_step`` is timed by ``TimeCounter``
    under the PYTORCH backend label, matching how backend models are timed."""

    def __init__(self, model):
        super(TorchWrapper, self).__init__()
        # The wrapped model whose test_step is delegated to below.
        self.model = model

    @TimeCounter.count_time(Backend.PYTORCH.value)
    def test_step(self, *args, **kwargs):
        # Pure delegation; the decorator records the call's latency.
        return self.model.test_step(*args, **kwargs)
def main():
    """Measure inference latency of a PyTorch checkpoint or a deployed
    backend model over a directory of images, then print the settings and
    the collected timing statistics."""
    args = parse_args()
    deploy_cfg_path = args.deploy_cfg
    model_cfg_path = args.model_cfg
    logger = get_root_logger()

    # load deploy_cfg
    deploy_cfg, model_cfg = load_config(deploy_cfg_path, model_cfg_path)
    # merge options for model cfg, then re-load so the merge takes effect
    if args.cfg_options is not None:
        model_cfg.merge_from_dict(args.cfg_options)
        deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)

    # --shape is given as HxW on the CLI but input_shape is stored [w, h]
    if args.shape is not None:
        h, w = [int(_) for _ in args.shape.split('x')]
        input_shape = [w, h]
    else:
        input_shape = get_input_shape(deploy_cfg)
        assert input_shape is not None, 'Input_shape should not be None'

    # create model and inputs
    task_processor = build_task_processor(model_cfg, deploy_cfg, args.device)

    # a .pth/.pt file is profiled through PyTorch; anything else is treated
    # as a deployed backend model
    model_ext = osp.splitext(args.model[0])[1]
    is_pytorch = model_ext in ['.pth', '.pt']
    if is_pytorch:
        # load pytorch model
        model = task_processor.build_pytorch_model(args.model[0])
        model = TorchWrapper(model)
        backend = Backend.PYTORCH.value
    else:
        # load the model of the backend
        model = task_processor.build_backend_model(args.model)
        backend = get_backend(deploy_cfg).value
    model = model.eval().to(args.device)

    # GPU timing needs an explicit sync; also enable cudnn autotuning
    is_device_cpu = args.device == 'cpu'
    with_sync = not is_device_cpu
    if not is_device_cpu:
        torch.backends.cudnn.benchmark = True

    image_files = get_images(args.image_dir, args.img_ext)
    nrof_image = len(image_files)
    assert nrof_image > 0, f'No image files found in {args.image_dir}'
    logger.info(f'Found totally {nrof_image} image files in {args.image_dir}')

    # pad the file list (seeded random resampling for reproducibility) so it
    # covers warmup + measured iterations at the requested batch size
    total_nrof_image = (args.num_iter + args.warmup) * args.batch_size
    if nrof_image < total_nrof_image:
        np.random.seed(1234)
        image_files += [
            image_files[i]
            for i in np.random.choice(nrof_image, total_nrof_image -
                                      nrof_image)
        ]
    image_files = image_files[:total_nrof_image]

    with TimeCounter.activate(
            warmup=args.warmup,
            log_interval=20,
            with_sync=with_sync,
            batch_size=args.batch_size):
        for i in range(0, total_nrof_image, args.batch_size):
            batch_files = image_files[i:(i + args.batch_size)]
            data, _ = task_processor.create_input(
                batch_files,
                input_shape,
                data_preprocessor=getattr(model, 'data_preprocessor', None))
            model.test_step(data)

    print('----- Settings:')
    settings = PrettyTable()
    settings.header = False
    settings.add_row(['batch size', args.batch_size])
    # input_shape is [w, h]; display as HxW to match the --shape argument
    settings.add_row(['shape', f'{input_shape[1]}x{input_shape[0]}'])
    settings.add_row(['iterations', args.num_iter])
    settings.add_row(['warmup', args.warmup])
    print(settings)
    print('----- Results:')
    TimeCounter.print_stats(backend)


if __name__ == '__main__':
    main()
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence
import mmcv
from mmengine import Config, FileClient
from torch.utils.data import Dataset
from mmdeploy.apis import build_task_processor
class QuantizationImageDataset(Dataset):
    """Dataset that feeds directory images through the task processor's
    preprocessing pipeline, for post-training quantization calibration.

    Args:
        path (str): Directory containing the calibration images.
        deploy_cfg (Config): Deployment config used to build the task
            processor.
        model_cfg (Config): Model config used to build the task processor.
        file_client_args (dict, optional): Arguments for the FileClient;
            inferred from ``path`` when None.
        extensions (Sequence[str]): Accepted image extensions
            (case-insensitive).
    """

    def __init__(
        self,
        path: str,
        deploy_cfg: Config,
        model_cfg: Config,
        file_client_args: Optional[dict] = None,
        extensions: Sequence[str] = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp',
                                     '.pgm', '.tif'),
    ):
        super().__init__()
        task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
        self.task_processor = task_processor
        self.samples = []
        self.extensions = tuple(set([i.lower() for i in extensions]))
        self.file_client = FileClient.infer_client(file_client_args, path)
        self.path = path
        assert self.file_client.isdir(path)
        files = list(
            self.file_client.list_dir_or_file(
                path,
                list_dir=False,
                list_file=True,
                recursive=False,
            ))
        for file in files:
            # The extension check only needs the bare file name; join the
            # full path exactly once. The original called join_path with a
            # single argument for the check and then shadowed the ``path``
            # argument inside the loop.
            if self.is_valid_file(file):
                self.samples.append(
                    self.file_client.join_path(self.path, file))

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, index):
        # Read the image and run it through the deployment preprocessing;
        # only the model input (index 0) is returned.
        sample = self.samples[index]
        image = mmcv.imread(sample)
        data = self.task_processor.create_input(image)
        return data[0]

    def is_valid_file(self, filename: str) -> bool:
        """Check if a file is a valid sample."""
        return filename.lower().endswith(self.extensions)
This diff is collapsed.
#!/bin/sh
set -e

# Build, test and push the openmmlab/mmdeploy base docker image.
# Usage: <script> <host-ip> [port]
ip=${1}
# FIX: the original "${2:8585}" is a substring expansion (offset 8585) and
# always produced an empty string; ":-" supplies the intended default.
port=${2:-8585}
date_today=`date +'%Y%m%d'`

# create http server serving the local TensorRT archive; background it so
# the script can continue (the original nohup command blocked forever)
nohup python3 -m http.server --directory /data2/shared/nvidia "$port" > tmp.log 2>&1 &

export TENSORRT_URL=http://$ip:$port/TensorRT-8.2.3.0.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz
export TENSORRT_VERSION=8.2.3.0
export CUDA_INT=113
export TAG=ubuntu20.04-cuda11.3

# build docker image
docker build ./docker/Base/ -t openmmlab/mmdeploy:$TAG \
    --build-arg CUDA_INT=$CUDA_INT \
    --build-arg TENSORRT_URL=${TENSORRT_URL} \
    --build-arg TENSORRT_VERSION=${TENSORRT_VERSION}
docker tag openmmlab/mmdeploy:$TAG openmmlab/mmdeploy:${TAG}-${date_today}

# test docker image
docker run --gpus=all -itd \
    -v /data2/benchmark:/root/workspace/openmmlab-data \
    -v /data2/checkpoints:/root/workspace/mmdeploy_checkpoints \
    -v ~/mmdeploy:/root/workspace/mmdeploy \
    openmmlab/mmdeploy:$TAG

# push to docker hub
docker login
docker push openmmlab/mmdeploy:$TAG
docker push openmmlab/mmdeploy:$TAG-${date_today}
#! /bin/bash
# check python version is 3.8 or not
check_python_38(){
    # Require exactly python 3.8 (prebuilt torch/mmcv wheels below are cp38).
    MAJOR=`python3 --version | awk '{print $2}' | awk -F . '{print $1}'`
    MINOR=`python3 --version | awk '{print $2}' | awk -F . '{print $2}'`
    # FIX: exit non-zero on the wrong interpreter; the original exited 0,
    # reporting success to any caller/CI despite aborting.
    if [ ${MAJOR} -ne 3 ];then
        echo 'This script needs python==3.8 +_+'
        exit 1
    fi
    if [ ${MINOR} -ne 8 ];then
        echo 'This script needs python==3.8 +_+'
        exit 1
    fi
}
install_torch() {
    # Install NVIDIA's PyTorch 1.11 aarch64 wheel unless torch already
    # imports successfully.
    torch_ver=`python3 -c "import torch; print(torch.__version__)"`
    if [ -n "$torch_ver" ];then
        return 0
    fi
    TORCH_WHL="torch-1.11.0-cp38-cp38-linux_aarch64.whl"
    # download once; re-runs reuse the cached wheel
    if [ ! -e "${TORCH_WHL}" ];then
        wget -q --show-progress https://nvidia.box.com/shared/static/ssf2v7pf5i245fk4i0q926hy4imzs2ph.whl -O ${TORCH_WHL}
    fi
    python3 -m pip install ${TORCH_WHL}
    python3 -m pip install numpy
    sudo apt install libopenblas-dev -y
    # sanity check: print the now-importable version
    python3 -c "import torch; print(torch.__version__)"
}
install_torchvision() {
    # Build torchvision v0.11.1 from source unless it already imports.
    tv_ver=`python3 -c "import torchvision; print(torchvision.__version__)"`
    if [ -n "$tv_ver" ];then
        return 0
    fi
    # image/video codec headers required by the source build
    sudo apt-get install libjpeg-dev zlib1g-dev libpython3-dev libavcodec-dev libavformat-dev libswscale-dev -y
    if [ ! -e "torchvision" ];then
        git clone https://github.com/pytorch/vision torchvision --branch v0.11.1 --depth=1
    fi
    cd torchvision
    export BUILD_VERSION=0.11.1
    python3 -m pip install -e .
    # sanity check: print the now-importable version
    python3 -c "import torchvision; print(torchvision.__version__)"
    cd -
}
install_cmake() {
    # Install cmake via pip only when no cmake binary is already on PATH.
    if ! command -v cmake > /dev/null 2>&1; then
        python3 -m pip install cmake
    fi
    echo "cmake installed $(which cmake)"
}
install_tensorrt() {
    # TensorRT ships with JetPack; record the paths in ~/mmdeploy.env for
    # later shells and export them for the current process.
    echo 'export PYTHONPATH=/usr/lib/python3.8/dist-packages:${PYTHONPATH}' >> ~/mmdeploy.env
    export PYTHONPATH=/usr/lib/python3.8/dist-packages:${PYTHONPATH}
    echo 'export TENSORRT_DIR=/usr/include/aarch64-linux-gnu' >> ~/mmdeploy.env
    export TENSORRT_DIR=/usr/include/aarch64-linux-gnu
    echo 'export PATH=$PATH:/usr/local/cuda/bin' >> ~/mmdeploy.env
    export PATH=$PATH:/usr/local/cuda/bin
    echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64' >> ~/mmdeploy.env
    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64
    echo 'export CUDA_HOME=/usr/local/cuda-11' >> ~/mmdeploy.env
    export CUDA_HOME=/usr/local/cuda-11
    echo 'export CUDA_ROOT=/usr/local/cuda-11' >> ~/mmdeploy.env
    export CUDA_ROOT=/usr/local/cuda-11
    # FIX: use python3 like every other step in this script; the original
    # invoked plain `python`, which may be absent or be python2.
    python3 -c "import tensorrt; print(tensorrt.__version__)"
}
install_mmcv_pycuda() {
    # Install mmcv-full and pycuda unless mmcv already imports; use prebuilt
    # wheels on Jetson Orin (t186ref / R34 / rev 1.1), otherwise build from
    # source.
    version=`python3 -c "import mmcv; print(mmcv.__version__)"`
    if [ -n "$version" ];then
        return 0
    fi
    # try prebuilt .whl — identify the board from /etc/nv_tegra_release
    board=`cat /etc/nv_tegra_release | awk '{print $9}'`
    release=`cat /etc/nv_tegra_release | awk '{print $2}'`
    revision=`cat /etc/nv_tegra_release | awk '{print $5}'`
    if [ ${board} = "t186ref," ];then
        if [ ${release} = "R34," ];then
            if [ ${revision} = "1.1," ];then
                # use prebuilt whl
                # FIX: the original had a stray "--" glued to the URL
                # ("--https://..."), which made wget fail outright.
                wget -q --show-progress https://github.com/tpoisonooo/mmcv-jetson-orin-prebuilt-whl/raw/main/mmcv_full-1.5.1-cp38-cp38-linux_aarch64.whl
                python3 -m pip install mmcv_full-1.5.1-cp38-cp38-linux_aarch64.whl
                wget https://github.com/tpoisonooo/mmcv-jetson-orin-prebuilt-whl/raw/main/pycuda-2022.1-cp38-cp38-linux_aarch64.whl
                python3 -m pip install pycuda-2022.1-cp38-cp38-linux_aarch64.whl
            fi
        fi
    elif [ ! -e "mmcv" ];then
        # source build mmcv and pycuda
        sudo apt-get install -y libssl-dev
        git clone https://github.com/open-mmlab/mmcv.git --branch v1.5.1 --depth=1
        cd mmcv
        echo 'Building mmcv-full with MMCV_WITH_OPS=1 and pycuda, it may take an hour, please wait..'
        MMCV_WITH_OPS=1 python3 -m pip install -e .
        python3 -m pip install pycuda
        cd -
    fi
    # final sanity check; fails loudly if neither path installed mmcv
    python3 -c "import mmcv; print(mmcv.__version__)"
}
install_pplcv() {
    # Clone and build ppl.cv (CUDA), and record its location for the
    # mmdeploy cmake configure step.
    if [ ! -e "ppl.cv" ];then
        git clone https://github.com/openppl-public/ppl.cv.git --depth=1 --recursive
    fi
    cd ppl.cv
    ./build.sh cuda
    # FIX: write `export` like every other entry in ~/mmdeploy.env; the
    # original wrote a bare assignment that sourcing would not export.
    echo "export PPLCV_DIR=$(pwd)" >> ~/mmdeploy.env
    export PPLCV_DIR=$(pwd)
    cd -
}
# Build the mmdeploy SDK with the TensorRT backend (+ python API and
# examples) and install the python package in develop mode.
install_mmdeploy() {
    sudo apt-get install -y pkg-config libhdf5-103 libhdf5-dev libspdlog-dev
    python3 -m pip install onnx
    python3 -m pip install versioned-hdf5
    # build and install mmdeploy
    cd ../mmdeploy
    git submodule init
    git submodule update
    if [ ! -e "build" ];then
        mkdir -p build
    fi
    cd build
    # PPLCV_DIR is exported by install_pplcv earlier in this script
    cmake .. \
        -DMMDEPLOY_BUILD_SDK=ON \
        -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \
        -DMMDEPLOY_BUILD_EXAMPLES=ON \
        -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \
        -DMMDEPLOY_TARGET_BACKENDS="trt" \
        -DMMDEPLOY_CODEBASES=all \
        -Dpplcv_DIR=${PPLCV_DIR}/cuda-build/install/lib/cmake/ppl
    make -j 7 && make install
    cd -
    python3 -m pip install -v -e .
    python3 tools/check_env.py
}
show_env() {
    # Closing banner: tell the user how to load the recorded environment.
    local bar="----------------------------------------------------------------------------------------------------------"
    echo ""
    echo "$bar"
    echo '>> Install finished, `source ~/mmdeploy.env` to setup your environment !'
    cat ~/mmdeploy.env
    echo "$bar"
}
# setup env: start a fresh ~/mmdeploy.env that accumulates every variable
# this install needs, so the user can `source` it in later shells
echo "" > ~/mmdeploy.env
# numpy >1.19.4 can SIGILL on aarch64 without this
echo 'export OPENBLAS_CORETYPE=ARMV8' >> ~/mmdeploy.env
export OPENBLAS_CORETYPE=ARMV8
echo 'export ARCH=aarch64' >> ~/mmdeploy.env
export ARCH=aarch64
# abort unless the interpreter is python 3.8 (prebuilt wheels are cp38)
check_python_38
# dependencies are fetched and built in a sibling dir ../mmdeploy-dep
if [ ! -e "../mmdeploy-dep" ];then
    mkdir ../mmdeploy-dep
fi
cd ../mmdeploy-dep
echo $(pwd)
# run the install stages in dependency order
install_torch
install_torchvision
install_cmake
install_tensorrt
install_mmcv_pycuda
install_pplcv
install_mmdeploy
show_env
#!/bin/bash
# build_linux_nvidia.sh
# Date: 08-03-2022, 24-04-2022
#
# Run this script to build MMDeploy SDK and install necessary prerequisites.
# This script will also setup python venv and generate prebuild binaries if requested.
#
#####
# Build vars
BUILD_TYPE="Release"
ARCH=$(uname -i)
PROC_NUM=$(nproc)
# Default GCC (prereqs() switches to g++-7 when the system gcc is too old)
GCC_COMPILER="g++"
#####
# Directories
# WORKING_DIR must correspond to MMDeploy root dir
WORKING_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../.." && pwd )"
PPLCV_DIR=${WORKING_DIR}/ppl.cv
MMDEPLOY_DIR=${WORKING_DIR}
#####
# Versions
PPLCV_VER="0.7.0"
CMAKE_VER="3.23.0"
#####
# Flags (each may be overridden interactively below)
# WITH_PYTHON: Install misc. dependencies in the active venv
WITH_PYTHON=1
# WITH_CLEAN: Remove build output dirs
WITH_CLEAN=1
# WITH_PREBUILD: Generate prebuild archives
WITH_PREBUILD=0
# WITH_UNATTENDED: Unattended install, skip/use default options
WITH_UNATTENDED=0
#####
# Prefix: Set install prefix for ppl.cv, mmdeploy SDK depending on arch
if [[ "$ARCH" == aarch64 ]]; then
    INSTALL_PREFIX="/usr/local/aarch64-linux-gnu"
else
    INSTALL_PREFIX="/usr/local"
fi
PYTHON_VENV_DIR=${WORKING_DIR}/venv-mmdeploy
# $1 = stage to run (see package_list), $2 = "auto" for unattended mode
appargument1=$1
appargument2=$2
#####
# helper functions
# Colored output helpers: print $1 in green / red / blue, no-op when empty.
echo_green() { if [ -n "$1" ]; then echo "$(tput setaf 10)$1$(tput sgr 0)"; fi; }
echo_red()   { if [ -n "$1" ]; then echo "$(tput setaf 1)$1$(tput sgr 0)"; fi; }
echo_blue()  { if [ -n "$1" ]; then echo "$(tput setaf 4)$1$(tput sgr 0)"; fi; }
# Return 0 when $1 equals one of the remaining arguments, 1 otherwise.
contains_element () {
    local needle="$1"
    shift
    local candidate
    for candidate in "$@"; do
        if [[ "$candidate" == "$needle" ]]; then
            return 0
        fi
    done
    return 1
}
# Pack up to four dot-separated version components into one zero-padded
# integer so versions can be compared numerically with -ge/-lt.
version() {
    echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'
}
# Ask a yes/no question ($1) and encode the answer in the return status:
#   1 = yes, 0 = no, 2 = default (ENTER pressed, or unattended mode);
#   'q' exits the script. NOTE: callers read $? immediately — here 0 means
#   "no", not "success".
prompt_yesno() {
    if [ -n "$1" ]; then
        echo_blue "$1"
    fi
    if [[ $WITH_UNATTENDED -eq 1 ]]
    then
        echo_green "Unattended install, selecting default option"
        return 2
    else
        echo_blue "(y/n/q) or press [ENTER] to select default option"
        read -p "?" -n 1 -r
        echo
        if [[ $REPLY =~ ^[Yy]$ ]]
        then
            return 1
        elif [[ $REPLY =~ ^[Nn]$ ]]
        then
            return 0
        elif [[ $REPLY = "" ]]
        then
            echo "Selecting default option..."
            return 2
        elif [[ $REPLY =~ ^[Qq]$ ]]
        then
            echo_green "Quitting!"
            exit
        else
            # re-prompt on any other key; NOTE(review): the question text is
            # not passed again, so the retry shows no message
            echo_red "Invalid argument. Try again"
            prompt_yesno
        fi
    fi
}
# Install/verify build prerequisites: cmake >= 3.14, gcc >= 7, optional
# spdlog-from-source, TensorRT/cuDNN env vars, optional OpenCV.
prereqs() {
    echo_green "Installing prerequisites..."

    # cmake check & install
    echo_green "Checking your cmake version..."
    CMAKE_DETECT_VER=$(cmake --version | grep -oP '(?<=version).*')
    if [ $(version $CMAKE_DETECT_VER) -ge $(version "3.14.0") ]; then
        echo "Cmake version $CMAKE_DETECT_VER is up to date"
    else
        echo "CMake too old, purging existing cmake and installing ${CMAKE_VER}..."
        # purge existing
        sudo apt-get purge cmake
        sudo snap remove cmake
        # install prebuild
        wget https://github.com/Kitware/CMake/releases/download/v${CMAKE_VER}/cmake-${CMAKE_VER}-linux-${ARCH}.sh
        chmod +x cmake-${CMAKE_VER}-linux-${ARCH}.sh
        sudo ./cmake-${CMAKE_VER}-linux-${ARCH}.sh --prefix=/usr --skip-license
    fi

    # gcc-7 check
    echo_green "Checking your gcc version..."
    GCC_DETECT_VER=$(gcc --version | grep -oP '(?<=\)).*' -m1)
    if [ $(version $GCC_DETECT_VER) -ge $(version "7.0.0") ]; then
        echo "GCC version $GCC_DETECT_VER is up to date"
    else
        # FIX: the original messages referenced ${CMAKE_VER} and talked
        # about purging cmake in this gcc branch.
        echo "gcc version too old, installing gcc-7..."
        # Add repository if ubuntu < 18.04
        sudo add-apt-repository ppa:ubuntu-toolchain-r/test
        sudo apt-get update
        sudo apt-get install gcc-7
        sudo apt-get install g++-7
        GCC_COMPILER="g++-7"
    fi

    # spdlog
    echo_green "Checking spdlog version..."
    prompt_yesno "Install latest spdlog from source? (Default:no)"
    local res=$?
    if [[ $res -eq 1 ]] # || [ $res -eq 2 ]
    then
        echo_green "Building and installing latest spdlog from source"
        # remove libspdlog, as it might be an old version
        sudo apt-get remove libspdlog-dev -y
        git clone https://github.com/gabime/spdlog.git spdlog
        cd spdlog
        git pull
        git checkout tags/v1.8.1
        mkdir build -p && cd build
        # we must build spdlog with -fPIC enabled
        cmake .. -DCMAKE_POSITION_INDEPENDENT_CODE=ON && make -j${PROC_NUM}
        sudo make install
        sudo ldconfig
    fi

    # tensorrt check
    echo_green "Check your TensorRT version:"
    ## Check if ${TENSORRT_DIR} env variable has been set
    if [ -d "${TENSORRT_DIR}" ]; then
        echo "TENSORRT_DIR env. variable has been set ${TENSORRT_DIR}"
    else
        echo_red "TENSORRT_DIR env. variable has NOT been set."
        if [[ "$ARCH" == aarch64 ]]; then
            echo "Added TENSORRT_DIR, CUDNN_DIR to env."
            echo 'export TENSORRT_DIR="/usr/include/'${ARCH}'-linux-gnu/"' >> ${HOME}/.bashrc
            echo 'export CUDNN_DIR="/usr/include/'${ARCH}'-linux-gnu/"' >> ${HOME}/.bashrc
            echo 'export LD_LIBRARY_PATH="/usr/lib/'${ARCH}'-linux-gnu/:$LD_LIBRARY_PATH"' >> ${HOME}/.bashrc
            # sourcing .bashrc has no effect inside a script, so also export
            # for the current process. FIX: the original embedded literal
            # single quotes in these values (e.g. /usr/include/'aarch64'-...),
            # producing paths that do not exist.
            export TENSORRT_DIR="/usr/include/${ARCH}-linux-gnu/"
            export CUDNN_DIR="/usr/include/${ARCH}-linux-gnu/"
            export LD_LIBRARY_PATH="/usr/lib/${ARCH}-linux-gnu/:$LD_LIBRARY_PATH"
        else
            echo_red "Please Install TensorRT, CUDNN and add TENSORRT_DIR, CUDNN_DIR to environment variables before running this script!"
            exit
        fi
    fi

    # Determine TensorRT version and set paths accordingly
    echo "Checking TensorRT version...Please verify the detected versions below:"
    if [[ "$ARCH" == aarch64 ]]; then
        cat /usr/include/${ARCH}-linux-gnu/NvInferVersion.h | grep NV_TENSORRT
    else
        cat ${TENSORRT_DIR}/include/NvInferVersion.h | grep NV_TENSORRT
    fi
    prompt_yesno "Is TensorRT >=8.0.1.6 installed? (Always installed on Jetson) (Default:yes)"
    local res=$?
    if [[ $res -eq 1 ]] || [ $res -eq 2 ]
    then
        echo "TensorRT appears to be installed..."
    else
        echo_red "Error: You must install TensorRT before installing MMDeploy!"
        exit
    fi

    prompt_yesno "Install OpenCV? (Always installed on Jetson) (Default:no)"
    local res=$?
    if [[ $res -eq 1 ]] # || [ $res -eq 2 ]
    then
        echo "Installing libopencv-dev..."
        # opencv
        sudo apt-get install libopencv-dev
    fi
}
# Create (or reinstall) a python venv for mmdeploy and optionally install
# PyTorch / torchvision / mmcv into it.
py_venv() {
    ## python venv
    echo_green "Installing python venv..."
    # check for python installed version — pip for 3.6 needs a pinned
    # get-pip.py
    pyv="$(python3 -V 2>&1)"
    pyv_old="Python 3.6"
    if echo "$pyv" | grep -q "$pyv_old"; then
        # use python 3.6
        curl https://bootstrap.pypa.io/pip/3.6/get-pip.py -o get-pip.py
    else
        # use python >=3.7
        curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
    fi
    # dependencies
    sudo apt-get install protobuf-compiler libprotoc-dev libssl-dev curl ninja-build -y
    sudo apt-get install libopenblas-dev python3-venv python3-dev python3-setuptools -y
    sudo python3 get-pip.py
    pip3 install testresources
    pip3 install --upgrade setuptools wheel
    if [ -d "${PYTHON_VENV_DIR}" ]; then
        prompt_yesno "Reinstall existing Python venv ${PYTHON_VENV_DIR}? (Default:no)"
        local res=$?
        if [[ $res -eq 1 ]]
        then
            rm -r ${PYTHON_VENV_DIR}
            python3 -m venv ${PYTHON_VENV_DIR} --system-site-packages #system site packages to keep trt from system installation
        fi
    else
        python3 -m venv ${PYTHON_VENV_DIR} --system-site-packages #system site packages to keep trt from system installation
    fi
    # everything below runs inside the venv
    source ${PYTHON_VENV_DIR}/bin/activate
    python3 get-pip.py
    pip3 install testresources
    pip3 install --upgrade setuptools wheel
    # Latest PIL is not compatible with mmcv=1.4.1
    pip3 install Pillow==7.0.0
    if [[ "$ARCH" == aarch64 ]]
    then
        # protofbuf on jetson is quite old - must be upgraded
        pip3 install --upgrade protobuf
        # Install numpy >1.19.4 might give "Illegal instruction (core dumped)" on Jetson/aarch64
        # To solve it, we should set OPENBLAS_CORETYPE
        echo 'export OPENBLAS_CORETYPE=ARMV8' >> ~/.bashrc
        # sourcing in bash script won't set the env. variables so we will set them temporarily
        export OPENBLAS_CORETYPE=ARMV8
    fi
    pip3 install numpy
    pip3 install opencv-python
    pip3 install matplotlib
    prompt_yesno "Install PyTorch, Torchvision, mmcv in the active venv? (Default:no)"
    local res=$?
    if [[ $res -eq 1 ]]
    then
        # pytorch, torchvision, torchaudio
        if [[ "$ARCH" == aarch64 ]]
        then
            # pytorch: NVIDIA's prebuilt wheel for Jetson
            wget https://nvidia.box.com/shared/static/fjtbno0vpo676a25cgvuqc1wty0fkkg6.whl -O torch-1.10.0-cp36-cp36m-linux_aarch64.whl
            pip3 install torch-1.10.0-cp36-cp36m-linux_aarch64.whl
            # torchvision: must be built from source on aarch64
            sudo apt-get install libjpeg-dev zlib1g-dev libpython3-dev libavcodec-dev libavformat-dev libswscale-dev -y
            sudo rm -r torchvision
            git clone --branch v0.11.1 https://github.com/pytorch/vision torchvision
            cd torchvision
            export BUILD_VERSION=0.11.1 # where 0.x.0 is the torchvision version
            python3 setup.py install
            cd ../
            # torchaudio (disabled)
            #sudo apt-get install -y sox libsox-dev libsox-fmt-all
            #sudo rm -r torchaudio
            #git clone -b v0.10.0 https://github.com/pytorch/audio torchaudio
            #cd torchaudio
            #git submodule update --init --recursive
            #python3 setup.py install
            #cd ../
            # mmcv
            pip3 uninstall mmcv-full
            pip3 install mmcv-full==1.4.1 -f https://download.openmmlab.com/mmcv/dist/cu102/torch1.10.0/index.html
        else
            pip3 install torch==1.10.0+cu113 torchvision==0.11.1+cu113 torchaudio==0.10.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
            # mmcv
            pip3 uninstall mmcv-full
            pip3 install mmcv-full==1.4.1 -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.10.0/index.html
        fi
    fi
    # cleanup
    rm get-pip.py
}
# Clone, build and install ppl.cv (CUDA) at the pinned version; optionally
# produce a prebuild tarball.
pplcv() {
    ## ppl.cv
    echo_green "Building and installing ppl.cv..."
    cd ${WORKING_DIR}
    echo_blue "checking out '${PPLCV_DIR}' pkg..."
    if [ -d "${PPLCV_DIR}" ]; then
        echo "Already exists! Checking out the requested version..."
    else
        git clone https://github.com/openppl-public/ppl.cv.git ${PPLCV_DIR}
    fi
    cd ${PPLCV_DIR}
    git pull
    git checkout tags/v${PPLCV_VER}
    # remove all build files
    if [[ $WITH_CLEAN -eq 1 ]]
    then
        sudo rm -r ${PPLCV_DIR}/build
    fi
    # build
    mkdir build -p && cd build
    # FIX: the original used undefined ${processor_num}, so make ran
    # single-threaded with a bare -j argument; use PROC_NUM like the rest
    # of the script.
    cmake -DPPLCV_USE_CUDA=ON -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} .. && make -j${PROC_NUM} && sudo make install
    sudo ldconfig
    # generate prebuild and pack into .tar.gz
    if [[ $WITH_PREBUILD -eq 1 ]]
    then
        sudo make DESTDIR=./prebuild install
        tar -zcvf ${WORKING_DIR}/pplcv_${PPLCV_VER}_cuda-${ARCH}-build.tar.gz -C ./prebuild/ .
    fi
}
# Build and install the mmdeploy SDK (TensorRT backend) and its examples;
# optionally produce a prebuild tarball.
mmdeploy(){
    ## mmdeploy SDK
    echo_green "Building and installing mmdeploysdk..."
    cd ${MMDEPLOY_DIR}
    MMDEPLOY_DETECT_VER=$(cat mmdeploy/version.py | grep -Eo '[0-9]\.[0-9].[0-9]+')
    # reinit submodules
    git submodule update --init --recursive
    # python dependencies
    if [[ $WITH_PYTHON -eq 1 ]]
    then
        source ${PYTHON_VENV_DIR}/bin/activate
        ## h5py (Required by mmdeploy)
        ## h5py not directly supported by jetson and must be built/installed manually
        sudo apt-get install pkg-config libhdf5-10* libhdf5-dev -y
        sudo pip3 install Cython
        sudo env H5PY_SETUP_REQUIRES=0 pip3 install -U h5py==2.9.0
        pip install -e .
    fi
    # remove all build files
    if [[ $WITH_CLEAN -eq 1 ]]
    then
        sudo rm -r ${MMDEPLOY_DIR}/build
    fi
    # build
    mkdir build -p && cd build
    cmake .. \
        -DMMDEPLOY_BUILD_SDK=ON \
        -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
        -DCMAKE_CXX_COMPILER=${GCC_COMPILER} \
        -Dpplcv_DIR=${INSTALL_PREFIX}/lib/cmake/ppl \
        -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \
        -DMMDEPLOY_TARGET_BACKENDS=trt \
        -DMMDEPLOY_CODEBASES=all \
        -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \
        -DTENSORRT_DIR=${TENSORRT_DIR} \
        -DCUDNN_DIR=${CUDNN_DIR}
    cmake --build . -- -j${PROC_NUM} && sudo make install
    sudo ldconfig
    # generate prebuild and pack into .tar.gz
    if [[ $WITH_PREBUILD -eq 1 ]]
    then
        sudo make DESTDIR=./prebuild install
        # FIX: the original interpolated undefined ${MMDEPLOY_VER}; use the
        # version detected above so the archive name carries the version.
        tar -zcvf ${WORKING_DIR}/mmdeploysdk_${MMDEPLOY_DETECT_VER}_${ARCH}-build.tar.gz -C ./prebuild/ .
    fi
    ## build mmdeploy examples
    cp -r ${MMDEPLOY_DIR}/demo/csrc ${MMDEPLOY_DIR}/build/example
    cd ${MMDEPLOY_DIR}/build/example
    # -f: the dir may not exist on a clean checkout (plain rm -r printed an
    # error)
    rm -rf build
    mkdir build -p && cd build
    cmake -DMMDeploy_DIR=${INSTALL_PREFIX} ..
    make all
}
# Run every stage in order: prerequisites, python venv, ppl.cv, mmdeploy SDK.
all() {
    local stage
    for stage in prereqs py_venv pplcv mmdeploy; do
        $stage
    done
}
#####
# supported package (each name is also the shell function that builds it)
package_list=(
    "all"
    "prereqs"
    "py_venv"
    "pplcv"
    "mmdeploy"
)
#####
# check input argument: $1 must name one of the supported stages
if contains_element "$appargument1" "${package_list[@]}"; then
    echo_green "Build and install '$appargument1'..."
else
    echo_red "Unsupported argument '$appargument1'. Use one of the following:"
    for i in ${package_list[@]}
    do
        echo $i
    done
    exit
fi
#####
# Unattended/auto install: $2 == "auto" makes prompt_yesno pick defaults
if [[ $appargument2 == "auto" ]]
then
    WITH_UNATTENDED=1
fi
#####
# Install dependencies in venv? (prompt_yesno: 1=yes, 0=no, 2=keep default)
prompt_yesno "Install misc. dependencies in the active venv? (Default:${WITH_PYTHON})"
res=$?
if [[ $res -eq 1 ]]
then
    WITH_PYTHON=1
elif [[ $res -eq 0 ]]
then
    WITH_PYTHON=0
fi
#####
# Clean previous build dirs?
prompt_yesno "Clean previous build dirs? (Default:${WITH_CLEAN})"
res=$?
if [[ $res -eq 1 ]]
then
    WITH_CLEAN=1
elif [[ $res -eq 0 ]]
then
    WITH_CLEAN=0
fi
#####
# Generate prebuild dirs?
prompt_yesno "Generate prebuild dirs? (Default:${WITH_PREBUILD})"
res=$?
if [[ $res -eq 1 ]]
then
    WITH_PREBUILD=1
elif [[ $res -eq 0 ]]
then
    WITH_PREBUILD=0
fi
# dispatch: the validated argument names the stage function to run
$appargument1
cd ${WORKING_DIR}
# update env. variables by replacing this shell with a fresh bash
exec bash
#!/bin/sh
set -e

# Build and push the manylinux2014 prebuild docker image.
# Usage: <script> <host-ip> [port]
ip=${1}
# FIX: the original "${2:8585}" is a substring expansion (offset 8585) and
# always produced an empty string; ":-" supplies the intended default.
port=${2:-8585}
date_today=`date +'%Y%m%d'`

# create http server serving CUDA/cuDNN/TensorRT archives; background it so
# the build can proceed (the original nohup command blocked forever)
nohup python3 -m http.server --directory /data2/shared/mmdeploy-manylinux2014_x86_64-cuda11.3 "$port" > tmp.log 2>&1 &

# NOTE(review): these hard-coded values override the command-line arguments
# parsed above, making $1/$2 dead — presumably a leftover; confirm intent.
export ip=10.1.52.36
export port=8585
export CUDA_URL=http://$ip:$port/cuda_11.3.0_465.19.01_linux.run
export CUDNN_URL=http://$ip:$port/cudnn-11.3-linux-x64-v8.2.1.32.tgz
export TENSORRT_URL=http://$ip:$port/TensorRT-8.2.3.0.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz
export TENSORRT_VERSION=8.2.3.0
export TAG=manylinux2014_x86_64-cuda11.3

# build docker image
docker build ./docker/prebuild/ -t openmmlab/mmdeploy:$TAG \
    --build-arg CUDA_URL=$CUDA_URL \
    --build-arg CUDNN_URL=$CUDNN_URL \
    --build-arg TENSORRT_URL=${TENSORRT_URL}

# push to docker hub
docker login
docker push openmmlab/mmdeploy:$TAG
# Copyright (c) OpenMMLab. All rights reserved.
import os
import sys
import time
from pathlib import Path
from ubuntu_utils import cmd_result, ensure_base_env, get_job
g_jobs = 2  # default parallel build jobs; overwritten in main() via get_job(sys.argv)
def install_protobuf(dep_dir) -> int:
    """Build and install protobuf 3.20.0 into ``<dep_dir>/pbinstall``.

    protobuf does not support repeated installs cleanly, so any previous
    install dir is removed and the tree is rebuilt from a clean state.

    Args:
        dep_dir (str): Directory where dependencies are downloaded and built.

    Returns:
        int: 0 on success, non-zero when configure or make failed.
    """
    print('-' * 10 + 'install protobuf' + '-' * 10)

    os.chdir(dep_dir)
    if not os.path.exists('protobuf-3.20.0'):
        os.system(
            'wget https://github.com/protocolbuffers/protobuf/releases/download/v3.20.0/protobuf-cpp-3.20.0.tar.gz'  # noqa: E501
        )
        os.system('tar xvf protobuf-cpp-3.20.0.tar.gz')

    os.chdir(os.path.join(dep_dir, 'protobuf-3.20.0'))

    install_dir = os.path.join(dep_dir, 'pbinstall')
    if os.path.exists(install_dir):
        os.system('rm -rf {}'.format(install_dir))

    os.system('make clean')  # best-effort; fails harmlessly on a fresh tree
    # Propagate build failures so main() can abort instead of continuing
    # with a half-installed protobuf (the original always returned 0).
    ret = os.system('./configure --prefix={}'.format(install_dir))
    if ret != 0:
        return ret
    ret = os.system('make -j {} && make install'.format(g_jobs))
    if ret != 0:
        return ret

    protoc = os.path.join(install_dir, 'bin', 'protoc')
    print('protoc \t:{}'.format(cmd_result('{} --version'.format(protoc))))

    # record PATH/LD_LIBRARY_PATH so later shells can find the new protobuf
    os.system(""" echo 'export PATH={}:$PATH' >> ~/mmdeploy.env """.format(
        os.path.join(install_dir, 'bin')))
    os.system(
        """ echo 'export LD_LIBRARY_PATH={}:$LD_LIBRARY_PATH' >> ~/mmdeploy.env """  # noqa: E501
        .format(os.path.join(install_dir, 'lib')))
    return 0
def install_pyncnn(dep_dir):
    """Clone, build and install ncnn (with python bindings) under
    ``dep_dir``.

    Args:
        dep_dir (str): Directory where dependencies are downloaded and built.

    Returns:
        str: Path to the installed ncnn cmake config directory
            (``.../build/install/lib/cmake/ncnn``).
    """
    print('-' * 10 + 'build and install pyncnn' + '-' * 10)
    time.sleep(2)  # brief pause so the banner is visible

    # generate unzip and build dir
    os.chdir(dep_dir)

    # git clone
    if not os.path.exists('ncnn'):
        os.system(
            'git clone --depth 1 --branch 20230816 https://github.com/tencent/ncnn && cd ncnn'  # noqa: E501
        )

    ncnn_dir = os.path.join(dep_dir, 'ncnn')
    os.chdir(ncnn_dir)

    # update submodule pybind11, gslang not required
    os.system('git submodule init && git submodule update python/pybind11')

    # build, pointing ncnn's cmake at the protobuf built by
    # install_protobuf() in <dep_dir>/pbinstall
    if not os.path.exists('build'):
        os.system('mkdir build')
    os.chdir(os.path.join(ncnn_dir, 'build'))
    os.system('rm -rf CMakeCache.txt')
    pb_install = os.path.join(dep_dir, 'pbinstall')
    pb_bin = os.path.join(pb_install, 'bin', 'protoc')
    pb_lib = os.path.join(pb_install, 'lib', 'libprotobuf.so')
    pb_include = os.path.join(pb_install, 'include')
    cmd = 'cmake .. '
    cmd += ' -DNCNN_PYTHON=ON '
    cmd += ' -DProtobuf_LIBRARIES={} '.format(pb_lib)
    cmd += ' -DProtobuf_PROTOC_EXECUTABLE={} '.format(pb_bin)
    cmd += ' -DProtobuf_INCLUDE_DIR={} '.format(pb_include)
    cmd += ' && make -j {} '.format(g_jobs)
    cmd += ' && make install '
    os.system(cmd)

    # install the python bindings for the current user
    os.chdir(ncnn_dir)
    os.system('cd python && python -m pip install -e . --user --no-cache-dir')
    ncnn_cmake_dir = os.path.join(ncnn_dir, 'build', 'install', 'lib', 'cmake',
                                  'ncnn')
    assert (os.path.exists(ncnn_cmake_dir))
    print('ncnn cmake dir \t:{}'.format(ncnn_cmake_dir))
    print('\n')
    return ncnn_cmake_dir
def install_mmdeploy(work_dir, dep_dir, ncnn_cmake_dir):
    """Build the mmdeploy SDK with the ncnn backend and install the python
    package in develop mode.

    Args:
        work_dir (str): mmdeploy source root.
        dep_dir (str): Dependency dir holding the protobuf install.
        ncnn_cmake_dir (str): ncnn cmake config dir from install_pyncnn().

    Returns:
        int: Always 0 (build failures are not propagated).
    """
    print('-' * 10 + 'build and install mmdeploy' + '-' * 10)
    time.sleep(3)  # brief pause so the banner is visible

    os.chdir(work_dir)
    os.system('git submodule init')
    os.system('git submodule update')

    if not os.path.exists('build'):
        os.system('mkdir build')

    # point cmake at the protobuf built by install_protobuf()
    pb_install = os.path.join(dep_dir, 'pbinstall')
    pb_bin = os.path.join(pb_install, 'bin', 'protoc')
    pb_lib = os.path.join(pb_install, 'lib', 'libprotobuf.so')
    pb_include = os.path.join(pb_install, 'include')

    os.system('rm -rf build/CMakeCache.txt')
    cmd = 'cd build && cmake ..'
    cmd += ' -DMMDEPLOY_BUILD_SDK=ON '
    cmd += ' -DMMDEPLOY_BUILD_EXAMPLES=ON '
    cmd += ' -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON '
    cmd += ' -DMMDEPLOY_TARGET_DEVICES=cpu '
    cmd += ' -DMMDEPLOY_TARGET_BACKENDS=ncnn '
    cmd += ' -DProtobuf_PROTOC_EXECUTABLE={} '.format(pb_bin)
    cmd += ' -DProtobuf_LIBRARIES={} '.format(pb_lib)
    cmd += ' -DProtobuf_INCLUDE_DIR={} '.format(pb_include)
    cmd += ' -Dncnn_DIR={} '.format(ncnn_cmake_dir)
    os.system(cmd)

    os.system('cd build && make -j {} && make install'.format(g_jobs))
    os.system('python3 -m pip install -v -e . --user --no-cache-dir')
    # make the converted-model tools reachable from later shells
    os.system(""" echo 'export PATH={}:$PATH' >> ~/mmdeploy.env """.format(
        os.path.join(work_dir, 'mmdeploy', 'backend', 'ncnn')))
    # check_env needs torch/mmcv, which may not be installed yet
    try:
        import mmcv
        print(mmcv.__version__)
        os.system('python3 tools/check_env.py')
    except Exception:
        print('Please install torch & mmcv later.. ╮(╯▽╰)╭')
    return 0
def main():
    """Auto install mmdeploy with ncnn. To verify this script:

    1) use `sudo docker run -v /path/to/mmdeploy:/root/mmdeploy -v /path/to/Miniconda3-latest-Linux-x86_64.sh:/root/miniconda.sh -it ubuntu:18.04 /bin/bash`  # noqa: E501
    2) install conda and setup python environment
    3) run `python3 tools/scripts/build_ubuntu_x64_ncnn.py`

    Returns:
        int | None: -1 on failure, None on success.
    """
    global g_jobs
    g_jobs = get_job(sys.argv)
    print('g_jobs {}'.format(g_jobs))

    work_dir = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
    dep_dir = os.path.abspath(os.path.join(work_dir, '..', 'mmdeploy-dep'))
    # FIX: the original nested the isfile() check inside the not-exists
    # branch, where it could never be true, and printed work_dir instead of
    # the offending dep_dir in the message.
    if os.path.isfile(dep_dir):
        print('{} already exists and it is a file, exit.'.format(dep_dir))
        return -1
    if not os.path.exists(dep_dir):
        os.mkdir(dep_dir)

    success = ensure_base_env(work_dir, dep_dir)
    if success != 0:
        return -1

    # run the stages in dependency order, aborting on the first failure
    if install_protobuf(dep_dir) != 0:
        return -1
    ncnn_cmake_dir = install_pyncnn(dep_dir)
    if install_mmdeploy(work_dir, dep_dir, ncnn_cmake_dir) != 0:
        return -1

    if os.path.exists(Path('~/mmdeploy.env').expanduser()):
        print('Please source ~/mmdeploy.env to setup your env !')
        os.system('cat ~/mmdeploy.env')


if __name__ == '__main__':
    main()
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment