Commit 502f4fb9 authored by limm's avatar limm
Browse files

add tools and service module

parent 68661967
Pipeline #2809 canceled with stages
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import ast
import logging
import os.path as osp
from copy import deepcopy
from typing import Optional, Sequence

import h5py
import tqdm
from mmengine import Config

from mmdeploy.apis.utils import build_task_processor
from mmdeploy.utils import get_root_logger, load_config
def get_tensor_func(model, input_data):
    """Run the model's data preprocessor and return the input tensor."""
    processed = model.data_preprocessor(input_data)
    return processed['inputs']
def process_model_config(model_cfg: Config,
                         input_shape: Optional[Sequence[int]] = None):
    """Process the model config for static export.

    Args:
        model_cfg (Config): The model config.
        input_shape (Sequence[int] | None): A (width, height) pair used as
            the static input shape for the resize transforms. When ``None``
            the resize transforms are left untouched (dynamic shape).
            Default: None.

    Returns:
        Config: the model config after processing.
    """
    cfg = model_cfg.copy()
    pipeline = cfg.test_pipeline
    # Guard for input_shape=None: the original unconditionally called
    # ``tuple(input_shape)`` and raised TypeError with the documented
    # default value.
    if input_shape is not None:
        for i, transform in enumerate(pipeline):
            # for static exporting, pin the resize transforms
            if transform.type == 'Resize':
                pipeline[i].keep_ratio = False
                pipeline[i].scale = tuple(input_shape)
            if transform.type in ('YOLOv5KeepRatioResize', 'LetterResize'):
                pipeline[i].scale = tuple(input_shape)
    # annotations are not needed for deployment inference
    pipeline = [
        transform for transform in pipeline
        if transform.type != 'LoadAnnotations'
    ]
    cfg.test_pipeline = pipeline
    return cfg
def get_quant(deploy_cfg: Config,
              model_cfg: Config,
              shape_dict: dict,
              checkpoint_path: str,
              work_dir: str,
              device: str = 'cpu',
              dataset_type: str = 'val'):
    """Dump preprocessed calibration samples to ``<work_dir>/calib_data.h5``.

    Args:
        deploy_cfg (Config): Deployment config.
        model_cfg (Config): Model config; its ``{dataset_type}_dataloader``
            entry is used as the calibration data source.
        shape_dict (dict): Mapping from input name to an NCHW model shape;
            only the first entry is used.
        checkpoint_path (str): Path of the PyTorch checkpoint to load.
        work_dir (str): Directory that receives ``calib_data.h5``.
        device (str): Device for the task processor. Default: 'cpu'.
        dataset_type (str): Dataloader split to calibrate on. Default: 'val'.
    """
    model_shape = list(shape_dict.values())[0]
    # process_model_config expects (width, height), i.e. (W, H) from NCHW
    model_cfg = process_model_config(model_cfg,
                                     (model_shape[3], model_shape[2]))
    task_processor = build_task_processor(model_cfg, deploy_cfg, device)
    model = task_processor.build_pytorch_model(checkpoint_path)
    calib_dataloader = deepcopy(model_cfg[f'{dataset_type}_dataloader'])
    # one sample per HDF5 dataset entry
    calib_dataloader['batch_size'] = 1
    dataloader = task_processor.build_dataloader(calib_dataloader)
    output_quant_dataset_path = osp.join(work_dir, 'calib_data.h5')
    with h5py.File(output_quant_dataset_path, mode='w') as file:
        calib_data_group = file.create_group('calib_data')
        input_data_group = calib_data_group.create_group('input')
        # get an available input shape randomly
        for data_id, input_data in enumerate(tqdm.tqdm(dataloader)):
            # input_data = data_preprocessor(input_data)['inputs'].numpy()
            input_data = get_tensor_func(model, input_data).numpy()
            calib_data_shape = input_data.shape
            # every calibration sample must fit inside the static model shape
            assert model_shape[2] >= calib_data_shape[2] and model_shape[
                3] >= calib_data_shape[
                    3], f'vacc backend model shape is {tuple(model_shape[2:])}, \
the calib_data shape {calib_data_shape[2:]} is bigger'
            input_data_group.create_dataset(
                str(data_id),
                shape=input_data.shape,
                compression='gzip',
                compression_opts=4,
                data=input_data)
def parse_args():
    """Parse the command line arguments.

    Returns:
        argparse.Namespace: Parsed arguments.
    """
    parser = argparse.ArgumentParser(
        description='Generate vacc quant dataset from ONNX.')
    parser.add_argument('--deploy-cfg', help='Input deploy config path')
    parser.add_argument('--model-cfg', help='Input model config path')
    parser.add_argument('--shape-dict', help='Input model shape')
    parser.add_argument('--checkpoint-path', help='checkpoint path')
    parser.add_argument('--work-dir', help='Output quant dataset dir')
    parser.add_argument(
        '--log-level',
        help='set log level',
        default='INFO',
        # Same names as ``logging._nameToLevel`` but without depending on a
        # private attribute of the logging module.
        choices=[
            'CRITICAL', 'FATAL', 'ERROR', 'WARN', 'WARNING', 'INFO',
            'DEBUG', 'NOTSET'
        ])
    args = parser.parse_args()
    return args
def main():
    """Entry point: build a VACC quantization dataset from CLI arguments."""
    args = parse_args()
    logger = get_root_logger(log_level=args.log_level)
    deploy_cfg, model_cfg = load_config(args.deploy_cfg, args.model_cfg)
    work_dir = args.work_dir
    checkpoint_path = args.checkpoint_path
    # literal_eval only accepts Python literals; the original ``eval``
    # executed arbitrary code supplied on the command line.
    shape_dict = ast.literal_eval(args.shape_dict)
    get_quant(deploy_cfg, model_cfg, shape_dict, checkpoint_path, work_dir)
    logger.info('onnx2vacc_quant_dataset success.')
# CLI entry point.
if __name__ == '__main__':
    main()
# Precompiled package
This document is going to describe the way to build MMDeploy package.
## Prerequisites
- Download and install Miniconda from the [official website](https://docs.conda.io/en/latest/miniconda.html).
- Create conda environments for python 3.6, 3.7, 3.8, 3.9 and 3.10, respectively.
```shell
for PYTHON_VERSION in 3.6 3.7 3.8 3.9 3.10
do
conda create --name mmdeploy-$PYTHON_VERSION python=$PYTHON_VERSION -y
done
```
- Prepare MMDeploy dependencies
Please follow the [build-on-Linux guide](../../docs/en/01-how-to-build/linux-x86_64.md) or [build-on-Windows guide](../../docs/en/01-how-to-build/windows.md) to install dependencies of MMDeploy,
including PyTorch, MMCV, OpenCV, ppl.cv, ONNX Runtime and TensorRT.
Make sure the environment variables `pplcv_DIR`, `ONNXRUNTIME_DIR`, `TENSORRT_DIR`, `CUDNN_DIR` and `CUDA_TOOLKIT_ROOT_DIR` are exported.
## Run precompiled command
- On Linux platform,
```shell
conda activate mmdeploy-3.6
pip install pyyaml packaging
cd the/root/path/of/mmdeploy
python tools/package_tools/generate_build_config.py --backend 'ort' \
--system linux --build-mmdeploy --device cpu --build-sdk \
--build-sdk-monolithic --build-sdk-python --sdk-dynamic-net \
--output config.yml
python tools/package_tools/mmdeploy_builder.py --config config.yml --output-dir pack
```
- On Windows platform, open `Anaconda Powershell Prompt` from the start menu and execute:
```shell
conda activate mmdeploy-3.6
pip install pyyaml packaging
cd the/root/path/of/MMDeploy
python tools/package_tools/generate_build_config.py --backend 'ort' \
--system windows --build-mmdeploy --device cpu --build-sdk \
--build-sdk-monolithic --build-sdk-python --sdk-dynamic-net \
--output config.yml
python tools/package_tools/mmdeploy_builder.py --config config.yml --output-dir pack
```
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import sys
from distutils.util import get_platform
import yaml
def parse_arguments():
    """Build the CLI parser for the build-config generator and parse argv."""
    parser = argparse.ArgumentParser(
        description='MMDeploy create build config')
    # (flag, add_argument kwargs) pairs, registered in declaration order.
    specs = [
        ('--backend',
         dict(required=True, type=str, help='target backend. Eg: "ort;trt"')),
        ('--system',
         dict(required=True, type=str,
              help='target system, Eg: windows/linux/jetson')),
        ('--build-mmdeploy',
         dict(action='store_true',
              help='whether build mmdeploy runtime package')),
        ('--build-sdk',
         dict(action='store_true', help='whether build sdk c/cpp api')),
        ('--sdk-dynamic-net',
         dict(action='store_true',
              help='whether build mmdeploy sdk dynamic net')),
        ('--device', dict(type=str, help='target device. Eg: "cpu"')),
        ('--shared',
         dict(action='store_true', help='whether build shared lib')),
        ('--build-sdk-monolithic',
         dict(action='store_true', help='whether build sdk monolithic')),
        ('--build-sdk-python',
         dict(action='store_true', help='whether build sdk python api')),
        ('--opencv-dir',
         dict(type=str,
              help='opencv path that contains OpenCVConfig.cmake, '
              'default use $ENV{OpenCV_DIR}')),
        ('--pplcv-dir',
         dict(type=str,
              help='pplcv path that contains pplcv-config.cmake, '
              'default use $ENV{pplcv_DIR}')),
        ('--onnxruntime-dir',
         dict(type=str,
              help='onnxruntime root path, default use $ENV{ONNXRUNTIME_DIR}')),
        ('--tensorrt-dir',
         dict(type=str,
              help='tensorrt root path, default use $ENV{TENSORRT_DIR}')),
        ('--cudnn-dir',
         dict(type=str, help='cudnn root dir, default use $ENV{CUDNN_DIR}')),
        ('--cxx11abi', dict(action='store_true', help='new cxxabi')),
        ('--output',
         dict(required=True, type=str, help='output config file path')),
    ]
    for flag, kwargs in specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
def generate_config(args):
    """Translate parsed CLI arguments into a build-config dictionary.

    Raises:
        Exception: when a required directory is neither given on the CLI
            nor present in the environment, or the system/device is
            unsupported.
    """

    def _resolve_dir(cli_value, env_key, flag):
        # CLI value wins, then the environment variable, otherwise abort.
        if cli_value:
            return cli_value
        if env_key in os.environ:
            return os.environ[env_key]
        raise Exception('please provide ' + flag)

    config = {}
    cmake_cfg = {}

    # wheel platform tag
    if args.system in ['linux']:
        config['PLATFORM_TAG'] = 'manylinux2014_x86_64'
    elif args.system in ['jetson']:
        config['PLATFORM_TAG'] = 'any'
    else:
        tag = get_platform().replace('-', '_').replace('.', '_')
        config['PLATFORM_TAG'] = tag

    config['BUILD_MMDEPLOY'] = 'ON' if args.build_mmdeploy else 'OFF'

    # deps for mmdeploy
    cmake_cfg['MMDEPLOY_TARGET_BACKENDS'] = args.backend
    if 'ort' in args.backend:
        cmake_cfg['ONNXRUNTIME_DIR'] = _resolve_dir(
            args.onnxruntime_dir, 'ONNXRUNTIME_DIR', '--onnxruntime-dir')
    if 'trt' in args.backend:
        cmake_cfg['TENSORRT_DIR'] = _resolve_dir(
            args.tensorrt_dir, 'TENSORRT_DIR', '--tensorrt-dir')
        cmake_cfg['CUDNN_DIR'] = _resolve_dir(
            args.cudnn_dir, 'CUDNN_DIR', '--cudnn-dir')

    # deps for mmdeploy-python
    if not args.build_sdk:
        cmake_cfg['MMDEPLOY_BUILD_SDK'] = 'OFF'
        cmake_cfg['MMDEPLOY_BUILD_SDK_PYTHON_API'] = 'OFF'
    else:
        cmake_cfg['MMDEPLOY_BUILD_SDK'] = 'ON'
        cmake_cfg['MMDEPLOY_BUILD_SDK_MONOLITHIC'] = (
            'ON' if args.build_sdk_monolithic else 'OFF')
        cmake_cfg['MMDEPLOY_BUILD_SDK_PYTHON_API'] = (
            'ON' if args.build_sdk_python else 'OFF')
        cmake_cfg['MMDEPLOY_SHARED_LIBS'] = 'ON' if args.shared else 'OFF'
        cmake_cfg['MMDEPLOY_TARGET_DEVICES'] = args.device
        cmake_cfg['MMDEPLOY_DYNAMIC_BACKEND'] = (
            'ON' if args.sdk_dynamic_net else 'OFF')
        cmake_cfg['MMDEPLOY_ZIP_MODEL'] = 'ON'
        cmake_cfg['OpenCV_DIR'] = _resolve_dir(
            args.opencv_dir, 'OpenCV_DIR', '--opencv-dir')
        if args.device == 'cuda':
            cmake_cfg['pplcv_DIR'] = _resolve_dir(
                args.pplcv_dir, 'pplcv_DIR', '--pplcv-dir')

        # sdk package template
        if args.system in ['windows', 'linux']:
            name = 'mmdeploy-{mmdeploy_v}-{system}-{machine}'
            if args.cxx11abi:
                name += '-cxx11abi'
            if args.device == 'cpu':
                pass
            elif args.device == 'cuda':
                name = '{}-cuda'.format(name) + '{cuda_v}'
            else:
                raise Exception('unsupported device')
            config['BUILD_SDK_NAME'] = name
        elif args.system == 'jetson':
            config['BUILD_SDK_NAME'] = 'mmdeploy-{mmdeploy_v}-jetson-{machine}'
        else:
            raise Exception('unsupported system')

    config['cmake_cfg'] = cmake_cfg
    return config
def main():
    """Parse CLI arguments and dump the generated build config as YAML."""
    args = parse_arguments()
    print(args)
    cfg = generate_config(args)
    with open(args.output, 'w') as f:
        yaml.dump(cfg, f)
# CLI entry point; main()'s return value becomes the process exit status.
if __name__ == '__main__':
    sys.exit(main())
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import logging
import os
import os.path as osp
import platform
import re
import shutil
import sys
from glob import glob
from subprocess import check_output, run
from typing import Dict
import yaml
from packaging import version
# Module-wide logging at INFO level.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Repository layout: this file lives in tools/package_tools, two levels
# below the mmdeploy repo root.
CUR_DIR = osp.dirname(osp.abspath(__file__))
MMDEPLOY_DIR = osp.abspath(osp.join(CUR_DIR, '../..'))
PACKAGING_DIR = osp.join(CUR_DIR, 'packaging')
VERSION_FILE = osp.join(MMDEPLOY_DIR, 'mmdeploy', 'version.py')
def get_version(version_file):
    """Execute *version_file* and return the ``__version__`` it defines."""
    scope = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), scope)
    return scope['__version__']
def _remove_if_exist(path):
if osp.exists(path):
logging.info(f'Remove path: {path}')
if osp.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
if osp.islink(path):
os.remove(path)
def _copy(src_path, dst_path):
assert osp.exists(src_path), f'src path: {src_path} not exist.'
logging.info(f'copy path: {src_path} to {dst_path}.')
if osp.isdir(src_path):
if osp.exists(dst_path):
shutil.rmtree(dst_path)
shutil.copytree(src_path, dst_path, symlinks=True)
else:
shutil.copy(src_path, dst_path)
def _call_command(cmd, cwd, stdout=None, stderr=None):
if cmd == '':
return
logging.info(f'Process cmd: {cmd}')
logging.info(f'work_path: {cwd}')
try:
ret = run(cmd, stdout=stdout, stderr=stderr, cwd=cwd, shell=True)
if ret.returncode != 0:
logging.error(f'Process cmd: "{cmd}"'
f' failed with returncode: {ret.returncode}')
exit(-1)
except Exception:
logging.error(f'Process cmd: {cmd} failed.')
exit(-1)
def _create_bdist_cmd(cfg, c_ext=False, dist_dir=None):
bdist_tags = cfg.get('bdist_tags', {})
# base
bdist_cmd = 'python setup.py bdist_wheel '
# platform
bdist_cmd += f' --plat-name {cfg["PLATFORM_TAG"]} '
# python tag
python_tag = f'cp{sys.version_info.major}{sys.version_info.minor}'\
if c_ext else 'py3'
if 'python_tag' in bdist_tags:
python_tag = bdist_tags['python_tag']
bdist_cmd += f' --python-tag {python_tag} '
# dist dir
if dist_dir is not None:
dist_dir = osp.abspath(dist_dir)
bdist_cmd += f' --dist-dir {dist_dir} '
return bdist_cmd
def clear_mmdeploy():
    """Remove build artifacts and previously installed binaries from the
    mmdeploy source tree so a fresh build starts clean."""
    logging.info(f'Cleaning mmdeploy: {MMDEPLOY_DIR}')

    def _remove_in_mmdeploy(path):
        # resolve *path* relative to the repo root and delete it if present
        remove_dir = osp.join(MMDEPLOY_DIR, path)
        _remove_if_exist(remove_dir)

    # remove build file
    _remove_in_mmdeploy('build')
    # remove dist
    _remove_in_mmdeploy('dist')
    # remove installed library
    _remove_in_mmdeploy('mmdeploy/lib')
    # remove onnx2ncnn and ncnn ext
    _remove_in_mmdeploy('mmdeploy/backend/ncnn/onnx2ncnn')
    _remove_in_mmdeploy('mmdeploy/backend/ncnn/onnx2ncnn.exe')
    _remove_in_mmdeploy('mmdeploy/backend/ncnn/mmdeploy_onnx2ncnn')
    _remove_in_mmdeploy('mmdeploy/backend/ncnn/mmdeploy_onnx2ncnn.exe')
    ncnn_ext_paths = glob(
        osp.join(MMDEPLOY_DIR, 'mmdeploy/backend/ncnn/ncnn_ext.*'))
    for ncnn_ext_path in ncnn_ext_paths:
        os.remove(ncnn_ext_path)
    # remove ts_optmizer
    ts_optimizer_paths = glob(
        osp.join(MMDEPLOY_DIR, 'mmdeploy/backend/torchscript/ts_optimizer.*'))
    for ts_optimizer_path in ts_optimizer_paths:
        os.remove(ts_optimizer_path)
def check_env(cfg: Dict):
    """Probe the host for system, CUDA, ONNX Runtime and TensorRT versions.

    Args:
        cfg (Dict): Build config; ``cmake_cfg`` entries override the
            corresponding environment variables when locating the toolkits.

    Returns:
        dict: keys ``system``, ``machine``, ``cuda_v``, ``ort_v``,
            ``trt_v``; version values fall back to ``'unknown'`` when a
            toolkit cannot be found.
    """
    env_info = {}
    cmake_envs = cfg.get('cmake_cfg', dict())
    # system
    platform_system = platform.system().lower()
    platform_machine = platform.machine().lower()
    env_info['system'] = platform_system
    env_info['machine'] = platform_machine
    # CUDA version: parse `nvcc -V`; cmake_cfg wins over the environment,
    # otherwise fall back to whatever nvcc is on PATH.
    cuda_version = 'unknown'
    CUDA_TOOLKIT_ROOT_DIR = os.environ.get('CUDA_TOOLKIT_ROOT_DIR', '')
    CUDA_TOOLKIT_ROOT_DIR = cmake_envs.get('CUDA_TOOLKIT_ROOT_DIR',
                                           CUDA_TOOLKIT_ROOT_DIR)
    CUDA_TOOLKIT_ROOT_DIR = osp.expandvars(CUDA_TOOLKIT_ROOT_DIR)
    nvcc_cmd = ('nvcc' if len(CUDA_TOOLKIT_ROOT_DIR) <= 0 else osp.join(
        CUDA_TOOLKIT_ROOT_DIR, 'bin', 'nvcc'))
    try:
        nvcc = check_output(f'"{nvcc_cmd}" -V', shell=True)
        nvcc = nvcc.decode('utf-8').strip()
        pattern = r'Cuda compilation tools, release (\d+.\d+)'
        match = re.search(pattern, nvcc)
        if match is not None:
            cuda_version = match.group(1)
    except Exception:
        # nvcc missing or not runnable; keep 'unknown'
        pass
    env_info['cuda_v'] = cuda_version
    # ONNX Runtime version: read the VERSION_NUMBER file from the release dir
    onnxruntime_version = 'unknown'
    ONNXRUNTIME_DIR = os.getenv('ONNXRUNTIME_DIR', '')
    ONNXRUNTIME_DIR = cmake_envs.get('ONNXRUNTIME_DIR', ONNXRUNTIME_DIR)
    ONNXRUNTIME_DIR = osp.expandvars(ONNXRUNTIME_DIR)
    if osp.exists(ONNXRUNTIME_DIR):
        with open(osp.join(ONNXRUNTIME_DIR, 'VERSION_NUMBER'), mode='r') as f:
            onnxruntime_version = f.readlines()[0].strip()
    env_info['ort_v'] = onnxruntime_version
    # TensorRT version: assemble major.minor.patch.build from the macros in
    # include/NvInferVersion.h
    tensorrt_version = 'unknown'
    TENSORRT_DIR = os.getenv('TENSORRT_DIR', '')
    TENSORRT_DIR = cmake_envs.get('TENSORRT_DIR', TENSORRT_DIR)
    TENSORRT_DIR = osp.expandvars(TENSORRT_DIR)
    if osp.exists(TENSORRT_DIR):
        with open(
                osp.join(TENSORRT_DIR, 'include', 'NvInferVersion.h'),
                mode='r') as f:
            data = f.read()
        major = re.search(r'#define NV_TENSORRT_MAJOR (\d+)', data)
        minor = re.search(r'#define NV_TENSORRT_MINOR (\d+)', data)
        patch = re.search(r'#define NV_TENSORRT_PATCH (\d+)', data)
        build = re.search(r'#define NV_TENSORRT_BUILD (\d+)', data)
        if major is not None and minor is not None and patch is not None \
                and build is not None:
            tensorrt_version = (f'{major.group(1)}.' +
                                f'{minor.group(1)}.' +
                                f'{patch.group(1)}.' + f'{build.group(1)}')
    env_info['trt_v'] = tensorrt_version
    return env_info
def build_mmdeploy(cfg: Dict):
    """Configure, build and install mmdeploy with CMake.

    Args:
        cfg (Dict): Build config whose ``cmake_cfg`` entries are forwarded
            as ``-D`` cache options.
    """
    build_dir = osp.join(MMDEPLOY_DIR, 'build')
    if not osp.exists(build_dir):
        os.mkdir(build_dir)
    cmake_cfg = cfg['cmake_cfg']
    # empty values would produce -DKEY="", so skip them
    cmake_options = [f'-D{k}="{v}"' for k, v in cmake_cfg.items() if v != '']
    if sys.platform == 'win32':
        # MSVC: 64-bit, v142 toolset; attach the CUDA toolset when available
        cmake_windows_options = '-A x64 -T v142'
        if 'CUDA_PATH' in os.environ:
            cmake_windows_options += ',cuda="%CUDA_PATH%"'
        cmake_options = [cmake_windows_options] + cmake_options
    # configure
    cmake_cmd = ' '.join(['cmake ..'] + cmake_options)
    _call_command(cmake_cmd, build_dir)
    # build
    if sys.platform == 'win32':
        build_cmd = 'cmake --build . --config Release -- /m'
    else:
        build_cmd = 'cmake --build . -- -j$(nproc)'
    _call_command(build_cmd, build_dir)
    # install
    install_cmd = 'cmake --install . --config Release'
    _call_command(install_cmd, build_dir)
def copy_thirdparty(cfg: Dict, sdk_path: str):
    """Bundle the backend runtime libraries into ``<sdk_path>/thirdparty``.

    Args:
        cfg (Dict): Build config; ``cmake_cfg`` provides the backend list
            and the backend root directories.
        sdk_path (str): SDK output root (``thirdparty`` must not exist yet).
    """
    thirdparty_dir = osp.join(sdk_path, 'thirdparty')
    os.mkdir(thirdparty_dir)

    def _copy_needed(src_dir, dst_dir, needed):
        # *needed* holds tuples of (subdir, keep-glob, keep-glob, ...): the
        # subdir is copied wholesale, then every file NOT matching one of
        # the keep-globs is deleted again ('**' keeps everything).
        if not osp.exists(dst_dir):
            os.makedirs(dst_dir)
        for path in needed:
            src_path = osp.join(src_dir, path[0])
            dst_path = osp.join(dst_dir, path[0])
            _copy(src_path, dst_path)
            if len(path) == 1 or path[1] == '**':
                continue
            old_dir = os.getcwd()
            # glob patterns are relative, so chdir into the copied tree
            os.chdir(dst_path)
            files = glob('**', recursive=True)
            reserve = []
            for pattern in path[1:]:
                reserve.extend(glob(pattern, recursive=True))
            for file in files:
                if file not in reserve:
                    _remove_if_exist(file)
            os.chdir(old_dir)

    # copy onnxruntime, tensorrt
    backend = cfg['cmake_cfg']['MMDEPLOY_TARGET_BACKENDS']
    if 'ort' in backend:
        src_dir = cfg['cmake_cfg']['ONNXRUNTIME_DIR']
        dst_dir = osp.join(thirdparty_dir, 'onnxruntime')
        needed = [('include', '**'), ('lib', '**')]
        _copy_needed(src_dir, dst_dir, needed)
    if 'trt' in backend:
        src_dir = cfg['cmake_cfg']['TENSORRT_DIR']
        dst_dir = osp.join(thirdparty_dir, 'tensorrt')
        needed = [('include', '**'),
                  ('lib', 'libnvinfer_builder_resource.so*', 'libnvinfer.so*',
                   'libnvinfer_plugin.so*', 'nvinfer_builder_resource.*',
                   'nvinfer*', 'nvinfer_plugin*')]
        _copy_needed(src_dir, dst_dir, needed)
def copy_scripts(sdk_path: str):
    """Copy the platform-specific helper scripts into the SDK root."""
    scripts_base = osp.join(MMDEPLOY_DIR, 'tools', 'package_tools', 'scripts')
    platform_dirs = {'win32': 'windows', 'linux': 'linux'}
    if sys.platform not in platform_dirs:
        raise Exception('unsupported')
    src_dir = osp.join(scripts_base, platform_dirs[sys.platform])
    for file in glob(osp.join(src_dir, '*')):
        filename = osp.basename(file)
        _copy(osp.join(src_dir, filename), osp.join(sdk_path, filename))
def copy_onnxruntime(cfg, dst_dir):
    """Copy the onnxruntime shared library (Linux or Windows) into *dst_dir*."""
    ort_root = cfg['cmake_cfg']['ONNXRUNTIME_DIR']
    for pattern in ('libonnxruntime.so.*', 'onnxruntime.dll'):
        matches = glob(osp.join(ort_root, 'lib', pattern))
        if matches:
            _copy(matches[0], osp.join(dst_dir, osp.basename(matches[0])))
def create_mmdeploy(cfg: Dict, work_dir: str):
    """Build the ``mmdeploy`` python wheel into ``<work_dir>/mmdeploy``.

    Skipped entirely when the config sets ``BUILD_MMDEPLOY`` to 'OFF'.
    """
    if cfg['BUILD_MMDEPLOY'] == 'OFF':
        logging.info('Skip build mmdeploy package')
        return
    dist_dir = osp.join(work_dir, 'mmdeploy')
    if osp.exists(dist_dir):
        logging.info('mmdeploy existed, deleting...')
        shutil.rmtree(dist_dir)
    clear_mmdeploy()
    build_mmdeploy(cfg)
    # copy libonnxruntime.so.x.y.z
    backend = cfg['cmake_cfg']['MMDEPLOY_TARGET_BACKENDS']
    if 'ort' in backend:
        dst_dir = osp.join(MMDEPLOY_DIR, 'mmdeploy', 'lib')
        copy_onnxruntime(cfg, dst_dir)
    # build wheel
    build_dir = osp.join(MMDEPLOY_DIR, 'build')
    # NOTE(review): removing build/lib first makes the build/lib/Release
    # removal below a no-op — confirm whether the order is intentional.
    _remove_if_exist(osp.join(build_dir, 'lib'))
    _remove_if_exist(osp.join(build_dir, 'lib', 'Release'))
    bdist_cmd = _create_bdist_cmd(cfg, c_ext=False, dist_dir=dist_dir)
    _call_command(bdist_cmd, MMDEPLOY_DIR)
def create_mmdeploy_runtime(cfg: Dict, work_dir: str):
    """Build ``mmdeploy_runtime`` wheels for python 3.6 through 3.11 into
    ``<work_dir>/mmdeploy_runtime``.

    Requires both MMDEPLOY_BUILD_SDK and MMDEPLOY_BUILD_SDK_PYTHON_API to be
    'ON' in ``cfg['cmake_cfg']``; otherwise the step is skipped.
    """
    cmake_cfg = cfg['cmake_cfg']
    if cmake_cfg['MMDEPLOY_BUILD_SDK'] == 'OFF' or \
            cmake_cfg['MMDEPLOY_BUILD_SDK_PYTHON_API'] == 'OFF':
        logging.info('Skip build mmdeploy sdk python api')
        return
    for python_version in ['3.6', '3.7', '3.8', '3.9', '3.10', '3.11']:
        _version = version.parse(python_version)
        python_major = _version.major
        python_minor = _version.minor
        # create sdk python api wheel
        # stage the packaging template in a hidden scratch directory
        sdk_python_package_dir = osp.join(work_dir, '.mmdeploy_runtime')
        _copy(PACKAGING_DIR, sdk_python_package_dir)
        _copy(
            VERSION_FILE,
            osp.join(sdk_python_package_dir, 'mmdeploy_runtime', 'version.py'),
        )
        # build mmdeploy_runtime
        # NOTE(review): assumes the current interpreter lives in a conda env
        # named mmdeploy-3.6 with sibling envs mmdeploy-<ver> — confirm.
        python_executable = shutil.which('python')\
            .replace('mmdeploy-3.6', f'mmdeploy-{python_version}')
        cmake_options = [
            f'-D{k}="{v}"' for k, v in cmake_cfg.items() if v != ''
        ]
        cmake_options.append(
            f'-DMMDeploy_DIR={MMDEPLOY_DIR}/build/install/lib/cmake/MMDeploy')
        cmake_options.append(f'-DPYTHON_EXECUTABLE={python_executable}')
        if sys.platform == 'win32':
            cmake_options.append('-A x64 -T v142')
            if 'CUDA_PATH' in os.environ:
                cmake_options[-1] += ',cuda="%CUDA_PATH%"'
        cmake_cmd = ' '.join(['cmake ../csrc/mmdeploy/apis/python'] +
                             cmake_options)
        build_dir = osp.join(MMDEPLOY_DIR, 'build_python')
        _remove_if_exist(build_dir)
        os.mkdir(build_dir)
        _call_command(cmake_cmd, build_dir)
        if sys.platform == 'win32':
            build_cmd = 'cmake --build . --config Release -- /m'
        else:
            build_cmd = 'cmake --build . -- -j$(nproc)'
        _call_command(build_cmd, build_dir)
        # copy api lib
        python_api_lib_path = []
        lib_patterns = ['*mmdeploy_runtime*.so', '*mmdeploy_runtime*.pyd']
        for pattern in lib_patterns:
            python_api_lib_path.extend(
                glob(
                    osp.join(MMDEPLOY_DIR, 'build_python/**', pattern),
                    recursive=True,
                ))
        _copy(
            python_api_lib_path[0],
            osp.join(sdk_python_package_dir, 'mmdeploy_runtime'),
        )
        _remove_if_exist(osp.join(MMDEPLOY_DIR, 'build_python'))
        # copy net & mmdeploy
        if sys.platform == 'win32':
            libs_to_copy = ['*net.dll', 'mmdeploy.dll']
            search_dir = osp.join(MMDEPLOY_DIR, 'build', 'install', 'bin')
        elif sys.platform == 'linux':
            mmdeploy_version = get_version(VERSION_FILE)
            mmdeploy_version = version.parse(mmdeploy_version)
            libs_to_copy = [
                '*net.so', f'*mmdeploy.so.{mmdeploy_version.major}'
            ]
            search_dir = osp.join(MMDEPLOY_DIR, 'build', 'install', 'lib')
        else:
            raise Exception('unsupported')
        for pattern in libs_to_copy:
            files = glob(osp.join(search_dir, pattern))
            for file in files:
                _copy(file, osp.join(sdk_python_package_dir,
                                     'mmdeploy_runtime'))
        # copy onnxruntime
        if 'ort' in cfg['cmake_cfg']['MMDEPLOY_TARGET_BACKENDS']:
            copy_onnxruntime(
                cfg, osp.join(sdk_python_package_dir, 'mmdeploy_runtime'))
        # bdist
        sdk_wheel_dir = osp.join(work_dir, 'mmdeploy_runtime')
        # interpreter-specific wheel tag, e.g. cp38
        cfg['bdist_tags'] = {'python_tag': f'cp{python_major}{python_minor}'}
        bdist_cmd = _create_bdist_cmd(cfg, c_ext=True, dist_dir=sdk_wheel_dir)
        if 'cuda' in cmake_cfg['MMDEPLOY_TARGET_DEVICES']:
            # consumed by packaging/setup.py to rename the package to *_gpu
            bdist_cmd += ' --use-gpu'
        _call_command(bdist_cmd, sdk_python_package_dir)
        _remove_if_exist(sdk_python_package_dir)
def create_sdk(cfg: Dict, work_dir: str):
    """Assemble the C/C++ SDK package under ``<work_dir>/sdk``.

    Builds mmdeploy with the SDK enabled (python API forced off), then
    copies the install tree, python demos, backend thirdparty libraries and
    helper scripts into a directory named from BUILD_SDK_NAME plus the
    probed environment (version, system, machine, cuda version).
    """
    cmake_cfg = cfg['cmake_cfg']
    if cmake_cfg['MMDEPLOY_BUILD_SDK'] == 'OFF':
        logging.info('Skip build mmdeploy sdk')
        return
    # the python API is packaged separately by create_mmdeploy_runtime
    cfg = copy.deepcopy(cfg)
    cfg['cmake_cfg']['MMDEPLOY_BUILD_SDK_PYTHON_API'] = 'OFF'
    clear_mmdeploy()
    build_mmdeploy(cfg)
    sdk_root = osp.abspath(osp.join(work_dir, 'sdk'))
    build_sdk_name = cfg['BUILD_SDK_NAME']
    env_info = check_env(cfg)
    mmdeploy_version = get_version(VERSION_FILE)
    build_sdk_name = build_sdk_name.format(
        mmdeploy_v=mmdeploy_version, **env_info)
    sdk_path = osp.join(sdk_root, build_sdk_name)
    if osp.exists(sdk_path):
        logging.info(f'{sdk_path}, deleting...')
        shutil.rmtree(sdk_path)
    os.makedirs(sdk_path)
    install_dir = osp.join(MMDEPLOY_DIR, 'build/install/')
    _copy(install_dir, sdk_path)
    _copy(f'{MMDEPLOY_DIR}/demo/python', f'{sdk_path}/example/python')
    _remove_if_exist(osp.join(sdk_path, 'example', 'build'))
    # copy thirdparty
    copy_thirdparty(cfg, sdk_path)
    # copy scripts
    copy_scripts(sdk_path)
def create_package(cfg: Dict, work_dir: str):
    """Build every artifact requested by *cfg*: the mmdeploy wheel, the
    C/C++ SDK and the mmdeploy_runtime wheels."""
    create_mmdeploy(cfg, work_dir)
    create_sdk(cfg, work_dir)
    create_mmdeploy_runtime(cfg, work_dir)
def parse_args():
    """Parse the command line arguments for the package builder."""
    parser = argparse.ArgumentParser(description='Build mmdeploy from yaml.')
    parser.add_argument('--config', help='The build config yaml file.')
    parser.add_argument(
        '--output-dir', default='.', help='Output package directory.')
    return parser.parse_args()
def parse_configs(cfg_path: str):
    """Load and return the build configuration from a YAML file."""
    with open(cfg_path, mode='r') as f:
        loaded = yaml.load(f, yaml.Loader)
    logging.info(f'Load config\n{yaml.dump(loaded)}')
    return loaded
def main():
    """Build every requested package according to the YAML config."""
    args = parse_args()
    cfg = parse_configs(args.config)
    work_dir = osp.abspath(args.output_dir)
    logging.info(f'Using mmdeploy_dir: {MMDEPLOY_DIR}')
    logging.info(f'Using output_dir: {work_dir}')
    create_package(cfg, work_dir)
# CLI entry point.
if __name__ == '__main__':
    main()
include mmdeploy_runtime/*.so*
include mmdeploy_runtime/*.dll
include mmdeploy_runtime/*.pyd
# Copyright (c) OpenMMLab. All rights reserved.
# modify from https://github.com/NVIDIA/TensorRT/blob/main/python/packaging/tensorrt/__init__.py # noqa
import ctypes
import glob
import os
import sys
from .version import __version__
# On Windows the bundled DLLs live next to this file: prepend the package
# directory to PATH and import the generated _win_dll_path helper so
# dependent DLLs (e.g. CUDA) can be resolved by the loader.
if sys.platform == 'win32':
    os.environ['PATH'] = f'{os.path.dirname(__file__)};{os.environ["PATH"]}'
    from . import _win_dll_path  # noqa F401
def try_load(library):
    """Attempt to preload *library* with ctypes, ignoring load failures."""
    try:
        ctypes.CDLL(library)
    except OSError:
        # best effort: a failed preload is surfaced later by the real import
        pass
# Preload every bundled shared library so the extension module imported
# below can resolve its dependencies regardless of the process's library
# search path (pattern borrowed from NVIDIA/TensorRT's packaging).
CURDIR = os.path.realpath(os.path.dirname(__file__))
for lib in glob.iglob(os.path.join(CURDIR, '*.so*')):
    try_load(lib)
from .mmdeploy_runtime import *  # noqa
__all__ = ['__version__']
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '1.3.1'
import os
import os.path as osp
import platform
import sys
try:
from setuptools import find_packages, setup
except ImportError:
from distutils.core import find_packages, setup
CURDIR = os.path.realpath(os.path.dirname(__file__))
version_file = osp.join(CURDIR, 'mmdeploy_runtime', 'version.py')
package_name = 'mmdeploy_runtime'
def get_version():
    """Execute the package's version.py and return its ``__version__``."""
    scope = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), scope)
    return scope['__version__']
def get_platform_name():
    """Return this host's machine architecture (e.g. ``x86_64``, ``AMD64``)."""
    machine = platform.machine()
    return machine
def parse_arg_remove_boolean(argv, arg_name):
    """Check for a boolean flag in *argv* and strip it in place.

    Args:
        argv (list[str]): Argument list to inspect and mutate.
        arg_name (str): The flag to look for, e.g. ``--use-gpu``.

    Returns:
        bool: True if the flag was present (and removed), else False.
    """
    # Bug fix: the original tested ``arg_name in sys.argv`` while removing
    # from the ``argv`` parameter — inconsistent (and crash-prone) whenever
    # a caller passes a list other than sys.argv.
    if arg_name in argv:
        argv.remove(arg_name)
        return True
    return False
# `--use-gpu` switches the published package name to mmdeploy_runtime_gpu.
if parse_arg_remove_boolean(sys.argv, '--use-gpu'):
    package_name = package_name + '_gpu'
    if sys.platform == 'win32':
        # Append runtime DLL-path setup to the helper imported by
        # __init__.py on Windows: expose CUDA's bin directory to the DLL
        # loader (os.add_dll_directory on Python >= 3.8, PATH otherwise).
        with open('mmdeploy_runtime/_win_dll_path.py', 'a') as f:
            code = \
                'import os\n' \
                'import sys\n\n' \
                'cuda_bin_dir = ""\n' \
                'if "CUDA_PATH" in os.environ:\n' \
                '    cuda_bin_dir = os.path.join(os.environ["CUDA_PATH"], "bin")\n' \
                'else:\n' \
                '    raise ImportError("Can\'t find environment variable CUDA_PATH")\n' \
                'if sys.version_info >= (3, 8):\n' \
                '    os.add_dll_directory(cuda_bin_dir)\n' \
                'else:\n' \
                '    os.environ["PATH"] = cuda_bin_dir + os.pathsep + os.environ["PATH"]'
            f.write(code)
# Package the prebuilt SDK python API; the extension modules are copied in
# by the package builder before this runs.
if __name__ == '__main__':
    setup(
        name=package_name,
        version=get_version(),
        description='OpenMMLab Model Deployment SDK python api',
        author='OpenMMLab',
        author_email='openmmlab@gmail.com',
        keywords='computer vision, model deployment',
        url='https://github.com/open-mmlab/mmdeploy',
        packages=find_packages(),
        include_package_data=True,
        platforms=get_platform_name(),
        # ship the prebuilt extension modules and debug symbols
        package_data={'mmdeploy_runtime': ['*.so*', '*.pyd', '*.pdb']},
        license='Apache License 2.0')
# build sdk
1. install opencv (you can skip this step if you have installed it)
in sdk folder:
`./install_opencv.sh`
2. set environment variable and path
in sdk folder:
`source ./set_env.sh`
(**you have to additionally install cuda and cudnn if use sdk cuda version**)
(**you may need to set the CUDNN environment variable to point to the cudnn root folder if you use the sdk cuda version**)
3. build sdk
in sdk folder:
`./build_sdk.sh` \
(if you installed opencv by ./install_opencv.sh)
or
`./build_sdk.sh "path/to/folder/of/OpenCVConfig.cmake"` \
(if you installed opencv yourself)
the executable will be generated in: `bin/`
#!/bin/bash
# Build the SDK C++ examples against the packaged MMDeploy and OpenCV.
# Usage: ./build_sdk.sh [path/to/folder/of/OpenCVConfig.cmake]
WORKSPACE=$(realpath $(dirname "$0"))
OPENCV_DIR=""
if [ -n "$1" ]; then
    # validate the user-supplied OpenCV directory
    OPENCV_DIR=$(cd "$1"; pwd)
    if [ $? -ne 0 ]; then
        echo "opencv path $1 doesn't exist"
        exit 1
    fi
    if [ ! -f "$OPENCV_DIR/OpenCVConfig.cmake" ]; then
        echo "opencv path $1 doesn't contains OpenCVConfig.cmake"
        exit 1
    fi
fi
if [ -z "$OPENCV_DIR" ]; then
    # search thirdparty (lib64 on some distros, lib elsewhere)
    OPENCV_DIR="${WORKSPACE}/thirdparty/opencv/install/lib64/cmake/opencv4"
    _OPENCV_DIR="${WORKSPACE}/thirdparty/opencv/install/lib/cmake/opencv4"
    if [ -d "$OPENCV_DIR" ]; then
        echo "Found OPENCV_DIR= $OPENCV_DIR"
    elif [ -d "$_OPENCV_DIR" ]; then
        OPENCV_DIR=$_OPENCV_DIR
        echo "Found OPENCV_DIR= $OPENCV_DIR"
    else
        echo "Can't find opencv, please provide OPENCV_DIR or install it by install_opencv.sh"
        exit 1
    fi
fi
MMDEPLOY_DIR="$WORKSPACE/lib/cmake/MMDeploy"
BUILD_DIR="${WORKSPACE}/example/cpp/build"
# always start from a clean build tree
if [ -d "${BUILD_DIR}" ]; then
    rm -rf "${BUILD_DIR}"
fi
mkdir -p ${BUILD_DIR}
cd ${BUILD_DIR}
cmake .. -DMMDeploy_DIR="$MMDEPLOY_DIR" \
    -DOpenCV_DIR="${OPENCV_DIR}"
make -j $(nproc)
cd ${WORKSPACE}
# expose the built executables at ./bin
ln -sf ${BUILD_DIR} bin
#!/bin/bash
# Download, build and install OpenCV into ./thirdparty/opencv/install.
set -e
opencvVer="4.5.5"
WORKSPACE=$(realpath $(dirname "$0"))
THIRDPARTY_DIR="${WORKSPACE}/thirdparty"
if [ ! -d $THIRDPARTY_DIR ]; then
    echo $THIRDPARTY_DIR
    mkdir -p $THIRDPARTY_DIR
fi
pushd ${THIRDPARTY_DIR}
url="https://github.com/opencv/opencv/archive/refs/tags/$opencvVer.tar.gz"
wget $url
tar xf $opencvVer.tar.gz
mv opencv-$opencvVer opencv
pushd opencv
mkdir build
pushd build
cmake .. -DBUILD_TESTS=OFF -DBUILD_EXAMPLES=OFF -DCMAKE_INSTALL_PREFIX=../install
make -j$(nproc)
make install
# Fix: return to the starting directory by unwinding the three pushd calls
# above; the original `pushd -3` only rotated the directory stack.
popd
popd
popd
#!/bin/bash
# Export backend locations (onnxruntime / tensorrt / openvino) found under
# <workspace>/thirdparty so CMake and the dynamic loader can find them.
# Usage: source ./set_env.sh [workspace]
if [ -n "$1" ]; then
    WORKSPACE=$1
else
    WORKSPACE=$(realpath $(dirname "${BASH_SOURCE[0]}"))
fi
THIRDPARTY_DIR=$WORKSPACE/thirdparty
pushd $THIRDPARTY_DIR
if [ -d onnxruntime ]; then
    export ONNXRUNTIME_DIR=$THIRDPARTY_DIR/onnxruntime
    export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH
fi
if [ -d tensorrt ]; then
    export TENSORRT_DIR=$THIRDPARTY_DIR/tensorrt
    export LD_LIBRARY_PATH=$TENSORRT_DIR/lib:$LD_LIBRARY_PATH
fi
if [ -d openvino ]; then
    # Fix: the cmake dir lives inside the openvino tree; the original
    # pointed at $THIRDPARTY_DIR/runtime/cmake, which does not exist.
    export InferenceEngine_DIR=$THIRDPARTY_DIR/openvino/runtime/cmake
    sopaths=$(find $(pwd)/openvino -name "*.so" -exec dirname {} \; | uniq | tr '\n' ':')
    export LD_LIBRARY_PATH=$sopaths$LD_LIBRARY_PATH
fi
popd
# build sdk
1. open windows powerShell with administrator privileges
set-ExecutionPolicy RemoteSigned
2. install opencv (you can skip this step if you have installed it)
in sdk folder:
`.\install_opencv.ps1`
3. set environment variable and path
in sdk folder:
`. .\set_env.ps1`
if you use sdk cuda version:
(**you have to additionally install cuda and cudnn if use sdk cuda version**)
(**you may need to set the CUDNN environment variable to point to the cudnn root folder if you use the sdk cuda version**)
4. build sdk
in sdk folder:
`. .\build_sdk.ps1` \
(if you installed opencv by install_opencv.ps1)
or
`. .\build_sdk.ps1 "path/to/folder/of/OpenCVConfig.cmake"` \
(if you installed opencv yourself)
the executable will be generated in:
`example\cpp\build\Release`
# Build the SDK C++ examples against the packaged MMDeploy and OpenCV.
# Usage: . .\build_sdk.ps1 ["path\to\folder\of\OpenCVConfig.cmake"]
$ErrorActionPreference = 'Stop'
$WORKSPACE = $PSScriptRoot
$OPENCV_DIR = ""
if ($args.Count -gt 0) {
    # validate the user-supplied OpenCV directory
    $OPENCV_DIR = $args[0]
    if (-Not (Test-Path -Path $OPENCV_DIR -PathType Container)) {
        Write-Error "OPENCV_DIR $OPENCV_DIR doesn't exist"
        Exit 1
    }
    $OPENCV_CONFIG = [IO.PATH]::Combine("$OPENCV_DIR", "OpenCVConfig.cmake")
    if (-Not (Test-Path -Path $OPENCV_CONFIG -PathType Leaf)) {
        Write-Error "OPENCV_DIR $OPENCV_DIR doesn't contains OpenCVConfig.cmake"
        Exit 1
    }
}
if ($OPENCV_DIR -eq "") {
    # search thirdparty
    $THIRDPARTY_DIR = "${WORKSPACE}/thirdparty"
    $THIRD_OPENCV = [IO.Path]::Combine("$THIRDPARTY_DIR", "opencv", "install")
    if (-Not (Test-Path $THIRD_OPENCV -PathType Container)) {
        Write-Error "Can't find opencv, please provide OPENCV_DIR or install it by install_opencv.ps1"
        Exit 1
    }
    $OPENCV_DIR = $THIRD_OPENCV
}
$MMDEPLOY_DIR = [IO.Path]::Combine("$WORKSPACE", "lib", "cmake", "MMDeploy")
$BUILD_DIR = "${WORKSPACE}/example/cpp/build"
# always start from a clean build tree
if (Test-Path -Path $BUILD_DIR -PathType Container) {
    Remove-Item $BUILD_DIR -Recurse
}
New-Item -Path $BUILD_DIR -ItemType Directory
Push-Location $BUILD_DIR
Write-Host $MMDEPLOY_DIR
$MSVC_TOOLSET = "-T v142"
# Fix: when CUDA_PATH is undefined, $env:CUDA_PATH is $null and
# `$null -ne ""` is $true, so the original appended an empty ",cuda="
# toolset and broke configuration on machines without CUDA.
if ($env:CUDA_PATH) {
    $MSVC_TOOLSET = "$MSVC_TOOLSET,cuda=$env:CUDA_PATH"
    Write-Host $MSVC_TOOLSET
}
cmake .. -A x64 $MSVC_TOOLSET `
    -DMMDeploy_DIR="$MMDEPLOY_DIR" `
    -DOpenCV_DIR="$OPENCV_DIR"
cmake --build . --config Release
Pop-Location
# Download OpenCV 4.5.5, build it with MSVC and install it into
# ./thirdparty/opencv/install.
$opencvVer = "4.5.5"
# ----
$ErrorActionPreference = 'Stop'
$WORKSPACE = $PSScriptRoot
$THIRDPARTY_DIR = "${WORKSPACE}/thirdparty"
$OPENCV_DIR = "${THIRDPARTY_DIR}/opencv/install"
if (-Not (Test-Path -Path $THIRDPARTY_DIR -PathType Container)) {
    New-Item -Path $THIRDPARTY_DIR -ItemType Directory
}
Push-Location "${THIRDPARTY_DIR}"
$url = "https://github.com/opencv/opencv/archive/refs/tags/$opencvVer.zip"
$fileName = [IO.Path]::GetFileName($url)
Start-BitsTransfer $url $fileName
Expand-Archive -Path $fileName -DestinationPath "." -Force
Move-Item "opencv-$opencvVer" "opencv"
Push-Location "opencv"
New-Item -Path "build" -ItemType Directory
Push-Location build
# 64-bit MSVC v142 build without tests, installed into $OPENCV_DIR
cmake .. -A x64 -T v142 `
    -DBUILD_TESTS=OFF `
    -DBUILD_PERF_TESTS=OFF `
    -DCMAKE_INSTALL_PREFIX="${OPENCV_DIR}"
cmake --build . --config Release -j6
cmake --install . --config Release
Pop-Location
Pop-Location
Pop-Location
# Add the bundled backends (onnxruntime / tensorrt / openvino) and ./bin
# to the environment of the current PowerShell session.
$WORKSPACE = $PSScriptRoot
$THIRDPARTY_DIR = "${WORKSPACE}/thirdparty"
Push-Location $THIRDPARTY_DIR
if (Test-Path -Path "onnxruntime" -PathType Container) {
    $dir = [IO.Path]::Combine("$pwd", "onnxruntime")
    $env:ONNXRUNTIME_DIR = $dir
    $path = [IO.Path]::Combine("$dir", "lib")
    $env:PATH = "$path;$env:PATH"
}
if (Test-Path -Path "tensorrt" -PathType Container) {
    $dir = [IO.Path]::Combine("$pwd", "tensorrt")
    $env:TENSORRT_DIR = $dir
    $path = [IO.Path]::Combine("$dir", "lib")
    $env:PATH = "$path;$env:PATH"
}
if (Test-Path -Path "openvino" -PathType Container) {
    $root = [IO.Path]::Combine("$pwd", "openvino")
    # Fix: the original passed the literal string "root" instead of the
    # $root variable, producing a bogus InferenceEngine_DIR.
    $dir = [IO.Path]::Combine($root, "runtime", "cmake")
    $env:InferenceEngine_DIR = $dir
    $paths = Get-ChildItem -Path $root -Filter "*.dll" -Recurse | `
        ForEach-Object { $_.Directory.FullName } | Get-Unique
    foreach ($path in $paths) {
        $env:PATH = "$path;$env:PATH"
        Write-Host $path
    }
}
$path = [IO.Path]::Combine("$WORKSPACE", "bin")
$env:PATH = "$path;$env:PATH"
Pop-Location
# Smoke-test a packaged Windows SDK: pick a windows-amd64 package, build the
# C++ examples in a scratch copy and run the classifier demo.
$ErrorActionPreference = 'Stop'
$WORKSPACE = ""
$MODEL_DIR = "D:\DEPS\citest\mmcls"
$SDK_DIR = "sdk"
if ($args.Count -gt 0) {
    $WORKSPACE = $args[0]
}
Push-Location $WORKSPACE
Push-Location $SDK_DIR
$pkgs = $(ls).Name
$test_pkg = $pkgs[0]
if ($pkgs.Count -gt 1) {
    foreach ($pkg in $pkgs) {
        if ($pkg -like '*-windows-amd64') {
            $test_pkg = $pkg
            break
        }
    }
}
# work in a scratch copy so the original package stays pristine
$work_dir=[IO.Path]::Combine("$env:TMP", [guid]::NewGuid().ToString())
Copy-item $test_pkg $work_dir -Recurse
Push-Location $work_dir
# opencv
# Fix: Test-Path throws under $ErrorActionPreference='Stop' when
# OpenCV_DIR is undefined ($null); check the variable first.
if (-Not ($env:OpenCV_DIR -and (Test-Path $env:OpenCV_DIR))) {
    .\install_opencv.ps1
}
# env
. .\set_env.ps1
# build
.\build_sdk.ps1 $env:OpenCV_DIR
# run
# Fix: use $MODEL_DIR instead of duplicating the hard-coded model path.
.\example\cpp\build\Release\classifier.exe "$MODEL_DIR" "$MODEL_DIR\demo.jpg"
Pop-Location
Remove-Item $work_dir -Recurse
Pop-Location
Pop-Location
# Smoke-test a packaged Linux SDK: build the C++ examples in a scratch copy
# and run the classifier demo.
set -e
WORKSPACE="."
MODEL_DIR="/__w/mmdeploy/testmodel/mmcls"
SDK_DIR="sdk"
if [[ -n "$1" ]]; then
    WORKSPACE=$1
fi
pushd $WORKSPACE
pushd $SDK_DIR
# pick the x86_64 package directory
test_pkg=$(find "." -type d -iname "*-x86_64")
work_dir=/tmp/_test
cp -r $test_pkg $work_dir
pushd $work_dir
# opencv
if [ ! -d "$OpenCV_DIR" ]; then
    ./install_opencv.sh
fi
# env
source ./set_env.sh $(pwd)
# build
./build_sdk.sh $OpenCV_DIR
# run
./bin/classifier $MODEL_DIR $MODEL_DIR/demo.jpg
popd
rm -rf $work_dir
# Smoke-test the mmdeploy_runtime wheel: install the cp38 win_amd64 wheel
# and run a classifier through the python API.
$ErrorActionPreference = 'Stop'
$WORKSPACE = ""
$MODEL_DIR = "D:\DEPS\citest\mmcls"
$SDK_PYTHON_DIR = "mmdeploy_runtime"
if ($args.Count -gt 0) {
    $WORKSPACE = $args[0]
}
Push-Location $WORKSPACE
Push-Location $SDK_PYTHON_DIR
$pkgs = $(ls).Name
$test_pkg = ""
# Fix: the original guard was `-gt 1`, which left $test_pkg empty (and pip
# failing) when exactly one wheel was present.
if ($pkgs.Count -gt 0) {
    foreach ($pkg in $pkgs) {
        if ($pkg -like 'mmdeploy_runtime-*cp38*-win_amd64.whl') {
            $test_pkg = $pkg
            break
        }
    }
}
pip install $test_pkg --force-reinstall
$code = "
import cv2
from mmdeploy_runtime import Classifier
import sys
handle = Classifier('$MODEL_DIR', 'cpu', 0)
img = cv2.imread('$MODEL_DIR\demo.jpg')
try:
    res = handle(img)
    print(res)
except:
    print('error')
    sys.exit(1)
"
python -c $code
Pop-Location
Pop-Location
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment