Commit 502f4fb9 authored by limm

add tools and service module

parent 68661967
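# Smoke test for the mmdeploy_runtime Python wheel: install the wheel matching
# the local Python version, then run a Classifier on a demo image.
# Usage (assumed; the workspace argument is optional): bash <this_script>.sh [WORKSPACE]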
set -e
WORKSPACE="."
MODEL_DIR="/__w/mmdeploy/testmodel/mmcls"
SDK_PYTHON_DIR="mmdeploy_runtime"
if [[ -n "$1" ]]; then
WORKSPACE=$1
fi
cd $WORKSPACE
cd $SDK_PYTHON_DIR
PY_VERSION=$(python3 -V | awk '{print $2}' | awk '{split($0, a, "."); print a[1]a[2]}')
test_pkg=$(ls | grep "mmdeploy_runtime-.*cp${PY_VERSION}.*x86_64\.whl")
python3 -m pip install $test_pkg --force-reinstall
python3 -m pip install opencv-python
code="
import cv2
from mmdeploy_runtime import Classifier
import sys
handle = Classifier('$MODEL_DIR', 'cpu', 0)
img = cv2.imread('$MODEL_DIR/demo.jpg')
try:
res = handle(img)
print(res)
except:
print('error')
sys.exit(1)
"
python3 -c "$code"
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
import numpy as np
import torch
from mmengine import DictAction
from prettytable import PrettyTable
from mmdeploy.apis import build_task_processor
from mmdeploy.utils import get_root_logger
from mmdeploy.utils.config_utils import (Backend, get_backend, get_input_shape,
load_config)
from mmdeploy.utils.timer import TimeCounter
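# Example invocation (paths are illustrative, not part of this change):
#   python3 tools/profiler.py ${DEPLOY_CFG} ${MODEL_CFG} ./images \
#       --model end2end.engine --device cuda:0 --shape 800x1344 --num-iter 200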
def parse_args():
parser = argparse.ArgumentParser(
description='MMDeploy Model Latency Test Tool.')
parser.add_argument('deploy_cfg', help='Deploy config path')
parser.add_argument('model_cfg', help='Model config path')
parser.add_argument('image_dir', help='Input directory to image files')
parser.add_argument(
'--model', type=str, nargs='+', help='Input model files.')
parser.add_argument(
'--device', help='device type for inference', default='cuda:0')
parser.add_argument(
'--shape',
type=str,
help='Input shape to test in `HxW` format, e.g., `800x1344`',
default=None)
parser.add_argument(
'--warmup',
type=int,
help='warmup iterations before counting inference latency.',
default=10)
parser.add_argument(
'--num-iter',
type=int,
help='Number of iterations to run the inference.',
default=100)
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--batch-size', type=int, default=1, help='the batch size for test.')
parser.add_argument(
'--img-ext',
type=str,
nargs='+',
help='the file extensions for input images from `image_dir`.',
default=['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif'])
args = parser.parse_args()
return args
def get_images(image_dir, extensions):
images = []
files = glob.glob(osp.join(image_dir, '**', '*'), recursive=True)
for f in files:
_, ext = osp.splitext(f)
if ext.lower() in extensions:
images.append(f)
return images
class TorchWrapper(torch.nn.Module):
def __init__(self, model):
super(TorchWrapper, self).__init__()
self.model = model
@TimeCounter.count_time(Backend.PYTORCH.value)
def test_step(self, *args, **kwargs):
return self.model.test_step(*args, **kwargs)
def main():
args = parse_args()
deploy_cfg_path = args.deploy_cfg
model_cfg_path = args.model_cfg
logger = get_root_logger()
# load deploy_cfg
deploy_cfg, model_cfg = load_config(deploy_cfg_path, model_cfg_path)
# merge options for model cfg
if args.cfg_options is not None:
model_cfg.merge_from_dict(args.cfg_options)
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
if args.shape is not None:
h, w = [int(_) for _ in args.shape.split('x')]
input_shape = [w, h]
else:
input_shape = get_input_shape(deploy_cfg)
assert input_shape is not None, 'Input_shape should not be None'
    # create model and inputs
task_processor = build_task_processor(model_cfg, deploy_cfg, args.device)
model_ext = osp.splitext(args.model[0])[1]
is_pytorch = model_ext in ['.pth', '.pt']
if is_pytorch:
# load pytorch model
model = task_processor.build_pytorch_model(args.model[0])
model = TorchWrapper(model)
backend = Backend.PYTORCH.value
else:
# load the model of the backend
model = task_processor.build_backend_model(args.model)
backend = get_backend(deploy_cfg).value
model = model.eval().to(args.device)
is_device_cpu = args.device == 'cpu'
with_sync = not is_device_cpu
if not is_device_cpu:
torch.backends.cudnn.benchmark = True
image_files = get_images(args.image_dir, args.img_ext)
nrof_image = len(image_files)
assert nrof_image > 0, f'No image files found in {args.image_dir}'
    logger.info(f'Found {nrof_image} image files in {args.image_dir}')
total_nrof_image = (args.num_iter + args.warmup) * args.batch_size
if nrof_image < total_nrof_image:
np.random.seed(1234)
image_files += [
image_files[i]
for i in np.random.choice(nrof_image, total_nrof_image -
nrof_image)
]
image_files = image_files[:total_nrof_image]
with TimeCounter.activate(
warmup=args.warmup,
log_interval=20,
with_sync=with_sync,
batch_size=args.batch_size):
for i in range(0, total_nrof_image, args.batch_size):
batch_files = image_files[i:(i + args.batch_size)]
data, _ = task_processor.create_input(
batch_files,
input_shape,
data_preprocessor=getattr(model, 'data_preprocessor', None))
model.test_step(data)
print('----- Settings:')
settings = PrettyTable()
settings.header = False
settings.add_row(['batch size', args.batch_size])
settings.add_row(['shape', f'{input_shape[1]}x{input_shape[0]}'])
settings.add_row(['iterations', args.num_iter])
settings.add_row(['warmup', args.warmup])
print(settings)
print('----- Results:')
TimeCounter.print_stats(backend)
if __name__ == '__main__':
main()
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence
import mmcv
from mmengine import Config, FileClient
from torch.utils.data import Dataset
from mmdeploy.apis import build_task_processor
class QuantizationImageDataset(Dataset):
def __init__(
self,
path: str,
deploy_cfg: Config,
model_cfg: Config,
file_client_args: Optional[dict] = None,
extensions: Sequence[str] = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp',
'.pgm', '.tif'),
):
super().__init__()
task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
self.task_processor = task_processor
self.samples = []
self.extensions = tuple(set([i.lower() for i in extensions]))
self.file_client = FileClient.infer_client(file_client_args, path)
self.path = path
assert self.file_client.isdir(path)
files = list(
self.file_client.list_dir_or_file(
path,
list_dir=False,
list_file=True,
recursive=False,
))
for file in files:
if self.is_valid_file(self.file_client.join_path(file)):
path = self.file_client.join_path(self.path, file)
self.samples.append(path)
def __len__(self):
return len(self.samples)
def __getitem__(self, index):
sample = self.samples[index]
image = mmcv.imread(sample)
data = self.task_processor.create_input(image)
return data[0]
def is_valid_file(self, filename: str) -> bool:
"""Check if a file is a valid sample."""
return filename.lower().endswith(self.extensions)
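# Minimal usage sketch (config paths are hypothetical), e.g. to feed an int8
# calibration loop:
#
#   from mmengine import Config
#   from torch.utils.data import DataLoader
#   deploy_cfg = Config.fromfile('deploy_cfg.py')
#   model_cfg = Config.fromfile('model_cfg.py')
#   dataset = QuantizationImageDataset('calib_images/', deploy_cfg, model_cfg)
#   loader = DataLoader(dataset, batch_size=1)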
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import logging
import os
import subprocess
from datetime import datetime
from pathlib import Path
from typing import List, Union
import mmengine
import openpyxl
import pandas as pd
import yaml
from torch.hub import download_url_to_file
from torch.multiprocessing import set_start_method
from tqdm import tqdm
import mmdeploy.version
from mmdeploy.utils import (get_backend, get_codebase, get_root_logger,
is_dynamic_shape)
def parse_args():
parser = argparse.ArgumentParser(description='Regression Test')
parser.add_argument(
'--codebase',
nargs='+',
help='regression test yaml path.',
default=[
'mmpretrain', 'mmdet', 'mmseg', 'mmpose', 'mmocr', 'mmagic',
'mmrotate', 'mmdet3d'
])
parser.add_argument(
'-p',
'--performance',
default=False,
action='store_true',
        help='test performance if set')
parser.add_argument(
'--backends', nargs='+', help='test specific backend(s)')
parser.add_argument(
'--models', nargs='+', default=['all'], help='test specific model(s)')
parser.add_argument(
'--work-dir',
type=str,
help='the dir to save logs and models',
default='../mmdeploy_regression_working_dir')
parser.add_argument(
'--checkpoint-dir',
type=str,
        help='the dir to save checkpoints for all models',
default='../mmdeploy_checkpoints')
parser.add_argument(
'--device',
type=str,
help='Device type, cuda:id or cpu, cuda:0 as default',
default='cuda:0')
parser.add_argument(
'--log-level',
help='set log level',
default='INFO',
choices=list(logging._nameToLevel.keys()))
args = parser.parse_args()
return args
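# Example invocations (illustrative):
#   python3 tools/regression_test.py --codebase mmdet --backends tensorrt -p
#   python3 tools/regression_test.py --codebase mmpretrain --models resnet --device cpu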
def merge_report(work_dir: str, logger: logging.Logger):
"""Merge all the report into one report.
Args:
        work_dir (str): Work dir that contains all reports.
logger (logging.Logger): Logger handler.
"""
work_dir = Path(work_dir)
res_file = work_dir.joinpath(
f'mmdeploy_regression_test_{mmdeploy.version.__version__}.xlsx')
    logger.info(f'Saving whole result report to {res_file}')
if res_file.exists():
# delete if it existed
res_file.unlink()
for report_file in work_dir.iterdir():
if report_file.name.startswith('.~'):
# skip unclosed temp file
continue
if '_report.xlsx' not in report_file.name or \
report_file.name == res_file.name or \
not report_file.is_file():
# skip other file
continue
# get info from report
logger.info(f'Merging {report_file}')
df = pd.read_excel(str(report_file))
df.rename(columns={'Unnamed: 0': 'Index'}, inplace=True)
# get key then convert to list
keys = list(df.keys())
values = df.values.tolist()
# sheet name
sheet_name = report_file.stem.split('_')[0]
# begin to write
if res_file.exists():
# load if it existed
wb = openpyxl.load_workbook(str(res_file))
else:
wb = openpyxl.Workbook()
# delete if sheet already exist
if sheet_name in wb.sheetnames:
wb.remove(wb[sheet_name])
# create sheet
wb.create_sheet(title=sheet_name, index=0)
# write in row
wb[sheet_name].append(keys)
for value in values:
wb[sheet_name].append(value)
# delete the blank sheet
for name in wb.sheetnames:
ws = wb[name]
if ws.cell(1, 1).value is None:
wb.remove(ws)
# save to file
wb.save(str(res_file))
logger.info('Report merge successful.')
def get_model_metafile_info(global_info: dict, model_info: dict,
logger: logging.Logger):
"""Get model metafile information.
Args:
global_info (dict): global info from deploy yaml.
model_info (dict): model info from deploy yaml.
logger (logging.Logger): Logger handler.
Returns:
Dict: Meta info of each model config
"""
# get info from global_info and model_info
checkpoint_dir = global_info.get('checkpoint_dir', None)
assert checkpoint_dir is not None
codebase_dir = global_info.get('codebase_dir', None)
assert codebase_dir is not None
codebase_name = global_info.get('codebase_name', None)
assert codebase_name is not None
model_config_files = model_info.get('model_configs', [])
assert len(model_config_files) > 0
# make checkpoint save directory
model_name = _filter_string(model_info.get('name', 'model'))
checkpoint_save_dir = Path(checkpoint_dir).joinpath(
codebase_name, model_name)
checkpoint_save_dir.mkdir(parents=True, exist_ok=True)
logger.info(f'Saving checkpoint in {checkpoint_save_dir}')
# get model metafile info
metafile_path = Path(codebase_dir).joinpath(model_info.get('metafile'))
if not metafile_path.exists():
        logger.warning(f'Metafile does not exist: {metafile_path}')
return [], '', ''
with open(metafile_path) as f:
metafile_info = yaml.load(f, Loader=yaml.FullLoader)
model_meta_info = dict()
for meta_model in metafile_info.get('Models'):
if str(meta_model.get('Config')) not in model_config_files:
# skip if the model not in model_config_files
logger.warning(f'{meta_model.get("Config")} '
f'not in {model_config_files}, pls check ! '
'Skip it...')
continue
# get meta info
model_meta_info.update({meta_model.get('Config'): meta_model})
# get weight url
weights_url = meta_model.get('Weights')
weights_name = str(weights_url).split('/')[-1]
weights_save_path = checkpoint_save_dir.joinpath(weights_name)
if weights_save_path.exists() and \
not global_info.get('checkpoint_force_download', False):
            logger.info(f'model {weights_name} exists, skip downloading it...')
continue
# Download weight
logger.info(f'Downloading {weights_url} to {weights_save_path}')
download_url_to_file(
weights_url, str(weights_save_path), progress=True)
        # check whether the weights were downloaded successfully
if not weights_save_path.exists():
raise FileExistsError(f'Weight {weights_name} download fail')
    logger.info('All models have been downloaded successfully!')
return model_meta_info, checkpoint_save_dir, codebase_dir
def update_report(report_dict: dict, model_name: str, model_config: str,
task_name: str, checkpoint: str, dataset: str,
backend_name: str, deploy_config: str,
static_or_dynamic: str, precision_type: str,
conversion_result: str, fps: str, metric_info: list,
test_pass: str, report_txt_path: Path, codebase_name: str):
"""Update report information.
Args:
report_dict (dict): Report info dict.
model_name (str): Model name.
model_config (str): Model config name.
task_name (str): Task name.
checkpoint (str): Model checkpoint name.
dataset (str): Dataset name.
backend_name (str): Backend name.
deploy_config (str): Deploy config name.
static_or_dynamic (str): Static or dynamic.
precision_type (str): Precision type of the model.
conversion_result (str): Conversion result: Successful or Fail.
fps (str): Inference speed (ms/im).
metric_info (list): Metric info list of the ${modelName}.yml.
test_pass (str): Test result: Pass or Fail.
report_txt_path (Path): Report txt path.
codebase_name (str): Codebase name.
"""
# save to tmp file
tmp_str = f'{model_name},{model_config},{task_name},{checkpoint},' \
f'{dataset},{backend_name},{deploy_config},' \
f'{static_or_dynamic},{precision_type},{conversion_result},'
# save to report
report_dict.get('Model').append(model_name)
report_dict.get('Model Config').append(model_config)
report_dict.get('Task').append(task_name)
report_dict.get('Checkpoint').append(checkpoint)
report_dict.get('Dataset').append(dataset)
report_dict.get('Backend').append(backend_name)
report_dict.get('Deploy Config').append(deploy_config)
report_dict.get('Static or Dynamic').append(static_or_dynamic)
report_dict.get('Precision Type').append(precision_type)
report_dict.get('Conversion Result').append(conversion_result)
# report_dict.get('FPS').append(fps)
for metric in metric_info:
for metric_name, metric_value in metric.items():
metric_name = str(metric_name)
report_dict.get(metric_name).append(metric_value)
tmp_str += f'{metric_value},'
report_dict.get('Test Pass').append(test_pass)
tmp_str += f'{test_pass}\n'
with open(report_txt_path, 'a+', encoding='utf-8') as f:
f.write(tmp_str)
def get_pytorch_result(model_name: str, meta_info: dict, checkpoint_path: Path,
model_config_path: Path, model_config_name: str,
test_yaml_metric_info: dict, report_dict: dict,
logger: logging.Logger, report_txt_path: Path,
codebase_name: str):
"""Get metric from metafile info of the model.
Args:
model_name (str): Name of model.
meta_info (dict): Metafile info from model's metafile.yml.
checkpoint_path (Path): Checkpoint path.
model_config_path (Path): Model config path.
model_config_name (str): Name of model config in meta_info.
test_yaml_metric_info (dict): Metrics info from test yaml.
report_dict (dict): Report info dict.
logger (logging.Logger): Logger.
report_txt_path (Path): Report txt path.
codebase_name (str): Codebase name.
Returns:
Dict: metric info of the model
"""
if model_config_name not in meta_info:
logger.warning(
f'{model_config_name} not in meta_info, which is {meta_info}')
return {}
# get metric
model_info = meta_info[model_config_name]
metafile_metric_info = model_info['Results']
# deal with mmseg case
if not isinstance(metafile_metric_info, (list, tuple)):
metafile_metric_info = [metafile_metric_info]
pytorch_metric = dict()
using_dataset = set()
using_task = set()
datasets = []
# Get metrics info from metafile
for metafile_metric in metafile_metric_info:
task_name = metafile_metric['Task']
dataset = metafile_metric['Dataset']
        # check if the metafile uses the same metric on several datasets (mmagic)
task_info = set([_['Task'] for _ in metafile_metric_info])
if len(metafile_metric_info) > 1 and len(task_info) == 1:
for k, v in metafile_metric['Metrics'].items():
pytorch_metric[f'{dataset} {k}'] = v
else:
pytorch_metric.update(metafile_metric['Metrics'])
datasets.append(dataset)
using_task.add(task_name)
using_dataset.add(dataset)
dataset_type = '+'.join(list(using_dataset))
task_type = '+'.join(list(using_task))
metric_list = []
for metric, metric_info in test_yaml_metric_info.items():
value = '-'
if metric in pytorch_metric:
if 'dataset' in metric_info and metric_info['dataset'] in datasets:
idx = datasets.index(metric_info['dataset'])
pytorch_metric.update(metafile_metric_info[idx]['Metrics'])
value = pytorch_metric[metric]
metric_list.append({metric: value})
valid_pytorch_metric = {
k: v
for k, v in pytorch_metric.items() if k in test_yaml_metric_info
}
# get pytorch fps value
fps_info = model_info.get('Metadata', {}).get('inference time (ms/im)')
if fps_info is None:
fps = '-'
elif isinstance(fps_info, list):
fps = fps_info[0].get('value')
else:
fps = fps_info.get('value')
logger.info(f'Got metric_list = {metric_list} ')
logger.info(f'Got pytorch_metric = {pytorch_metric} ')
# update report
update_report(
report_dict=report_dict,
model_name=model_name,
model_config=str(model_config_path),
task_name=task_type,
checkpoint=str(checkpoint_path),
dataset=dataset_type,
backend_name='Pytorch',
deploy_config='-',
static_or_dynamic='-',
precision_type='-',
conversion_result='-',
fps=fps,
metric_info=metric_list,
test_pass='-',
report_txt_path=report_txt_path,
codebase_name=codebase_name)
logger.info(f'Got {model_config_path} metric: {valid_pytorch_metric}')
dataset_info = dict(dataset=dataset_type, task=task_type)
return valid_pytorch_metric, dataset_info
def parse_test_log(work_dir: str) -> dict:
"""Parse metrics result from output json file.
Args:
work_dir: work directory that has output json file.
Returns:
dict: metric results
"""
logger = get_root_logger()
json_files = glob.glob(os.path.join(work_dir, '*', '*.json'))
json_path = None
newest_date = None
# filter json and get latest json file
for f in json_files:
fname = os.path.split(f)[1].strip('.json')
try:
date = datetime.strptime(fname, '%Y%m%d_%H%M%S')
if newest_date is None:
newest_date = date
json_path = f
elif date > newest_date:
newest_date = date
json_path = f
except Exception:
pass
if (not os.path.exists(work_dir)) or json_path is None:
        logger.warning(f'No json files found in {work_dir}')
result = {}
else:
logger.info(f'Parse test result from {json_path}')
result = mmengine.load(json_path)
return result
def get_fps_metric(shell_res: int, pytorch_metric: dict, metric_info: dict,
work_path: Path):
"""Get fps and metric.
Args:
shell_res (int): Backend convert result: 0 is success.
pytorch_metric (dict): Metric info of pytorch metafile.
work_path (Path): Logger path.
metric_info (dict): Metric info.
Returns:
Float: fps: FPS of the model.
List: metric_list: metric result list.
Bool: test_pass: If the test pass or not.
"""
    # check whether the conversion succeeded or not.
fps = '-'
if shell_res != 0:
backend_results = {}
else:
backend_results = parse_test_log(work_path)
compare_results = {}
output_result = {}
for metric_name, metric_value in pytorch_metric.items():
metric_key = metric_info[metric_name]['metric_key']
tolerance = metric_info[metric_name]['tolerance']
multi_value = metric_info[metric_name].get('multi_value', 1.0)
compare_flag = False
output_result[metric_name] = 'x'
if metric_key in backend_results:
backend_value = backend_results[metric_key] * multi_value
output_result[metric_name] = backend_value
if backend_value >= metric_value - tolerance:
compare_flag = True
compare_results[metric_name] = compare_flag
if len(compare_results):
test_pass = all(list(compare_results.values()))
else:
test_pass = False
return fps, output_result, test_pass
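# For reference, `metric_info` from the regression test yaml is assumed to look
# roughly like this (keys inferred from the accesses above; values illustrative):
#   {'Top 1 Accuracy': {'metric_key': 'accuracy/top1', 'tolerance': 1.0,
#                       'multi_value': 1.0, 'dataset': 'ImageNet-1k'}}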
def get_backend_fps_metric(deploy_cfg_path: str, model_cfg_path: Path,
convert_checkpoint_path: str, device_type: str,
logger: logging.Logger, pytorch_metric: dict,
metric_info: dict, backend_name: str,
precision_type: str, convert_result: bool,
report_dict: dict, infer_type: str, log_path: Path,
dataset_info: dict, report_txt_path: Path,
model_name: str):
"""Get backend fps and metric.
Args:
deploy_cfg_path (str): Deploy config path.
model_cfg_path (Path): Model config path.
convert_checkpoint_path (str): Converted checkpoint path.
device_type (str): Device for converting.
logger (logging.Logger): Logger handler.
pytorch_metric (dict): Pytorch metric info dict get from metafile.
metric_info (dict): Metric info from test yaml.
backend_name (str): Backend name.
precision_type (str): Precision type for evaluation.
convert_result (bool): Backend convert result.
        report_dict (dict): Report info dict.
infer_type (str): Infer type.
log_path (Path): Logger save path.
dataset_info (dict): Dataset info.
report_txt_path (Path): report txt save path.
model_name (str): Name of model in test yaml.
"""
work_dir = log_path.parent.joinpath('test_logs')
if not work_dir.exists():
work_dir.mkdir(parents=True, exist_ok=True)
cmd_lines = [
'python3 tools/test.py', f'{deploy_cfg_path}', f'{model_cfg_path}',
f'--model {convert_checkpoint_path}', f'--work-dir "{work_dir}"',
'--speed-test', f'--device {device_type}'
]
codebase_name = get_codebase(str(deploy_cfg_path)).value
# to stop Dataloader OOM in docker CI
if codebase_name not in ['mmagic', 'mmocr', 'mmpretrain']:
cfg_options = 'test_dataloader.num_workers=0 ' \
'test_dataloader.persistent_workers=False ' \
'val_dataloader.num_workers=0 ' \
'val_dataloader.persistent_workers=False '
cmd_lines.append(f'--cfg-options {cfg_options}')
# Test backend
return_code = run_cmd(cmd_lines, log_path)
fps, backend_metric, test_pass = get_fps_metric(return_code,
pytorch_metric,
metric_info, work_dir)
logger.info(f'test_pass= {test_pass}, results= {backend_metric}')
metric_list = []
for metric in metric_info:
value = '-'
if metric in backend_metric:
value = backend_metric[metric]
metric_list.append({metric: value})
dataset_type = dataset_info['dataset']
task_name = dataset_info['task']
# update the report
update_report(
report_dict=report_dict,
model_name=model_name,
model_config=str(model_cfg_path),
task_name=task_name,
checkpoint=convert_checkpoint_path,
dataset=dataset_type,
backend_name=backend_name,
deploy_config=str(deploy_cfg_path),
static_or_dynamic=infer_type,
precision_type=precision_type,
conversion_result=str(convert_result),
fps=fps,
metric_info=metric_list,
test_pass=str(test_pass),
report_txt_path=report_txt_path,
codebase_name=codebase_name)
def get_precision_type(deploy_cfg_name: str):
"""Get backend precision_type according to the name of deploy config.
Args:
deploy_cfg_name (str): Name of the deploy config.
Returns:
Str: precision_type: Precision type of the deployment name.
"""
if 'int8' in deploy_cfg_name:
precision_type = 'int8'
elif 'fp16' in deploy_cfg_name:
precision_type = 'fp16'
else:
precision_type = 'fp32'
return precision_type
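# e.g. get_precision_type('classification_tensorrt-int8_static-224x224.py')
# returns 'int8' (config name is illustrative).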
def replace_top_in_pipeline_json(backend_output_path: Path,
logger: logging.Logger):
"""Replace `topk` with `num_classes` in `pipeline.json`.
Args:
backend_output_path (Path): Backend convert result path.
        logger (logging.Logger): Logger handler.
"""
sdk_pipeline_json_path = backend_output_path.joinpath('pipeline.json')
sdk_pipeline_json = mmengine.load(sdk_pipeline_json_path)
pipeline_tasks = sdk_pipeline_json.get('pipeline', {}).get('tasks', [])
for index, task in enumerate(pipeline_tasks):
if task.get('name', '') != 'postprocess':
continue
num_classes = task.get('params', {}).get('num_classes', 0)
if 'topk' not in task.get('params', {}):
continue
sdk_pipeline_json['pipeline']['tasks'][index]['params']['topk'] = \
num_classes
logger.info(f'sdk_pipeline_json = {sdk_pipeline_json}')
mmengine.dump(
sdk_pipeline_json, sdk_pipeline_json_path, sort_keys=False, indent=4)
logger.info('replace done')
def gen_log_path(backend_output_path: Path, log_name: str):
if not backend_output_path.exists():
backend_output_path.mkdir(parents=True, exist_ok=True)
log_path = backend_output_path.joinpath(log_name)
if log_path.exists():
os.remove(str(log_path))
return log_path
def run_cmd(cmd_lines: List[str], log_path: Path):
"""
Args:
        cmd_lines (List[str]): A command split across multiple lines.
log_path (Path): Path to log file.
Returns:
int: error code.
"""
import platform
system = platform.system().lower()
if system == 'windows':
sep = r'`'
else: # 'Linux', 'Darwin'
sep = '\\'
cmd_for_run = ' '.join(cmd_lines)
cmd_for_log = f' {sep}\n'.join(cmd_lines) + '\n'
parent_path = log_path.parent
if not parent_path.exists():
parent_path.mkdir(parents=True, exist_ok=True)
logger = get_root_logger()
logger.info(100 * '-')
logger.info(f'Start running cmd\n{cmd_for_log}')
logger.info(f'Logging log to \n{log_path}')
with open(log_path, 'w', encoding='utf-8') as file_handler:
# write cmd
file_handler.write(f'Command:\n{cmd_for_log}\n')
file_handler.flush()
process_res = subprocess.Popen(
cmd_for_run,
cwd=str(Path(__file__).absolute().parent.parent),
shell=True,
stdout=file_handler,
stderr=file_handler)
process_res.wait()
return_code = process_res.returncode
if return_code != 0:
logger.error(f'Got shell return code={return_code}')
with open(log_path, 'r') as f:
content = f.read()
logger.error(f'Log error message\n{content}')
return return_code
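# e.g. run_cmd(['python3 tools/check_env.py'], Path('logs/check_env.txt')) runs the
# command from the repo root and writes its output to the (hypothetical) log file.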
def get_backend_result(pipeline_info: dict, model_cfg_path: Path,
checkpoint_path: Path, work_dir: Path, device_type: str,
pytorch_metric: dict, metric_info: dict,
report_dict: dict, test_type: str,
logger: logging.Logger, backend_file_name: Union[str,
list],
report_txt_path: Path, metafile_dataset: str,
model_name: str):
"""Convert model to onnx and then get metric.
Args:
pipeline_info (dict): Pipeline info of test yaml.
model_cfg_path (Path): Model config file path.
checkpoint_path (Path): Checkpoints path.
work_dir (Path): A working directory.
device_type (str): A string specifying device, defaults to 'cuda'.
pytorch_metric (dict): All pytorch metric info.
metric_info (dict): Metrics info.
report_dict (dict): Report info dict.
test_type (str): Test type. 'precision' or 'convert'.
logger (logging.Logger): Logger.
backend_file_name (str | list): backend file save name.
report_txt_path (Path): report txt path.
metafile_dataset (str): Dataset type get from metafile.
model_name (str): Name of model in test yaml.
"""
# get backend_test info
backend_test = pipeline_info.get('backend_test', False)
# get convert_image info
convert_image_info = pipeline_info.get('convert_image', None)
if convert_image_info is not None:
input_img_path = \
convert_image_info.get('input_img', './tests/data/tiger.jpeg')
test_img_path = convert_image_info.get('test_img', None)
else:
input_img_path = './tests/data/tiger.jpeg'
test_img_path = None
# get sdk_cfg info
sdk_config = pipeline_info.get('sdk_config', None)
if sdk_config is not None:
sdk_config = Path(sdk_config)
# Overwrite metric tolerance
metric_tolerance = pipeline_info.get('metric_tolerance', None)
if metric_tolerance is not None:
for metric, new_tolerance in metric_tolerance.items():
if metric not in metric_info:
logger.debug(f'{metric} not in {metric_info},'
'skip compare it...')
continue
if new_tolerance is None:
logger.debug('new_tolerance is None, skip it ...')
continue
metric_info[metric]['tolerance'] = new_tolerance
if backend_test is False and sdk_config is None:
test_type = 'convert'
deploy_cfg_path = Path(pipeline_info.get('deploy_config'))
backend_name = str(get_backend(str(deploy_cfg_path)).name).lower()
# change device_type for special case
if backend_name in ['ncnn', 'openvino']:
device_type = 'cpu'
elif backend_name == 'onnxruntime' and device_type != 'cpu':
import onnxruntime as ort
if ort.get_device() != 'GPU':
device_type = 'cpu'
logger.warning('Device type is forced to cpu '
'since onnxruntime-gpu is not installed')
infer_type = \
'dynamic' if is_dynamic_shape(str(deploy_cfg_path)) else 'static'
precision_type = get_precision_type(deploy_cfg_path.name)
codebase_name = get_codebase(str(deploy_cfg_path)).value
backend_output_path = Path(work_dir). \
joinpath(Path(checkpoint_path).parent.parent.name,
Path(checkpoint_path).parent.name,
backend_name,
infer_type,
precision_type,
Path(checkpoint_path).stem)
backend_output_path.mkdir(parents=True, exist_ok=True)
# convert cmd lines
cmd_lines = [
'python3 ./tools/deploy.py', f'{deploy_cfg_path}', f'{model_cfg_path}',
f'"{checkpoint_path}"', f'"{input_img_path}"',
f'--work-dir "{backend_output_path}"', f'--device {device_type}',
'--log-level INFO'
]
if sdk_config is not None and test_type == 'precision':
cmd_lines += ['--dump-info']
if test_img_path is not None:
cmd_lines += [f'--test-img {test_img_path}']
if precision_type == 'int8':
calib_dataset_cfg = pipeline_info.get('calib_dataset_cfg', None)
if calib_dataset_cfg is not None:
cmd_lines += [f'--calib-dataset-cfg {calib_dataset_cfg}']
convert_log_path = backend_output_path.joinpath('convert_log.txt')
return_code = run_cmd(cmd_lines, convert_log_path)
convert_result = return_code == 0
logger.info(f'Got convert_result = {convert_result}')
if isinstance(backend_file_name, list):
report_checkpoint = backend_output_path.joinpath(backend_file_name[0])
convert_checkpoint_path = ''
for backend_file in backend_file_name:
backend_path = backend_output_path.joinpath(backend_file)
convert_checkpoint_path += f'{backend_path} '
else:
report_checkpoint = backend_output_path.joinpath(backend_file_name)
convert_checkpoint_path = \
str(backend_output_path.joinpath(backend_file_name))
# Test the model
if convert_result and test_type == 'precision':
# test the model metric
if backend_test:
log_path = \
gen_log_path(backend_output_path.joinpath('backend'),
'test_log.txt')
get_backend_fps_metric(
deploy_cfg_path=str(deploy_cfg_path),
model_cfg_path=model_cfg_path,
convert_checkpoint_path=convert_checkpoint_path,
device_type=device_type,
logger=logger,
pytorch_metric=pytorch_metric,
metric_info=metric_info,
backend_name=backend_name,
precision_type=precision_type,
convert_result=convert_result,
report_dict=report_dict,
infer_type=infer_type,
log_path=log_path,
dataset_info=metafile_dataset,
report_txt_path=report_txt_path,
model_name=model_name)
if sdk_config is not None:
if codebase_name == 'mmpretrain' or codebase_name == 'mmaction':
replace_top_in_pipeline_json(backend_output_path, logger)
log_path = gen_log_path(
backend_output_path.joinpath('sdk'), 'test_log.txt')
if backend_name == 'onnxruntime':
                # SDK only supports onnxruntime on cpu
device_type = 'cpu'
# sdk test
get_backend_fps_metric(
deploy_cfg_path=str(sdk_config),
model_cfg_path=model_cfg_path,
convert_checkpoint_path=str(backend_output_path),
device_type=device_type,
logger=logger,
pytorch_metric=pytorch_metric,
metric_info=metric_info,
backend_name=f'SDK-{backend_name}',
precision_type=precision_type,
convert_result=convert_result,
report_dict=report_dict,
infer_type=infer_type,
log_path=log_path,
dataset_info=metafile_dataset,
report_txt_path=report_txt_path,
model_name=model_name)
else:
logger.info('Only test convert, saving to report...')
metric_list = [{metric: '-'} for metric in metric_info]
fps = '-'
test_pass = convert_result
dataset_type = metafile_dataset['dataset']
task_name = metafile_dataset['task']
# update the report
update_report(
report_dict=report_dict,
model_name=model_name,
model_config=str(model_cfg_path),
task_name=task_name,
checkpoint=str(report_checkpoint),
dataset=dataset_type,
backend_name=backend_name,
deploy_config=str(deploy_cfg_path),
static_or_dynamic=infer_type,
precision_type=precision_type,
conversion_result=str(convert_result),
fps=fps,
metric_info=metric_list,
test_pass=str(test_pass),
report_txt_path=report_txt_path,
codebase_name=codebase_name)
def save_report(report_info: dict, report_save_path: Path,
logger: logging.Logger):
"""Convert model to onnx and then get metric.
Args:
report_info (dict): Report info dict.
report_save_path (Path): Report save path.
logger (logging.Logger): Logger.
"""
logger.info('Saving regression test report to '
f'{report_save_path}, pls wait...')
try:
df = pd.DataFrame(report_info)
df.to_excel(report_save_path)
except ValueError:
logger.info(f'Got error report_info = {report_info}')
logger.info('Saved regression test report to '
f'{report_save_path}.')
def _filter_string(inputs):
"""Remove non alpha&number character from input string.
Args:
inputs(str): Input string.
Returns:
        str: Lowercased string containing only alphanumeric characters.
"""
outputs = ''.join([i.lower() for i in inputs if i.isalnum()])
return outputs
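# e.g. _filter_string('Mask R-CNN') -> 'maskrcnn'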
def main():
args = parse_args()
set_start_method('spawn')
logger = get_root_logger(log_level=args.log_level)
test_type = 'precision' if args.performance else 'convert'
logger.info(f'Processing regression test in {test_type} mode.')
backend_file_info = {
'onnxruntime': 'end2end.onnx',
'tensorrt': 'end2end.engine',
'openvino': 'end2end.xml',
'ncnn': ['end2end.param', 'end2end.bin'],
'pplnn': ['end2end.onnx', 'end2end.json'],
'torchscript': 'end2end.pt'
}
backend_list = args.backends
if backend_list is None:
backend_list = [
'onnxruntime', 'tensorrt', 'openvino', 'ncnn', 'pplnn',
'torchscript'
]
assert isinstance(backend_list, list)
logger.info(f'Regression test backend list = {backend_list}')
args.models = [_filter_string(s) for s in args.models]
logger.info(f'Regression test models list = {args.models}')
    assert ' ' not in args.work_dir, \
        f'Spaces are not allowed in {args.work_dir}'
    assert ' ' not in args.checkpoint_dir, \
        f'Spaces are not allowed in {args.checkpoint_dir}'
work_dir = Path(args.work_dir)
work_dir.mkdir(parents=True, exist_ok=True)
deploy_yaml_list = [
f'./tests/regression/{codebase}.yml' for codebase in args.codebase
]
for deploy_yaml in deploy_yaml_list:
if not Path(deploy_yaml).exists():
raise FileNotFoundError(f'deploy_yaml {deploy_yaml} not found, '
'please check !')
with open(deploy_yaml) as f:
yaml_info = yaml.load(f, Loader=yaml.FullLoader)
report_save_path = \
work_dir.joinpath(Path(deploy_yaml).stem + '_report.xlsx')
report_txt_path = report_save_path.with_suffix('.txt')
report_dict = {
'Model': [],
'Model Config': [],
'Task': [],
'Checkpoint': [],
'Dataset': [],
'Backend': [],
'Deploy Config': [],
'Static or Dynamic': [],
'Precision Type': [],
'Conversion Result': [],
# 'FPS': []
}
global_info = yaml_info.get('globals')
metric_info = global_info.get('metric_info', {})
for metric_name in metric_info:
report_dict.update({metric_name: []})
report_dict.update({'Test Pass': []})
global_info.update({'checkpoint_dir': args.checkpoint_dir})
global_info.update(
{'codebase_name': Path(deploy_yaml).stem.split('_')[0]})
with open(report_txt_path, 'w') as f_report:
title_str = ''
for key in report_dict:
title_str += f'{key},'
title_str = title_str[:-1] + '\n'
f_report.write(title_str) # clear the report tmp file
models_info = yaml_info.get('models')
for models in tqdm(models_info):
model_name_origin = models.get('name', 'model')
model_name_new = _filter_string(model_name_origin)
if 'model_configs' not in models:
logger.warning('Can not find field "model_configs", '
f'skipping {model_name_origin}...')
continue
if args.models != ['all'] and model_name_new not in args.models:
logger.info(
f'Test specific model mode, skip {model_name_origin}...')
continue
try:
model_metafile_info, checkpoint_save_dir, codebase_dir = \
get_model_metafile_info(global_info, models, logger)
except Exception as e:
logger.error(f'Failed to get meta info {e}')
continue
for model_config in model_metafile_info:
logger.info(f'Processing test for {model_config}...')
# Get backends info
pipelines_info = models.get('pipelines', None)
if pipelines_info is None:
logger.warning('pipelines_info is None, skip it...')
continue
# Get model config path
model_cfg_path = Path(codebase_dir).joinpath(model_config)
assert model_cfg_path.exists()
# Get checkpoint path
checkpoint_name = Path(
model_metafile_info.get(model_config).get('Weights')).name
checkpoint_path = Path(checkpoint_save_dir, checkpoint_name)
assert checkpoint_path.exists()
# Get pytorch from metafile.yml
pytorch_metric, metafile_dataset = get_pytorch_result(
model_name_origin, model_metafile_info, checkpoint_path,
model_cfg_path, model_config, metric_info, report_dict,
logger, report_txt_path, global_info.get('codebase_name'))
for pipeline in pipelines_info:
deploy_config = pipeline.get('deploy_config')
backend_name = get_backend(deploy_config).name.lower()
if backend_name not in backend_list:
logger.warning(f'backend_name ({backend_name}) not '
f'in {backend_list}, skip it...')
continue
backend_file_name = \
backend_file_info.get(backend_name, None)
if backend_file_name is None:
logger.warning('backend_file_name is None, '
'skip it...')
continue
get_backend_result(pipeline, model_cfg_path,
checkpoint_path, work_dir, args.device,
pytorch_metric, metric_info,
report_dict, test_type, logger,
backend_file_name, report_txt_path,
metafile_dataset, model_name_origin)
if len(report_dict.get('Model')) > 0:
save_report(report_dict, report_save_path, logger)
else:
logger.info(f'No model for {deploy_yaml}, not saving report.')
# merge report
merge_report(str(work_dir), logger)
logger.info('All done.')
if __name__ == '__main__':
main()
#!/bin/sh
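# Build, smoke-test and publish the openmmlab/mmdeploy ubuntu20.04-cuda11.3 image.
# Usage (assumed): ./<this_script>.sh <host-ip> [port]   # port defaults to 8585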
set -e
ip=${1}
port=${2:-8585}
date_today=`date +'%Y%m%d'`
# create http server
nohup python3 -m http.server --directory /data2/shared/nvidia $port > tmp.log 2>&1 &
export TENSORRT_URL=http://$ip:$port/TensorRT-8.2.3.0.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz
export TENSORRT_VERSION=8.2.3.0
export CUDA_INT=113
export TAG=ubuntu20.04-cuda11.3
# build docker image
docker build ./docker/Base/ -t openmmlab/mmdeploy:$TAG \
--build-arg CUDA_INT=$CUDA_INT \
--build-arg TENSORRT_URL=${TENSORRT_URL} \
--build-arg TENSORRT_VERSION=${TENSORRT_VERSION}
docker tag openmmlab/mmdeploy:$TAG openmmlab/mmdeploy:${TAG}-${date_today}
# test docker image
docker run --gpus=all -itd \
-v /data2/benchmark:/root/workspace/openmmlab-data \
-v /data2/checkpoints:/root/workspace/mmdeploy_checkpoints \
-v ~/mmdeploy:/root/workspace/mmdeploy \
openmmlab/mmdeploy:$TAG
# push to docker hub
docker login
docker push openmmlab/mmdeploy:$TAG
docker push openmmlab/mmdeploy:$TAG-${date_today}
#! /bin/bash
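# Install MMDeploy with the TensorRT backend on an NVIDIA Jetson board
# (assumes Python 3.8, e.g. a JetPack 5.x image). Dependencies are built under
# ../mmdeploy-dep and environment variables are collected in ~/mmdeploy.env.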
# check python version is 3.8 or not
check_python_38(){
MAJOR=`python3 --version | awk '{print $2}' | awk -F . '{print $1}'`
MINOR=`python3 --version | awk '{print $2}' | awk -F . '{print $2}'`
if [ ${MAJOR} -ne 3 ];then
echo 'This script needs python==3.8 +_+'
exit 0
fi
if [ ${MINOR} -ne 8 ];then
echo 'This script needs python==3.8 +_+'
exit 0
fi
}
install_torch() {
version=`python3 -c "import torch; print(torch.__version__)"`
if [ -n "$version" ];then
return 0
fi
TORCH_WHL="torch-1.11.0-cp38-cp38-linux_aarch64.whl"
if [ ! -e "${TORCH_WHL}" ];then
wget -q --show-progress https://nvidia.box.com/shared/static/ssf2v7pf5i245fk4i0q926hy4imzs2ph.whl -O ${TORCH_WHL}
fi
python3 -m pip install ${TORCH_WHL}
python3 -m pip install numpy
sudo apt install libopenblas-dev -y
python3 -c "import torch; print(torch.__version__)"
}
install_torchvision() {
version=`python3 -c "import torchvision; print(torchvision.__version__)"`
if [ -n "$version" ];then
return 0
fi
sudo apt-get install libjpeg-dev zlib1g-dev libpython3-dev libavcodec-dev libavformat-dev libswscale-dev -y
if [ ! -e "torchvision" ];then
git clone https://github.com/pytorch/vision torchvision --branch v0.11.1 --depth=1
fi
cd torchvision
export BUILD_VERSION=0.11.1
python3 -m pip install -e .
python3 -c "import torchvision; print(torchvision.__version__)"
cd -
}
install_cmake() {
command -v cmake > /dev/null 2>&1 || {
python3 -m pip install cmake
}
echo "cmake installed $(which cmake)"
}
install_tensorrt() {
echo 'export PYTHONPATH=/usr/lib/python3.8/dist-packages:${PYTHONPATH}' >> ~/mmdeploy.env
export PYTHONPATH=/usr/lib/python3.8/dist-packages:${PYTHONPATH}
echo 'export TENSORRT_DIR=/usr/include/aarch64-linux-gnu' >> ~/mmdeploy.env
export TENSORRT_DIR=/usr/include/aarch64-linux-gnu
echo 'export PATH=$PATH:/usr/local/cuda/bin' >> ~/mmdeploy.env
export PATH=$PATH:/usr/local/cuda/bin
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64' >> ~/mmdeploy.env
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64
echo 'export CUDA_HOME=/usr/local/cuda-11' >> ~/mmdeploy.env
export CUDA_HOME=/usr/local/cuda-11
echo 'export CUDA_ROOT=/usr/local/cuda-11' >> ~/mmdeploy.env
export CUDA_ROOT=/usr/local/cuda-11
python -c "import tensorrt; print(tensorrt.__version__)"
}
install_mmcv_pycuda() {
version=`python3 -c "import mmcv; print(mmcv.__version__)"`
if [ -n "$version" ];then
return 0
fi
# try prebuilt .whl
board=`cat /etc/nv_tegra_release | awk '{print $9}'`
release=`cat /etc/nv_tegra_release | awk '{print $2}'`
revision=`cat /etc/nv_tegra_release | awk '{print $5}'`
if [ ${board} = "t186ref," ];then
if [ ${release} = "R34," ];then
if [ ${revision} = "1.1," ];then
# use prebuilt whl
                wget -q --show-progress https://github.com/tpoisonooo/mmcv-jetson-orin-prebuilt-whl/raw/main/mmcv_full-1.5.1-cp38-cp38-linux_aarch64.whl
python3 -m pip install mmcv_full-1.5.1-cp38-cp38-linux_aarch64.whl
wget https://github.com/tpoisonooo/mmcv-jetson-orin-prebuilt-whl/raw/main/pycuda-2022.1-cp38-cp38-linux_aarch64.whl
python3 -m pip install pycuda-2022.1-cp38-cp38-linux_aarch64.whl
fi
fi
elif [ ! -e "mmcv" ];then
# source build mmcv and pycuda
sudo apt-get install -y libssl-dev
git clone https://github.com/open-mmlab/mmcv.git --branch v1.5.1 --depth=1
cd mmcv
echo 'Building mmcv-full with MMCV_WITH_OPS=1 and pycuda, it may take an hour, please wait..'
MMCV_WITH_OPS=1 python3 -m pip install -e .
python3 -m pip install pycuda
cd -
fi
python3 -c "import mmcv; print(mmcv.__version__)"
}
install_pplcv() {
if [ ! -e "ppl.cv" ];then
git clone https://github.com/openppl-public/ppl.cv.git --depth=1 --recursive
fi
cd ppl.cv
./build.sh cuda
echo "PPLCV_DIR=$(pwd)" >> ~/mmdeploy.env
export PPLCV_DIR=$(pwd)
cd -
}
install_mmdeploy() {
sudo apt-get install -y pkg-config libhdf5-103 libhdf5-dev libspdlog-dev
python3 -m pip install onnx
python3 -m pip install versioned-hdf5
# build and install mmdeploy
cd ../mmdeploy
git submodule init
git submodule update
if [ ! -e "build" ];then
mkdir -p build
fi
cd build
cmake .. \
-DMMDEPLOY_BUILD_SDK=ON \
-DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \
-DMMDEPLOY_BUILD_EXAMPLES=ON \
-DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \
-DMMDEPLOY_TARGET_BACKENDS="trt" \
-DMMDEPLOY_CODEBASES=all \
-Dpplcv_DIR=${PPLCV_DIR}/cuda-build/install/lib/cmake/ppl
make -j 7 && make install
cd -
python3 -m pip install -v -e .
python3 tools/check_env.py
}
show_env() {
echo ""
echo "----------------------------------------------------------------------------------------------------------"
echo '>> Install finished, `source ~/mmdeploy.env` to setup your environment !'
cat ~/mmdeploy.env
echo "----------------------------------------------------------------------------------------------------------"
}
# setup env
echo "" > ~/mmdeploy.env
echo 'export OPENBLAS_CORETYPE=ARMV8' >> ~/mmdeploy.env
export OPENBLAS_CORETYPE=ARMV8
echo 'export ARCH=aarch64' >> ~/mmdeploy.env
export ARCH=aarch64
check_python_38
if [ ! -e "../mmdeploy-dep" ];then
mkdir ../mmdeploy-dep
fi
cd ../mmdeploy-dep
echo $(pwd)
install_torch
install_torchvision
install_cmake
install_tensorrt
install_mmcv_pycuda
install_pplcv
install_mmdeploy
show_env
#!/bin/bash
# build_linux_nvidia.sh
# Date: 08-03-2022, 24-04-2022
#
# Run this script to build MMDeploy SDK and install necessary prerequisites.
# This script will also setup python venv and generate prebuild binaries if requested.
#
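# Usage (derived from the argument handling at the bottom of this script):
#   ./build_linux_nvidia.sh <all|prereqs|py_venv|pplcv|mmdeploy> [auto]
#   "auto" enables unattended install with default options.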
#####
# Build vars
BUILD_TYPE="Release"
ARCH=$(uname -i)
PROC_NUM=$(nproc)
# Default GCC
GCC_COMPILER="g++"
#####
# Directories
# WORKING_DIR must correspond to MMDeploy root dir
WORKING_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../.." && pwd )"
PPLCV_DIR=${WORKING_DIR}/ppl.cv
MMDEPLOY_DIR=${WORKING_DIR}
#####
# Versions
PPLCV_VER="0.7.0"
CMAKE_VER="3.23.0"
#####
# Flags
# WITH_PYTHON: Install misc. dependencies in the active venv
WITH_PYTHON=1
# WITH_CLEAN: Remove build output dirs
WITH_CLEAN=1
# WITH_PREBUILD: Generate prebuild archives
WITH_PREBUILD=0
# WITH_UNATTENDED: Unattended install, skip/use default options
WITH_UNATTENDED=0
#####
# Prefix: Set install prefix for ppl.cv, mmdeploy SDK depending on arch
if [[ "$ARCH" == aarch64 ]]; then
INSTALL_PREFIX="/usr/local/aarch64-linux-gnu"
else
INSTALL_PREFIX="/usr/local"
fi
PYTHON_VENV_DIR=${WORKING_DIR}/venv-mmdeploy
appargument1=$1
appargument2=$2
#####
# helper functions
echo_green() {
if [ -n "$1" ]; then
echo "$(tput setaf 10)$1$(tput sgr 0)"
fi
}
echo_red() {
if [ -n "$1" ]; then
echo "$(tput setaf 1)$1$(tput sgr 0)"
fi
}
echo_blue() {
if [ -n "$1" ]; then
echo "$(tput setaf 4)$1$(tput sgr 0)"
fi
}
contains_element () {
local e match="$1"
shift
for e; do [[ "$e" == "$match" ]] && return 0; done
return 1
}
function version {
echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }';
}
prompt_yesno() {
if [ -n "$1" ]; then
echo_blue "$1"
fi
if [[ $WITH_UNATTENDED -eq 1 ]]
then
echo_green "Unattended install, selecting default option"
return 2
else
echo_blue "(y/n/q) or press [ENTER] to select default option"
read -p "?" -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
return 1
elif [[ $REPLY =~ ^[Nn]$ ]]
then
return 0
elif [[ $REPLY = "" ]]
then
echo "Selecting default option..."
return 2
elif [[ $REPLY =~ ^[Qq]$ ]]
then
echo_green "Quitting!"
exit
else
echo_red "Invalid argument. Try again"
prompt_yesno
fi
fi
}
prereqs() {
echo_green "Installing prerequisites..."
# cmake check & install
echo_green "Checking your cmake version..."
CMAKE_DETECT_VER=$(cmake --version | grep -oP '(?<=version).*')
if [ $(version $CMAKE_DETECT_VER) -ge $(version "3.14.0") ]; then
echo "Cmake version $CMAKE_DETECT_VER is up to date"
else
echo "CMake too old, purging existing cmake and installing ${CMAKE_VER}..."
# purge existing
sudo apt-get purge cmake
sudo snap remove cmake
# install prebuild
wget https://github.com/Kitware/CMake/releases/download/v${CMAKE_VER}/cmake-${CMAKE_VER}-linux-${ARCH}.sh
chmod +x cmake-${CMAKE_VER}-linux-${ARCH}.sh
sudo ./cmake-${CMAKE_VER}-linux-${ARCH}.sh --prefix=/usr --skip-license
fi
# gcc-7 check
echo_green "Checking your gcc version..."
GCC_DETECT_VER=$(gcc --version | grep -oP '(?<=\)).*' -m1)
if [ $(version $GCC_DETECT_VER) -ge $(version "7.0.0") ]; then
echo "GCC version $GCC_DETECT_VER is up to date"
else
echo "gcc version too old, installing ${CMAKE_VER}..."
echo "Purge existing cmake and install ${GCC_DETECT_VER}..."
# Add repository if ubuntu < 18.04
sudo add-apt-repository ppa:ubuntu-toolchain-r/test
sudo apt-get update
sudo apt-get install gcc-7
sudo apt-get install g++-7
GCC_COMPILER="g++-7"
fi
# spdlog
echo_green "Checking spdlog version..."
prompt_yesno "Install latest spdlog from source? (Default:no)"
local res=$?
if [[ $res -eq 1 ]] # || [ $res -eq 2 ]
then
echo_green "Building and installing latest spdlog from source"
# remove libspdlog, as it might be an old version
sudo apt-get remove libspdlog-dev -y
git clone https://github.com/gabime/spdlog.git spdlog
cd spdlog
git pull
git checkout tags/v1.8.1
mkdir build -p && cd build
# we must build spdlog with -fPIC enabled
cmake .. -DCMAKE_POSITION_INDEPENDENT_CODE=ON && make -j${PROC_NUM}
sudo make install
sudo ldconfig
fi
# tensorrt check
echo_green "Check your TensorRT version:"
## Check if ${TENSORRT_DIR} env variable has been set
if [ -d "${TENSORRT_DIR}" ]; then
echo "TENSORRT_DIR env. variable has been set ${TENSORRT_DIR}"
else
echo_red "TENSORRT_DIR env. variable has NOT been set."
if [[ "$ARCH" == aarch64 ]]; then
echo "Added TENSORRT_DIR, CUDNN_DIR to env."
echo 'export TENSORRT_DIR="/usr/include/'${ARCH}'-linux-gnu/"' >> ${HOME}/.bashrc
echo 'export CUDNN_DIR="/usr/include/'${ARCH}'-linux-gnu/"' >> ${HOME}/.bashrc
echo 'export LD_LIBRARY_PATH="/usr/lib/'${ARCH}'-linux-gnu/:$LD_LIBRARY_PATH"' >> ${HOME}/.bashrc
#source ${HOME}/.bashrc
# sourcing in bash script won't set the env. variables so we will set them temporarily
export TENSORRT_DIR="/usr/include/'${ARCH}'-linux-gnu/"
export CUDNN_DIR="/usr/include/'${ARCH}'-linux-gnu/"
export LD_LIBRARY_PATH="/usr/lib/'${ARCH}'-linux-gnu/:$LD_LIBRARY_PATH"
else
echo_red "Please Install TensorRT, CUDNN and add TENSORRT_DIR, CUDNN_DIR to environment variables before running this script!"
exit
fi
fi
# Determine TensorRT version and set paths accordingly
echo "Checking TensorRT version...Please verify the detected versions below:"
if [[ "$ARCH" == aarch64 ]]; then
cat /usr/include/${ARCH}-linux-gnu/NvInferVersion.h | grep NV_TENSORRT
else
cat ${TENSORRT_DIR}/include/NvInferVersion.h | grep NV_TENSORRT
fi
prompt_yesno "Is TensorRT >=8.0.1.6 installed? (Always installed on Jetson) (Default:yes)"
local res=$?
if [[ $res -eq 1 ]] || [ $res -eq 2 ]
then
echo "TensorRT appears to be installed..."
else
echo_red "Error: You must install TensorRT before installing MMDeploy!"
exit
fi
prompt_yesno "Install OpenCV? (Always installed on Jetson) (Default:no)"
local res=$?
if [[ $res -eq 1 ]] # || [ $res -eq 2 ]
then
echo "Installing libopencv-dev..."
# opencv
sudo apt-get install libopencv-dev
fi
}
py_venv() {
## python venv
echo_green "Installing python venv..."
#check for python installed version
pyv="$(python3 -V 2>&1)"
pyv_old="Python 3.6"
if echo "$pyv" | grep -q "$pyv_old"; then
# use python 3.6
curl https://bootstrap.pypa.io/pip/3.6/get-pip.py -o get-pip.py
else
# use python >=3.7
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
fi
# dependencies
sudo apt-get install protobuf-compiler libprotoc-dev libssl-dev curl ninja-build -y
sudo apt-get install libopenblas-dev python3-venv python3-dev python3-setuptools -y
sudo python3 get-pip.py
pip3 install testresources
pip3 install --upgrade setuptools wheel
if [ -d "${PYTHON_VENV_DIR}" ]; then
prompt_yesno "Reinstall existing Python venv ${PYTHON_VENV_DIR}? (Default:no)"
local res=$?
if [[ $res -eq 1 ]]
then
rm -r ${PYTHON_VENV_DIR}
python3 -m venv ${PYTHON_VENV_DIR} --system-site-packages #system site packages to keep trt from system installation
fi
else
python3 -m venv ${PYTHON_VENV_DIR} --system-site-packages #system site packages to keep trt from system installation
fi
source ${PYTHON_VENV_DIR}/bin/activate
python3 get-pip.py
pip3 install testresources
pip3 install --upgrade setuptools wheel
# Latest PIL is not compatible with mmcv=1.4.1
pip3 install Pillow==7.0.0
if [[ "$ARCH" == aarch64 ]]
then
        # protobuf on jetson is quite old - must be upgraded
        pip3 install --upgrade protobuf
        # Installing numpy >1.19.4 might give "Illegal instruction (core dumped)" on Jetson/aarch64
# To solve it, we should set OPENBLAS_CORETYPE
echo 'export OPENBLAS_CORETYPE=ARMV8' >> ~/.bashrc
#source ${HOME}/.bashrc
# sourcing in bash script won't set the env. variables so we will set them temporarily
export OPENBLAS_CORETYPE=ARMV8
fi
pip3 install numpy
pip3 install opencv-python
pip3 install matplotlib
prompt_yesno "Install PyTorch, Torchvision, mmcv in the active venv? (Default:no)"
local res=$?
if [[ $res -eq 1 ]]
then
# pytorch, torchvision, torchaudio
if [[ "$ARCH" == aarch64 ]]
then
# pytorch
wget https://nvidia.box.com/shared/static/fjtbno0vpo676a25cgvuqc1wty0fkkg6.whl -O torch-1.10.0-cp36-cp36m-linux_aarch64.whl
pip3 install torch-1.10.0-cp36-cp36m-linux_aarch64.whl
# torchvision
sudo apt-get install libjpeg-dev zlib1g-dev libpython3-dev libavcodec-dev libavformat-dev libswscale-dev -y
sudo rm -r torchvision
git clone --branch v0.11.1 https://github.com/pytorch/vision torchvision
cd torchvision
export BUILD_VERSION=0.11.1 # where 0.x.0 is the torchvision version
python3 setup.py install
cd ../
# torchaudio
#sudo apt-get install -y sox libsox-dev libsox-fmt-all
#sudo rm -r torchaudio
#git clone -b v0.10.0 https://github.com/pytorch/audio torchaudio
#cd torchaudio
#git submodule update --init --recursive
#python3 setup.py install
#cd ../
# mmcv
pip3 uninstall mmcv-full
pip3 install mmcv-full==1.4.1 -f https://download.openmmlab.com/mmcv/dist/cu102/torch1.10.0/index.html
else
pip3 install torch==1.10.0+cu113 torchvision==0.11.1+cu113 torchaudio==0.10.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
# mmcv
pip3 uninstall mmcv-full
pip3 install mmcv-full==1.4.1 -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.10.0/index.html
fi
fi
# cleanup
rm get-pip.py
}
pplcv() {
## ppl.cv
echo_green "Building and installing ppl.cv..."
cd ${WORKING_DIR}
echo_blue "checking out '${PPLCV_DIR}' pkg..."
if [ -d "${PPLCV_DIR}" ]; then
echo "Already exists! Checking out the requested version..."
else
git clone https://github.com/openppl-public/ppl.cv.git ${PPLCV_DIR}
fi
cd ${PPLCV_DIR}
git pull
git checkout tags/v${PPLCV_VER}
# remove all build files
if [[ $WITH_CLEAN -eq 1 ]]
then
sudo rm -r ${PPLCV_DIR}/build
fi
# build
mkdir build -p && cd build
    cmake -DPPLCV_USE_CUDA=ON -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} .. && make -j${PROC_NUM} && sudo make install
sudo ldconfig
# generate prebuild and pack into .tar.gz
if [[ $WITH_PREBUILD -eq 1 ]]
then
sudo make DESTDIR=./prebuild install
tar -zcvf ${WORKING_DIR}/pplcv_${PPLCV_VER}_cuda-${ARCH}-build.tar.gz -C ./prebuild/ .
fi
}
mmdeploy(){
## mmdeploy SDK
echo_green "Building and installing mmdeploysdk..."
cd ${MMDEPLOY_DIR}
    MMDEPLOY_DETECT_VER=$(cat mmdeploy/version.py | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+')
# reinit submodules
git submodule update --init --recursive
# python dependencies
if [[ $WITH_PYTHON -eq 1 ]]
then
source ${PYTHON_VENV_DIR}/bin/activate
## h5py (Required by mmdeploy)
## h5py not directly supported by jetson and must be built/installed manually
sudo apt-get install pkg-config libhdf5-10* libhdf5-dev -y
sudo pip3 install Cython
sudo env H5PY_SETUP_REQUIRES=0 pip3 install -U h5py==2.9.0
pip install -e .
fi
# remove all build files
if [[ $WITH_CLEAN -eq 1 ]]
then
sudo rm -r ${MMDEPLOY_DIR}/build
fi
# build
mkdir build -p && cd build
cmake .. \
-DMMDEPLOY_BUILD_SDK=ON \
-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
-DCMAKE_CXX_COMPILER=${GCC_COMPILER} \
-Dpplcv_DIR=${INSTALL_PREFIX}/lib/cmake/ppl \
-DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \
-DMMDEPLOY_TARGET_BACKENDS=trt \
-DMMDEPLOY_CODEBASES=all \
-DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \
-DTENSORRT_DIR=${TENSORRT_DIR} \
-DCUDNN_DIR=${CUDNN_DIR}
cmake --build . -- -j${PROC_NUM} && sudo make install
sudo ldconfig
# generate prebuild and pack into .tar.gz
if [[ $WITH_PREBUILD -eq 1 ]]
then
sudo make DESTDIR=./prebuild install
        tar -zcvf ${WORKING_DIR}/mmdeploysdk_${MMDEPLOY_DETECT_VER}_${ARCH}-build.tar.gz -C ./prebuild/ .
fi
## build mmdeploy examples
cp -r ${MMDEPLOY_DIR}/demo/csrc ${MMDEPLOY_DIR}/build/example
cd ${MMDEPLOY_DIR}/build/example
rm -r build
mkdir build -p && cd build
cmake -DMMDeploy_DIR=${INSTALL_PREFIX} ..
make all
}
all() {
# build all
prereqs
py_venv
pplcv
mmdeploy
}
#####
# supported package
package_list=(
"all"
"prereqs"
"py_venv"
"pplcv"
"mmdeploy"
)
#####
# check input argument
if contains_element "$appargument1" "${package_list[@]}"; then
echo_green "Build and install '$appargument1'..."
else
echo_red "Unsupported argument '$appargument1'. Use one of the following:"
for i in ${package_list[@]}
do
echo $i
done
exit
fi
#####
# Unattended/auto install
if [[ $appargument2 == "auto" ]]
then
WITH_UNATTENDED=1
fi
#####
# Install dependencies in venv?
prompt_yesno "Install misc. dependencies in the active venv? (Default:${WITH_PYTHON})"
res=$?
if [[ $res -eq 1 ]]
then
WITH_PYTHON=1
elif [[ $res -eq 0 ]]
then
WITH_PYTHON=0
fi
#####
# Clean previous build dirs?
prompt_yesno "Clean previous build dirs? (Default:${WITH_CLEAN})"
res=$?
if [[ $res -eq 1 ]]
then
WITH_CLEAN=1
elif [[ $res -eq 0 ]]
then
WITH_CLEAN=0
fi
#####
# Generate prebuild dirs?
prompt_yesno "Generate prebuild dirs? (Default:${WITH_PREBUILD})"
res=$?
if [[ $res -eq 1 ]]
then
WITH_PREBUILD=1
elif [[ $res -eq 0 ]]
then
WITH_PREBUILD=0
fi
$appargument1
cd ${WORKING_DIR}
# update env. variables
exec bash
#!/bin/sh
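# Build and publish the openmmlab/mmdeploy manylinux2014_x86_64-cuda11.3 prebuild image.
# Usage (assumed): ./<this_script>.sh <host-ip> [port]   # port defaults to 8585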
set -e
ip=${1}
port=${2:-8585}
date_today=`date +'%Y%m%d'`
# create http server
nohup python3 -m http.server --directory /data2/shared/mmdeploy-manylinux2014_x86_64-cuda11.3 $port > tmp.log 2>&1 &
export ip=10.1.52.36
export port=8585
export CUDA_URL=http://$ip:$port/cuda_11.3.0_465.19.01_linux.run
export CUDNN_URL=http://$ip:$port/cudnn-11.3-linux-x64-v8.2.1.32.tgz
export TENSORRT_URL=http://$ip:$port/TensorRT-8.2.3.0.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz
export TENSORRT_VERSION=8.2.3.0
export TAG=manylinux2014_x86_64-cuda11.3
# build docker image
docker build ./docker/prebuild/ -t openmmlab/mmdeploy:$TAG \
--build-arg CUDA_URL=$CUDA_URL \
--build-arg CUDNN_URL=$CUDNN_URL \
--build-arg TENSORRT_URL=${TENSORRT_URL}
# push to docker hub
docker login
docker push openmmlab/mmdeploy:$TAG
# Copyright (c) OpenMMLab. All rights reserved.
import os
import sys
import time
from pathlib import Path
from ubuntu_utils import cmd_result, ensure_base_env, get_job
g_jobs = 2
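# default number of parallel build jobs; main() overrides this via get_job(sys.argv)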
def install_protobuf(dep_dir) -> int:
"""build and install protobuf. protobuf seems not support repeated install,
so clean build first.
Args:
wor_dir (_type_): _description_
Returns:
: _description_
"""
print('-' * 10 + 'install protobuf' + '-' * 10)
os.chdir(dep_dir)
if not os.path.exists('protobuf-3.20.0'):
os.system(
'wget https://github.com/protocolbuffers/protobuf/releases/download/v3.20.0/protobuf-cpp-3.20.0.tar.gz' # noqa: E501
)
os.system('tar xvf protobuf-cpp-3.20.0.tar.gz')
os.chdir(os.path.join(dep_dir, 'protobuf-3.20.0'))
install_dir = os.path.join(dep_dir, 'pbinstall')
if os.path.exists(install_dir):
os.system('rm -rf {}'.format(install_dir))
os.system('make clean')
os.system('./configure --prefix={}'.format(install_dir))
os.system('make -j {} && make install'.format(g_jobs))
protoc = os.path.join(install_dir, 'bin', 'protoc')
print('protoc \t:{}'.format(cmd_result('{} --version'.format(protoc))))
os.system(""" echo 'export PATH={}:$PATH' >> ~/mmdeploy.env """.format(
os.path.join(install_dir, 'bin')))
os.system(
""" echo 'export LD_LIBRARY_PATH={}:$LD_LIBRARY_PATH' >> ~/mmdeploy.env """ # noqa: E501
.format(os.path.join(install_dir, 'lib')))
return 0
def install_pyncnn(dep_dir):
print('-' * 10 + 'build and install pyncnn' + '-' * 10)
time.sleep(2)
# generate unzip and build dir
os.chdir(dep_dir)
# git clone
if not os.path.exists('ncnn'):
os.system(
'git clone --depth 1 --branch 20230816 https://github.com/tencent/ncnn' # noqa: E501
)
ncnn_dir = os.path.join(dep_dir, 'ncnn')
os.chdir(ncnn_dir)
# update submodule pybind11; glslang is not required
os.system('git submodule init && git submodule update python/pybind11')
# build
if not os.path.exists('build'):
os.system('mkdir build')
os.chdir(os.path.join(ncnn_dir, 'build'))
os.system('rm -rf CMakeCache.txt')
pb_install = os.path.join(dep_dir, 'pbinstall')
pb_bin = os.path.join(pb_install, 'bin', 'protoc')
pb_lib = os.path.join(pb_install, 'lib', 'libprotobuf.so')
pb_include = os.path.join(pb_install, 'include')
cmd = 'cmake .. '
cmd += ' -DNCNN_PYTHON=ON '
cmd += ' -DProtobuf_LIBRARIES={} '.format(pb_lib)
cmd += ' -DProtobuf_PROTOC_EXECUTABLE={} '.format(pb_bin)
cmd += ' -DProtobuf_INCLUDE_DIR={} '.format(pb_include)
cmd += ' && make -j {} '.format(g_jobs)
cmd += ' && make install '
os.system(cmd)
# install
os.chdir(ncnn_dir)
os.system('cd python && python3 -m pip install -e . --user --no-cache-dir')
ncnn_cmake_dir = os.path.join(ncnn_dir, 'build', 'install', 'lib', 'cmake',
'ncnn')
assert (os.path.exists(ncnn_cmake_dir))
print('ncnn cmake dir \t:{}'.format(ncnn_cmake_dir))
print('\n')
return ncnn_cmake_dir
def install_mmdeploy(work_dir, dep_dir, ncnn_cmake_dir):
print('-' * 10 + 'build and install mmdeploy' + '-' * 10)
time.sleep(3)
os.chdir(work_dir)
os.system('git submodule init')
os.system('git submodule update')
if not os.path.exists('build'):
os.system('mkdir build')
pb_install = os.path.join(dep_dir, 'pbinstall')
pb_bin = os.path.join(pb_install, 'bin', 'protoc')
pb_lib = os.path.join(pb_install, 'lib', 'libprotobuf.so')
pb_include = os.path.join(pb_install, 'include')
os.system('rm -rf build/CMakeCache.txt')
cmd = 'cd build && cmake ..'
cmd += ' -DMMDEPLOY_BUILD_SDK=ON '
cmd += ' -DMMDEPLOY_BUILD_EXAMPLES=ON '
cmd += ' -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON '
cmd += ' -DMMDEPLOY_TARGET_DEVICES=cpu '
cmd += ' -DMMDEPLOY_TARGET_BACKENDS=ncnn '
cmd += ' -DProtobuf_PROTOC_EXECUTABLE={} '.format(pb_bin)
cmd += ' -DProtobuf_LIBRARIES={} '.format(pb_lib)
cmd += ' -DProtobuf_INCLUDE_DIR={} '.format(pb_include)
cmd += ' -Dncnn_DIR={} '.format(ncnn_cmake_dir)
os.system(cmd)
os.system('cd build && make -j {} && make install'.format(g_jobs))
os.system('python3 -m pip install -v -e . --user --no-cache-dir')
os.system(""" echo 'export PATH={}:$PATH' >> ~/mmdeploy.env """.format(
os.path.join(work_dir, 'mmdeploy', 'backend', 'ncnn')))
try:
import mmcv
print(mmcv.__version__)
os.system('python3 tools/check_env.py')
except Exception:
print('Please install torch & mmcv later.. ╮(╯▽╰)╭')
return 0
def main():
"""Auto install mmdeploy with ncnn. To verify this script:
1) use `sudo docker run -v /path/to/mmdeploy:/root/mmdeploy -v /path/to/Miniconda3-latest-Linux-x86_64.sh:/root/miniconda.sh -it ubuntu:18.04 /bin/bash` # noqa: E501
2) install conda and setup python environment
3) run `python3 tools/scripts/build_ubuntu_x64_ncnn.py`
Returns:
-1 on failure, otherwise None.
"""
global g_jobs
g_jobs = get_job(sys.argv)
print('g_jobs {}'.format(g_jobs))
work_dir = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
dep_dir = os.path.abspath(os.path.join(work_dir, '..', 'mmdeploy-dep'))
if os.path.isfile(dep_dir):
print('{} already exists and it is a file, exit.'.format(dep_dir))
return -1
if not os.path.exists(dep_dir):
os.mkdir(dep_dir)
success = ensure_base_env(work_dir, dep_dir)
if success != 0:
return -1
if install_protobuf(dep_dir) != 0:
return -1
ncnn_cmake_dir = install_pyncnn(dep_dir)
if install_mmdeploy(work_dir, dep_dir, ncnn_cmake_dir) != 0:
return -1
if os.path.exists(Path('~/mmdeploy.env').expanduser()):
print('Please source ~/mmdeploy.env to setup your env !')
os.system('cat ~/mmdeploy.env')
if __name__ == '__main__':
main()
# Copyright (c) OpenMMLab. All rights reserved.
import os
import sys
import time
from pathlib import Path
from ubuntu_utils import ensure_base_env, get_job
g_jobs = 2
def install_ort(dep_dir):
print('-' * 10 + 'install ort' + '-' * 10)
time.sleep(2)
# generate unzip and build dir
os.chdir(dep_dir)
# install python onnxruntime
os.system('python3 -m pip install onnxruntime==1.8.1')
# git clone
if not os.path.exists('onnxruntime-linux-x64-1.8.1'):
os.system(
'wget -q --show-progress https://github.com/microsoft/onnxruntime/releases/download/v1.8.1/onnxruntime-linux-x64-1.8.1.tgz' # noqa: E501
)
os.system('tar xvf onnxruntime-linux-x64-1.8.1.tgz')
ort_dir = os.path.join(dep_dir, 'onnxruntime-linux-x64-1.8.1')
print('onnxruntime dir \t:{}'.format(ort_dir))
print('\n')
return ort_dir
def install_mmdeploy(work_dir, ort_dir):
print('-' * 10 + 'build and install mmdeploy' + '-' * 10)
time.sleep(3)
os.chdir(work_dir)
os.system('git submodule init')
os.system('git submodule update')
if not os.path.exists('build'):
os.system('mkdir build')
os.system('rm -rf build/CMakeCache.txt')
cmd = 'cd build && cmake ..'
cmd += ' -DMMDEPLOY_BUILD_SDK=ON '
cmd += ' -DMMDEPLOY_BUILD_EXAMPLES=ON '
cmd += ' -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON '
cmd += ' -DMMDEPLOY_TARGET_DEVICES=cpu '
cmd += ' -DMMDEPLOY_TARGET_BACKENDS=ort '
cmd += ' -DONNXRUNTIME_DIR={} '.format(ort_dir)
os.system(cmd)
os.system('cd build && make -j {} && make install'.format(g_jobs))
os.system('python3 -m pip install -e .')
try:
import mmcv
print(mmcv.__version__)
os.system('python3 tools/check_env.py')
except Exception:
print('Please install torch & mmcv later.. ⊙▽⊙')
return 0
def main():
"""Auto install mmdeploy with ort. To verify this script:
1) use `sudo docker run -v /path/to/mmdeploy:/root/mmdeploy -v /path/to/Miniconda3-latest-Linux-x86_64.sh:/root/miniconda.sh -it ubuntu:18.04 /bin/bash` # noqa: E501
2) install conda and setup python environment
3) run `python3 tools/scripts/build_ubuntu_x64_ort.py`
Returns:
-1 on failure, otherwise None.
"""
global g_jobs
g_jobs = get_job(sys.argv)
print('g_jobs {}'.format(g_jobs))
work_dir = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
dep_dir = os.path.abspath(os.path.join(work_dir, '..', 'mmdeploy-dep'))
if os.path.isfile(dep_dir):
print('{} already exists and it is a file, exit.'.format(dep_dir))
return -1
if not os.path.exists(dep_dir):
os.mkdir(dep_dir)
success = ensure_base_env(work_dir, dep_dir)
if success != 0:
return -1
ort_dir = install_ort(dep_dir)
if install_mmdeploy(work_dir, ort_dir) != 0:
return -1
if os.path.exists(Path('~/mmdeploy.env').expanduser()):
print('Please source ~/mmdeploy.env to setup your env !')
os.system('cat ~/mmdeploy.env')
if __name__ == '__main__':
main()
# Copyright (c) OpenMMLab. All rights reserved.
import os
import sys
import time
from pathlib import Path
from ubuntu_utils import cmd_result, ensure_base_env, get_job
g_jobs = 2
def install_pplcv(dep_dir, build_cuda):
print('-' * 10 + 'install pplcv' + '-' * 10)
time.sleep(2)
os.chdir(dep_dir)
pplcv_dir = os.path.join(dep_dir, 'ppl.cv')
# git clone
if not os.path.exists(pplcv_dir):
os.system(
'git clone --depth 1 --branch v0.7.0 https://github.com/openppl-public/ppl.cv/' # noqa: E501
)
# build
os.chdir(pplcv_dir)
if build_cuda is True:
os.system('./build.sh cuda')
pplcv_cmake_dir = os.path.join(pplcv_dir,
'cuda-build/install/lib/cmake/ppl')
else:
os.system('./build.sh x86_64')
pplcv_cmake_dir = os.path.join(pplcv_dir,
'x86-64-build/install/lib/cmake/ppl')
print('\n')
return pplcv_cmake_dir
def install_pplnn(dep_dir, build_cuda):
print('-' * 10 + 'install pplnn' + '-' * 10)
time.sleep(2)
# generate unzip and build dir
os.chdir(dep_dir)
pplnn_dir = os.path.join(dep_dir, 'ppl.nn')
# git clone
if not os.path.exists(pplnn_dir):
os.system(
'git clone --depth 1 --branch v0.8.2 https://github.com/openppl-public/ppl.nn/' # noqa: E501
)
# build
os.chdir(pplnn_dir)
if build_cuda is True:
os.system(
'./build.sh -DPPLNN_USE_CUDA=ON -DPPLNN_USE_X86_64=ON -DPPLNN_ENABLE_PYTHON_API=ON' # noqa: E501
)
else:
os.system(
'./build.sh -DPPLNN_USE_X86_64=ON -DPPLNN_ENABLE_PYTHON_API=ON' # noqa: E501
)
os.system('cd python/package && ./build.sh')
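# ppl.nn's python/package/build.sh is expected to place the pyppl wheel under
# /tmp/pyppl-package/dist, which is where it is installed from below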
os.system(
'cd /tmp/pyppl-package/dist && python3 -m pip install pyppl*.whl --force-reinstall --user' # noqa: E501
)
pplnn_cmake_dir = os.path.join(pplnn_dir,
'pplnn-build/install/lib/cmake/ppl')
print('\n')
return pplnn_cmake_dir
def install_mmdeploy(work_dir, pplnn_cmake_dir, pplcv_cmake_dir, build_cuda):
print('-' * 10 + 'build and install mmdeploy' + '-' * 10)
time.sleep(3)
os.chdir(work_dir)
os.system('git submodule init')
os.system('git submodule update')
if not os.path.exists('build'):
os.system('mkdir build')
os.system('rm -rf build/CMakeCache.txt')
cmd = 'cd build && cmake ..'
cmd += ' -DMMDEPLOY_BUILD_SDK=ON '
cmd += ' -DMMDEPLOY_BUILD_EXAMPLES=ON '
cmd += ' -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON '
cmd += ' -DMMDEPLOY_TARGET_BACKENDS=pplnn '
if build_cuda is True:
cmd += ' -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" '
else:
cmd += ' -DMMDEPLOY_TARGET_DEVICES=cpu '
cmd += ' -Dpplcv_DIR={} '.format(pplcv_cmake_dir)
cmd += ' -Dpplnn_DIR={} '.format(pplnn_cmake_dir)
os.system(cmd)
os.system('cd build && make -j {} && make install'.format(g_jobs))
os.system('python3 -m pip install -e .')
try:
import mmcv
print(mmcv.__version__)
os.system('python3 tools/check_env.py')
except Exception:
print('Please install torch & mmcv later.. ∩▽∩')
return 0
def main():
"""Auto install mmdeploy with pplnn. To verify this script:
1) use `sudo docker run -v /path/to/mmdeploy:/root/mmdeploy -v /path/to/Miniconda3-latest-Linux-x86_64.sh:/root/miniconda.sh -it ubuntu:18.04 /bin/bash` # noqa: E501
2) install conda and setup python environment
3) run `python3 tools/scripts/build_ubuntu_x64_pplnn.py`
Returns:
-1 on failure, otherwise None.
"""
global g_jobs
g_jobs = get_job(sys.argv)
print('g_jobs {}'.format(g_jobs))
work_dir = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
dep_dir = os.path.abspath(os.path.join(work_dir, '..', 'mmdeploy-dep'))
if os.path.isfile(dep_dir):
print('{} already exists and it is a file, exit.'.format(dep_dir))
return -1
if not os.path.exists(dep_dir):
os.mkdir(dep_dir)
success = ensure_base_env(work_dir, dep_dir)
if success != 0:
return -1
# install pplcv and pplnn
nvcc = cmd_result('which nvcc')
build_cuda = False
if nvcc is not None and len(nvcc) > 1:
build_cuda = True
pplcv_cmake_dir = install_pplcv(dep_dir, build_cuda)
pplnn_cmake_dir = install_pplnn(dep_dir, build_cuda)
if install_mmdeploy(work_dir, pplnn_cmake_dir, pplcv_cmake_dir,
build_cuda) != 0:
return -1
if os.path.exists(Path('~/mmdeploy.env').expanduser()):
print('Please source ~/mmdeploy.env to setup your env !')
os.system('cat ~/mmdeploy.env')
if __name__ == '__main__':
main()
# Copyright (c) OpenMMLab. All rights reserved.
import os
import sys
import time
from pathlib import Path
from ubuntu_utils import (cmd_result, cu_version_name, ensure_base_env,
get_job, pytorch_version)
g_jobs = 2
def install_libtorch(dep_dir):
print('-' * 10 + 'install libtorch' + '-' * 10)
time.sleep(2)
os.chdir(dep_dir)
unzipped_name = 'libtorch'
if os.path.exists(unzipped_name):
return os.path.join(dep_dir, unzipped_name)
torch_version = pytorch_version()
if torch_version is None:
print('torch version is None, try 1.11.0')
torch_version = '1.11.0'
version_name = None
# first check `nvcc` version, if failed, use `nvidia-smi`
cuda = cmd_result(
" nvcc --version | grep release | awk '{print $5}' | awk -F , '{print $1}' " # noqa: E501
)
if cuda is None or len(cuda) < 1:
cuda = cmd_result(" nvidia-smi | grep CUDA | awk '{print $9}' ")
if cuda is not None and len(cuda) > 0:
version_name = cu_version_name(cuda)
else:
version_name = 'cpu'
filename = 'libtorch-shared-with-deps-{}%2B{}.zip'.format(
torch_version, version_name)
url = 'https://download.pytorch.org/libtorch/{}/{}'.format(
version_name, filename)
os.system('wget -q --show-progress {} -O libtorch.zip'.format(url))
os.system('unzip libtorch.zip')
if not os.path.exists(unzipped_name):
print(
'download or unzip libtorch from {} failed, please check https://pytorch.org/get-started/locally/' # noqa: E501
.format(url))
return None
return os.path.join(dep_dir, unzipped_name)
def install_mmdeploy(work_dir, libtorch_dir):
print('-' * 10 + 'build and install mmdeploy' + '-' * 10)
time.sleep(3)
os.chdir(work_dir)
os.system('git submodule init')
os.system('git submodule update')
if not os.path.exists('build'):
os.system('mkdir build')
os.system('rm -rf build/CMakeCache.txt')
cmd = 'cd build && Torch_DIR={} cmake ..'.format(libtorch_dir)
cmd += ' -DMMDEPLOY_BUILD_SDK=ON '
cmd += ' -DMMDEPLOY_BUILD_EXAMPLES=ON '
cmd += ' -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON '
cmd += ' -DMMDEPLOY_TARGET_DEVICES=cpu '
cmd += ' -DMMDEPLOY_TARGET_BACKENDS=torchscript '
cmd += ' -DTORCHSCRIPT_DIR={} '.format(libtorch_dir)
os.system(cmd)
os.system('cd build && make -j {} && make install'.format(g_jobs))
os.system('python3 -m pip install -e .')
try:
import mmcv
print(mmcv.__version__)
os.system('python3 tools/check_env.py')
except Exception:
print('Please install torch & mmcv later.. ≥▽≤')
return 0
def main():
"""Auto install mmdeploy with ort. To verify this script:
1) use `sudo docker run -v /path/to/mmdeploy:/root/mmdeploy -v /path/to/Miniconda3-latest-Linux-x86_64.sh:/root/miniconda.sh -it ubuntu:18.04 /bin/bash` # noqa: E501
2) install conda and setup python environment
3) run `python3 tools/scripts/build_ubuntu_x64_torchscript.py`
Returns:
-1 on failure, otherwise None.
"""
global g_jobs
g_jobs = get_job(sys.argv)
print('g_jobs {}'.format(g_jobs))
work_dir = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
dep_dir = os.path.abspath(os.path.join(work_dir, '..', 'mmdeploy-dep'))
if os.path.isfile(dep_dir):
print('{} already exists and it is a file, exit.'.format(dep_dir))
return -1
if not os.path.exists(dep_dir):
os.mkdir(dep_dir)
success = ensure_base_env(work_dir, dep_dir)
if success != 0:
return -1
libtorch_dir = install_libtorch(dep_dir)
if libtorch_dir is None:
return -1
if install_mmdeploy(work_dir, libtorch_dir) != 0:
return -1
if os.path.exists(Path('~/mmdeploy.env').expanduser()):
print('Please source ~/mmdeploy.env to setup your env !')
os.system('cat ~/mmdeploy.env')
if __name__ == '__main__':
main()
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import sys
import time
from ubuntu_utils import cmd_result, ensure_base_env, get_job
g_jobs = 2
def install_llvm(dep_dir):
print('-' * 10 + 'install llvm' + '-' * 10)
os.chdir(dep_dir)
os.system(
'wget --no-check-certificate -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -' # noqa: E501
)
ubuntu = cmd_result(
""" lsb_release -a 2>/dev/null | grep "Release" | tail -n 1 | awk '{print $NF}' """ # noqa: E501
)
nickname_dict = {
'18.04': 'bionic',
'20.04': 'focal',
'22.04': 'jammy',
'22.10': 'kinetic'
}
nickname = nickname_dict.get(ubuntu, None)
if nickname is None:
raise NotImplementedError(f'Unsupported ubuntu version {ubuntu}.')
os.system(
f"sudo add-apt-repository 'deb http://apt.llvm.org/{nickname}/ llvm-toolchain-{nickname}-10 main'" # noqa: E501
)
os.system('sudo apt update')
os.system(
'sudo apt-get install -y llvm-10 lldb-10 llvm-10-dev libllvm10 llvm-10-runtime' # noqa: E501
)
def install_tvm(dep_dir):
print('-' * 10 + 'build and install tvm' + '-' * 10)
time.sleep(2)
os.system('sudo apt-get update')
os.system(
'sudo apt-get install -y python3 python3-dev python3-setuptools gcc libtinfo-dev zlib1g-dev build-essential cmake libedit-dev libxml2-dev' # noqa: E501
)
# generate unzip and build dir
os.chdir(dep_dir)
# git clone
if not osp.exists('tvm'):
os.system(
'git clone --branch v0.10.0 --depth 1 --recursive https://github.com/apache/tvm tvm' # noqa: E501
)
tvm_dir = osp.join(dep_dir, 'tvm')
os.chdir(tvm_dir)
# build
if not osp.exists('build'):
os.system('mkdir build')
os.system('cp cmake/config.cmake build')
os.chdir(osp.join(tvm_dir, 'build'))
os.system(
""" sed -i "s@set(USE_LLVM OFF)@set(USE_LLVM /usr/bin/llvm-config-10)@g" config.cmake """ # noqa: E501
)
os.system('cmake .. && make -j {} && make runtime'.format(g_jobs))
# set env
os.system(
""" echo 'export LD_LIBRARY_PATH={}:$LD_LIBRARY_PATH' >> ~/mmdeploy.env """ # noqa: E501
.format(os.path.join(tvm_dir, 'build')))
# install python package
os.chdir(osp.join(tvm_dir, 'python'))
os.system(""" python3 setup.py install --user """)
# install dependency
os.system(
""" python3 -m pip install xgboost decorator psutil scipy attrs tornado """ # noqa: E501
)
return tvm_dir
def install_mmdeploy(work_dir, tvm_dir):
print('-' * 10 + 'build and install mmdeploy' + '-' * 10)
time.sleep(3)
os.chdir(work_dir)
os.system('git submodule init')
os.system('git submodule update')
if not os.path.exists('build'):
os.system('mkdir build')
os.system('rm -rf build/CMakeCache.txt')
cmd = 'cd build && cmake ..'
cmd += ' -DMMDEPLOY_BUILD_SDK=ON '
cmd += ' -DMMDEPLOY_BUILD_EXAMPLES=ON '
cmd += ' -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON '
cmd += ' -DMMDEPLOY_TARGET_DEVICES=cpu '
cmd += ' -DMMDEPLOY_TARGET_BACKENDS=tvm '
cmd += ' -DTVM_DIR={} '.format(tvm_dir)
os.system(cmd)
os.system('cd build && make -j {} && make install'.format(g_jobs))
os.system('python3 -m pip install -v -e .')
os.system(""" echo 'export PATH={}:$PATH' >> ~/mmdeploy.env """.format(
os.path.join(work_dir, 'mmdeploy', 'backend', 'tvm')))
try:
import mmcv
print(mmcv.__version__)
os.system('python3 tools/check_env.py')
except Exception:
print('Please install torch & mmcv later...')
return 0
def main():
"""Auto install mmdeploy with tvm.
To verify this script:
1) use `sudo docker run -v /path/to/mmdeploy:/root/mmdeploy -v /path/to/Miniconda3-latest-Linux-x86_64.sh:/root/miniconda.sh -it ubuntu:18.04 /bin/bash` # noqa: E501
2) install conda and setup python environment
3) run `python3 tools/scripts/build_ubuntu_x64_tvm.py`
Returns:
-1 on failure, otherwise None.
"""
global g_jobs
g_jobs = get_job(sys.argv)
print('g_jobs {}'.format(g_jobs))
work_dir = osp.abspath(osp.join(__file__, '..', '..', '..'))
dep_dir = osp.abspath(osp.join(work_dir, '..', 'mmdeploy-dep'))
if osp.isfile(dep_dir):
print('{} already exists and it is a file, exit.'.format(dep_dir))
return -1
if not osp.exists(dep_dir):
os.mkdir(dep_dir)
success = ensure_base_env(work_dir, dep_dir)
if success != 0:
return -1
install_llvm(dep_dir)
tvm_dir = install_tvm(dep_dir)
if install_mmdeploy(work_dir, tvm_dir) != 0:
return -1
if osp.exists(osp.expanduser('~/mmdeploy.env')):
print('Please source ~/mmdeploy.env to setup your env !')
os.system('cat ~/mmdeploy.env')
if __name__ == '__main__':
main()
#!/bin/bash
# set -ex
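# cross-compile the mmdeploy SDK with the ncnn backend for aarch64; run this
# from the mmdeploy repo root (dependencies are built in ../mmdeploy-dep)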
# get appropriate proc number: max(1, nproc-3)
good_nproc() {
num=`nproc`
num=`expr $num - 3`
if [ $num -lt 1 ];then
return 1
fi
return ${num}
}
install_tools() {
sudo apt install -y gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
aarch64-linux-gnu-g++ --version
aarch64-linux-gnu-gcc --version
aarch64-linux-gnu-ld --version
sudo apt install wget git git-lfs
python3 -m pip install cmake==3.22.0
echo 'export PATH=~/.local/bin:${PATH}' >> ~/mmdeploy.env
export PATH=~/.local/bin:${PATH}
}
build_ocv() {
if [ ! -e "opencv" ];then
git clone https://github.com/opencv/opencv --depth=1 --branch=4.6.0 --recursive
fi
if [ ! -e "opencv/platforms/linux/cross_build_aarch64" ];then
mkdir opencv/platforms/linux/cross_build_aarch64
fi
cd opencv/platforms/linux/cross_build_aarch64
rm -rf CMakeCache.txt
cmake ../../.. -DBUILD_TIFF=ON -DCMAKE_INSTALL_PREFIX=/tmp/ocv-aarch64 -DCMAKE_TOOLCHAIN_FILE=../aarch64-gnu.toolchain.cmake
# good_nproc
jobs=`nproc`
make -j${jobs}
make install
cd -
}
build_ncnn() {
if [ ! -e "ncnn" ];then
git clone https://github.com/tencent/ncnn --branch 20221128 --depth=1
fi
if [ ! -e "ncnn/build_aarch64" ];then
mkdir -p ncnn/build_aarch64
fi
cd ncnn/build_aarch64
rm -rf CMakeCache.txt
cmake .. \
-DCMAKE_TOOLCHAIN_FILE=../toolchains/aarch64-linux-gnu.toolchain.cmake \
-DCMAKE_INSTALL_PREFIX=/tmp/ncnn-aarch64
# good_nproc
jobs=`nproc`
make -j${jobs}
make install
cd -
}
build_mmdeploy() {
git submodule init
git submodule update
if [ ! -e "build_aarch64" ];then
mkdir build_aarch64
fi
cd build_aarch64
rm -rf CMakeCache.txt
cmake .. \
-DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/aarch64-linux-gnu.cmake \
-DMMDEPLOY_TARGET_DEVICES="cpu" \
-DMMDEPLOY_TARGET_BACKENDS="ncnn" \
-Dncnn_DIR=/tmp/ncnn-aarch64/lib/cmake/ncnn \
-DOpenCV_DIR=/tmp/ocv-aarch64/lib/cmake/opencv4 \
-DMMDEPLOY_BUILD_EXAMPLES=ON \
-DMMDEPLOY_BUILD_SDK=ON
# good_nproc
jobs=`nproc`
make -j${jobs}
make install
ls -lah install/bin/*
}
print_success() {
echo "----------------------------------------------------------------------"
echo "Cross build finished, PLS copy bin/model/test_data to the device.. QVQ"
echo "----------------------------------------------------------------------"
}
if [ ! -e "../mmdeploy-dep" ];then
mkdir ../mmdeploy-dep
fi
cd ../mmdeploy-dep
install_tools
build_ocv
build_ncnn
cd ../mmdeploy
build_mmdeploy
print_success
#!/bin/bash
# set -ex
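# cross-compile the mmdeploy SDK with the rknn backend for rockchip devices;
# run from the mmdeploy repo root, e.g. `bash <this script> rk3588`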
# get appropriate proc number: max(1, nproc-3)
good_nproc() {
num=`nproc`
num=`expr $num - 3`
if [ $num -lt 1 ];then
return 1
fi
return ${num}
}
install_rknpu_toolchain() {
# install gcc cross compiler
ubuntu_version=`cat /etc/issue`
ubuntu_major_version=`echo "$ubuntu_version" | grep -oP '\d{2}' | head -n 1`
if [ "$ubuntu_major_version" -lt 18 ]; then
echo "ubuntu 18.04 is minimum requirement, but got $ubuntu_version"
wget wget https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf.tar.xz
tar -xvf gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf.tar.xz
sudo ln -sf $(pwd)/gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf/bin/arm-linux-gnueabihf-gcc /usr/bin/arm-linux-gnueabihf-gcc
sudo ln -sf $(pwd)/gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf/bin/arm-linux-gnueabihf-g++ /usr/bin/arm-linux-gnueabihf-g++
else
sudo apt install -y gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf
fi
arm-linux-gnueabihf-gcc --version
arm-linux-gnueabihf-g++ --version
# install rknpu
git clone https://github.com/rockchip-linux/rknpu
export RKNPU_DIR=$(pwd)/rknpu
sudo apt install wget git git-lfs
python3 -m pip install cmake==3.22.0
echo 'export PATH=~/.local/bin:${PATH}' >> ~/mmdeploy.env
export PATH=~/.local/bin:${PATH}
}
install_rknpu2_toolchain() {
sudo apt install -y gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
aarch64-linux-gnu-g++ --version
aarch64-linux-gnu-gcc --version
git clone https://github.com/Caesar-github/gcc-buildroot-9.3.0-2020.03-x86_64_aarch64-rockchip-linux-gnu.git
git clone https://github.com/rockchip-linux/rknpu2.git
export RKNN_TOOL_CHAIN=$(pwd)/gcc-buildroot-9.3.0-2020.03-x86_64_aarch64-rockchip-linux-gnu
export LD_LIBRARY_PATH=$RKNN_TOOL_CHAIN/lib64:$LD_LIBRARY_PATH
export RKNPU2_DIR=$(pwd)/rknpu2
}
build_ocv_arm_gnueabi() {
if [ ! -e "opencv" ];then
git clone https://github.com/opencv/opencv --depth=1 --branch=4.6.0 --recursive
fi
if [ ! -e "opencv/build_arm_gnueabi" ];then
mkdir -p opencv/build_arm_gnueabi
fi
cd opencv/build_arm_gnueabi
rm -rf CMakeCache.txt
cmake .. -DCMAKE_INSTALL_PREFIX=install -DCMAKE_TOOLCHAIN_FILE=../platforms/linux/arm-gnueabi.toolchain.cmake \
-DBUILD_TIFF=ON -DBUILD_PERF_TESTS=OFF -DBUILD_SHARED_LIBS=OFF -DBUILD_TESTS=OFF -DCMAKE_BUILD_TYPE=Release
# good_nproc
jobs=`nproc`
make -j${jobs} && make install
export OPENCV_PACKAGE_DIR=$(pwd)/install/lib/cmake/opencv4
cd -
}
build_ocv_aarch64() {
if [ ! -e "opencv" ];then
git clone https://github.com/opencv/opencv --depth=1 --branch=4.6.0 --recursive
fi
if [ ! -e "opencv/build_aarch64" ];then
mkdir -p opencv/build_aarch64
fi
cd opencv/build_aarch64
rm -rf CMakeCache.txt
cmake .. -DCMAKE_INSTALL_PREFIX=install -DCMAKE_TOOLCHAIN_FILE=../platforms/linux/aarch64-gnu.toolchain.cmake \
-DBUILD_PERF_TESTS=OFF -DBUILD_SHARED_LIBS=OFF -DBUILD_TESTS=OFF -DCMAKE_BUILD_TYPE=Release
# good_nproc
jobs=`nproc`
make -j${jobs} && make install
export OPENCV_PACKAGE_DIR=$(pwd)/install/lib/cmake/opencv4
cd -
}
build_mmdeploy_with_rknpu() {
git submodule init
git submodule update
if [ ! -e "build_rknpu" ];then
mkdir build_rknpu
fi
cd build_rknpu
rm -rf CMakeCache.txt
cmake .. \
-DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/arm-linux-gnueabihf.cmake \
-DMMDEPLOY_BUILD_SDK=ON \
-DMMDEPLOY_BUILD_EXAMPLES=ON \
-DMMDEPLOY_TARGET_BACKENDS="rknn" \
-DRKNPU_DEVICE_DIR="${RKNPU_DIR}"/rknn/rknn_api/librknn_api \
-DOpenCV_DIR="${OPENCV_PACKAGE_DIR}"
# good_nproc
jobs=`nproc`
make -j${jobs} && make install
ls -lah install/bin/*
}
build_mmdeploy_with_rknpu2() {
git submodule init
git submodule update
device_model=$1
if [ ! -e "build_rknpu2" ];then
mkdir build_rknpu2
fi
cd build_rknpu2
rm -rf CMakeCache.txt
cmake .. \
-DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/rknpu2-linux-gnu.cmake \
-DMMDEPLOY_BUILD_SDK=ON \
-DMMDEPLOY_BUILD_EXAMPLES=ON \
-DMMDEPLOY_TARGET_BACKENDS="rknn" \
-DRKNPU2_DEVICE_DIR="${RKNPU2_DIR}/runtime/${device_model}" \
-DOpenCV_DIR="${OPENCV_PACKAGE_DIR}"
# good_nproc
jobs=`nproc`
make -j${jobs} && make install
ls -lah install/bin/*
}
print_success() {
echo "----------------------------------------------------------------------"
echo "Cross build finished, PLS copy bin/model/test_data to the device.. QVQ"
echo "----------------------------------------------------------------------"
}
echo "the current workspace: $(pwd)"
if [ ! -e "../mmdeploy-dep" ];then
mkdir ../mmdeploy-dep
fi
cd ../mmdeploy-dep
device_model=$(echo "$1" | tr [:lower:] [:upper:])
case "$device_model" in
RK1808|RK1806|RV1109|RV1126)
install_rknpu_toolchain
build_ocv_arm_gnueabi
cd ../mmdeploy
build_mmdeploy_with_rknpu
;;
RK3566|RK3568)
install_rknpu2_toolchain
build_ocv_aarch64
cd ../mmdeploy
build_mmdeploy_with_rknpu2 "RK356X"
;;
RK3588|RV1106)
install_rknpu2_toolchain
build_ocv_aarch64
cd ../mmdeploy
build_mmdeploy_with_rknpu2 "$device_model"
;;
*)
echo "mmdeploy doesn't support rockchip '$1' yet"
exit 1
;;
esac
print_success
# Copyright (c) OpenMMLab. All rights reserved.
import os
import re
import time
def pytorch_version():
version = None
try:
import torch
raw = torch.__version__
pattern = re.compile(r'[0-9]+\.[0-9]+\.[0-9]+')
version = pattern.findall(raw)[0]
except Exception:
pass
return version
def cmd_result(txt: str):
cmd = os.popen(txt)
return cmd.read().rstrip().lstrip()
def get_job(argv) -> int:
# get nprocs, if user not specified, use max(1, nproc-2)
job = 2
if len(argv) <= 1:
print('you can use `python3 {} N` to set make -j [N]'.format(argv[0]))
nproc = cmd_result('nproc')
if nproc is not None and len(nproc) > 0:
job = max(int(nproc) - 2, 1)
else:
job = int(argv[1])
return job
def version_major(txt: str) -> int:
return int(txt.split('.')[0])
def version_minor(txt: str) -> int:
return int(txt.split('.')[1])
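# map a CUDA version string to the pytorch wheel suffix, e.g. '11.3' -> 'cu113'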
def cu_version_name(version: str) -> str:
versions = version.split('.')
return 'cu' + versions[0] + versions[1]
def simple_check_install(bin: str, sudo: str) -> str:
result = cmd_result('which {}'.format(bin))
if result is None or len(result) < 1:
print('{} not found, try install {} ..'.format(bin, bin), end='')
os.system('{} apt install {} -y'.format(sudo, bin))
result = cmd_result('which {}'.format(bin))
if result is None or len(result) < 1:
print('Check {} failed.'.format(bin))
return None
print('success')
return result
def ensure_base_env(work_dir, dep_dir):
description = """
check python, root, pytorch version, auto install these binary:
* make
* g++
* git
* wget
* unzip
* opencv
* mmcv (not compulsory)
"""
print('-' * 10 + 'ensure base env' + '-' * 10)
print(description)
os.system('python3 -m ensurepip')
os.system('python3 -m pip install wheel')
sudo = 'sudo'
if 'root' in cmd_result('whoami'):
sudo = ''
# check ubuntu
ubuntu = cmd_result(
""" lsb_release -a 2>/dev/null | grep "Release" | tail -n 1 | awk '{print $NF}' """ # noqa: E501
)
# check cmake version
cmake = cmd_result('which cmake')
if cmake is None or len(cmake) < 1:
print('cmake not found, try install cmake ..', end='')
os.system('python3 -m pip install cmake')
cmake = cmd_result('which cmake')
if cmake is None or len(cmake) < 1:
env = 'export PATH=${PATH}:~/.local/bin'
# os.system cannot change this process's PATH; update os.environ so the
# following `which cmake` check can see ~/.local/bin
os.environ['PATH'] += ':' + os.path.expanduser('~/.local/bin')
os.system(""" echo '{}' >> ~/mmdeploy.env """.format(env))
cmake = cmd_result('which cmake')
if cmake is None or len(cmake) < 1:
print('Check cmake failed.')
return -1
print('success')
# check make
make = cmd_result('which make')
if make is None or len(make) < 1:
print('make not found, try install make ..', end='')
os.system('{} apt update --fix-missing'.format(sudo))
os.system(
'{} DEBIAN_FRONTEND="noninteractive" apt install make -y'.format(
sudo))
make = cmd_result('which make')
if make is None or len(make) < 1:
print('Check make failed.')
return -1
print('success')
# check g++ version
gplus = cmd_result('which g++')
if gplus is None or len(gplus) < 1:
# install g++
print('g++ not found, try install g++ ..', end='')
os.system(
'{} DEBIAN_FRONTEND="noninteractive" apt install software-properties-common -y' # noqa: E501
.format(sudo)) # noqa: E501
os.system('{} apt update'.format(sudo))
if ubuntu is None or len(ubuntu) < 1 or version_major(ubuntu) <= 18:
os.system(
'{} add-apt-repository ppa:ubuntu-toolchain-r/test -y'.format(
sudo))
os.system('{} apt install gcc g++ -y'.format(sudo))
gplus = cmd_result('which g++')
if gplus is None or len(gplus) < 1:
print('Check g++ failed.')
return -1
print('success')
# wget
wget = simple_check_install('wget', sudo)
# check torch and mmcv, we try to install mmcv, it is not compulsory
mmcv_version = None
torch_version = None
try:
import torch
torch_version = torch.__version__
try:
import mmcv
mmcv_version = mmcv.__version__
except Exception:
# install mmcv
print('mmcv not found, try install mmcv ..', end='')
os.system('python3 -m pip install -U openmim')
os.system('mim install mmcv-full==1.5.1')
except Exception:
pass
# git
git = simple_check_install('git', sudo)
# unzip
unzip = simple_check_install('unzip', sudo)
# opencv
ocv = cmd_result('which opencv_version')
if ocv is None or len(ocv) < 1:
print('ocv not found, try install ocv ..', end='')
os.system('{} apt update'.format(sudo))
pattern = re.compile(r'[0-9]+\.[0-9]+\.[0-9]+')
upstream = cmd_result('{} apt list libopencv-dev -a'.format(sudo))
add_ppa = True
if upstream is not None and len(upstream) > 0:
versions = pattern.findall(upstream)
if versions is not None and len(versions) > 0:
version = versions[0]
major = int(version.split('.')[0])
if major >= 3:
# Directly install upstream OCV, do not need add ppa
add_ppa = False
if add_ppa:
os.system(
'{} add-apt-repository ppa:ignaciovizzo/opencv3-nonfree -y'.
format(sudo))
os.system(
'{} DEBIAN_FRONTEND="noninteractive" apt install libopencv-dev -y'
.format(sudo))
ocv = cmd_result('which opencv_version')
if ocv is None or len(ocv) < 1:
print('Check ocv failed.')
return -1
print('success')
# print all
print('ubuntu \t\t:{}'.format(ubuntu))
# check python
print('python bin\t:{}'.format(cmd_result('which python3')))
print('python version\t:{}'.format(
cmd_result("python3 --version | awk '{print $2}'")))
print('cmake bin\t:{}'.format(cmake))
print('cmake version\t:{}'.format(
cmd_result("cmake --version | head -n 1 | awk '{print $3}'")))
print('make bin\t:{}'.format(make))
print('make version\t:{}'.format(
cmd_result(" make --version | head -n 1 | awk '{print $3}' ")))
print('wget bin\t:{}'.format(wget))
print('g++ bin\t:{}'.format(gplus))
print('mmcv version\t:{}'.format(mmcv_version))
if mmcv_version is None:
print('\t please install mmcv later.')
time.sleep(2)
print('torch version\t:{}'.format(torch_version))
if torch_version is None:
print('\t please install pytorch later.')
time.sleep(2)
print('ocv version\t:{}'.format(cmd_result('opencv_version')))
print('git bin\t\t:{}'.format(git))
print('git version\t:{}'.format(
cmd_result("git --version | awk '{print $3}' ")))
print('unzip bin\t:{}'.format(unzip))
# work dir
print('work dir \t:{}'.format(work_dir))
# dep dir
print('dep dir \t:{}'.format(dep_dir))
print('\n')
return 0
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
from texttable import Texttable
def parse_args():
parser = argparse.ArgumentParser(
description='Analyze sdk profiler file tool.')
parser.add_argument('profile_file', help='SDK profile file path')
args = parser.parse_args()
return args
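# Rebuild a node's hierarchical display name ('parent/child/...') by walking
# the parent map; ancestors that already appeared in the event list are
# rendered as indentation instead of repeating their names.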
def get_name(addr, prev, addr2name, used_addr, depth, skip):
node_name = addr2name[addr] if not skip else ''
if addr not in prev:
return ' ' * depth * 4 + node_name
prev_addr = prev[addr]
if prev_addr in used_addr:
depth += 1
skip = True
prev_name = get_name(prev[addr], prev, addr2name, used_addr, depth, skip)
if len(prev_name.split()) == 0:
return prev_name + node_name
return prev_name + '/' + node_name
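# The profiler file has two sections separated by a '----' line: a graph
# section (one node per line: name, address, child addresses) and an event
# section (one record per line: node address, kind (0 = start, 1 = end),
# call index and timestamp).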
def main():
args = parse_args()
with open(args.profile_file) as f:
data = f.read()
graph, events = data.split('----\n')
graph = graph.strip().split('\n')
events = events.strip().split('\n')
addr2name = {}
addr2id = {}
id2addr = {}
next = {}
prev = {}
for i, line in enumerate(graph):
info = line.split()
name, addr = info[:2]
addr2name[addr] = name
addr2id[addr] = i
id2addr[i] = addr
next[addr] = []
for child in info[2:]:
next[addr].append(child)
prev[child] = addr
n_active = {i: 0 for i in range(len(addr2id))}
n_call = {i: 0 for i in range(len(addr2id))}
t_occupy = {i: 0 for i in range(len(addr2id))}
t_usage = {i: 0 for i in range(len(addr2id))}
t_time = {i: [] for i in range(len(addr2id))}
used_id = set()
used_addr = set()
event_start = {}
now = 0
first_id = None
for event in events:
words = event.split()
addr = words[0]
id = addr2id[addr]
used_addr.add(addr)
used_id.add(id)
kind, index, ts = map(int, words[1:])
if first_id is None:
first_id = id
if id == first_id and kind == 0 and n_active[id] == 0:
now = ts
key = (id, index)
delta = ts - now
now = ts
for i, n_act in n_active.items():
if n_act > 0:
t_occupy[i] += delta
t_usage[i] += delta * n_act
if kind == 0:
event_start[key] = ts
n_active[id] += 1
n_call[id] += 1
else:
dt = ts - event_start[key]
t_time[id].append(dt)
event_start.pop(key)
n_active[id] -= 1
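# occupy: share of wall time during which the node had at least one active
# call; usage: active-call time summed over overlapping calls; both are
# reported relative to the occupy time of the first (root) node.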
table = Texttable(max_width=0)
table.header(
['name', 'occupy', 'usage', 'n_call', 't_mean', 't_50%', 't_90%'])
for id in sorted(list(used_id)):
occupy = t_occupy[id] / (t_occupy[first_id])
usage = t_usage[id] / (t_occupy[first_id])
times = sorted(t_time[id])
t_mean = np.mean(times) / 1000
t_50 = times[int(len(times) * 0.5)] / 1000
t_90 = times[int(len(times) * 0.9)] / 1000
name = get_name(id2addr[id], prev, addr2name, used_addr, 0, False)
if len(next[id2addr[id]]) != 0:
occupy = '-'
usage = '-'
table.add_row([name, occupy, usage, n_call[id], t_mean, t_50, t_90])
print(table.draw())
if __name__ == '__main__':
main()
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from copy import deepcopy
from mmengine import DictAction
from mmdeploy.apis import build_task_processor
from mmdeploy.utils.config_utils import load_config
from mmdeploy.utils.timer import TimeCounter
def parse_args():
parser = argparse.ArgumentParser(
description='MMDeploy test (and eval) a backend.')
parser.add_argument('deploy_cfg', help='Deploy config path')
parser.add_argument('model_cfg', help='Model config path')
parser.add_argument(
'--model', type=str, nargs='+', help='Input model files.')
parser.add_argument(
'--device', help='device used for conversion', default='cpu')
parser.add_argument(
'--work-dir',
default='./work_dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--interval',
type=int,
default=1,
help='visualize per interval samples.')
parser.add_argument(
'--wait-time',
type=float,
default=2,
help='display time of every window. (second)')
parser.add_argument(
'--log2file',
type=str,
help='log evaluation results and speed to file',
default=None)
parser.add_argument(
'--speed-test', action='store_true', help='activate speed test')
parser.add_argument(
'--warmup',
type=int,
help='warmup iterations before counting inference latency, requires '
'--speed-test to be set',
default=10)
parser.add_argument(
'--log-interval',
type=int,
help='the interval between each log, requires '
'--speed-test to be set',
default=100)
parser.add_argument(
'--batch-size',
type=int,
default=1,
help='the batch size for test, would override `samples_per_gpu` '
'in data config.')
parser.add_argument(
'--uri',
type=str,
default='192.168.1.1:60000',
help='Remote ipv4:port or ipv6:port for inference on edge device.')
args = parser.parse_args()
return args
def main():
args = parse_args()
deploy_cfg_path = args.deploy_cfg
model_cfg_path = args.model_cfg
# load deploy_cfg
deploy_cfg, model_cfg = load_config(deploy_cfg_path, model_cfg_path)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
work_dir = args.work_dir
elif model_cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.model_cfg))[0])
# merge options for model cfg
if args.cfg_options is not None:
model_cfg.merge_from_dict(args.cfg_options)
task_processor = build_task_processor(model_cfg, deploy_cfg, args.device)
# prepare the dataset loader
test_dataloader = deepcopy(model_cfg['test_dataloader'])
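# some model configs define a list of test dataloaders; build a dataset and
# dataloader for each entry, otherwise build the single dataloader directly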
if isinstance(test_dataloader, list):
dataset = []
for loader in test_dataloader:
ds = task_processor.build_dataset(loader['dataset'])
dataset.append(ds)
loader['dataset'] = ds
loader['batch_size'] = args.batch_size
loader = task_processor.build_dataloader(loader)
dataloader = test_dataloader
else:
test_dataloader['batch_size'] = args.batch_size
dataset = task_processor.build_dataset(test_dataloader['dataset'])
test_dataloader['dataset'] = dataset
dataloader = task_processor.build_dataloader(test_dataloader)
# load the model of the backend
model = task_processor.build_backend_model(
args.model,
data_preprocessor_updater=task_processor.update_data_preprocessor)
destroy_model = model.destroy
is_device_cpu = (args.device == 'cpu')
runner = task_processor.build_test_runner(
model,
work_dir,
log_file=args.log2file,
show=args.show,
show_dir=args.show_dir,
wait_time=args.wait_time,
interval=args.interval,
dataloader=dataloader)
if args.speed_test:
with_sync = not is_device_cpu
with TimeCounter.activate(
warmup=args.warmup,
log_interval=args.log_interval,
with_sync=with_sync,
file=args.log2file,
batch_size=args.batch_size):
runner.test()
else:
runner.test()
# only effective when the backend requires explicit clean-up (e.g. Ascend)
destroy_model()
if __name__ == '__main__':
main()
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmdeploy.apis import (extract_model, get_predefined_partition_cfg,
torch2onnx)
from mmdeploy.utils import (get_ir_config, get_partition_config,
get_root_logger, load_config)
def parse_args():
parser = argparse.ArgumentParser(description='Export model to ONNX.')
parser.add_argument('deploy_cfg', help='deploy config path')
parser.add_argument('model_cfg', help='model config path')
parser.add_argument('checkpoint', help='model checkpoint path')
parser.add_argument('img', help='image used to convert the model')
parser.add_argument(
'--work-dir',
default='./work-dir',
help='Directory to save output files.')
parser.add_argument(
'--device', help='device used for conversion', default='cpu')
parser.add_argument(
'--log-level',
help='set log level',
default='INFO',
choices=list(logging._nameToLevel.keys()))
args = parser.parse_args()
return args
def main():
args = parse_args()
logger = get_root_logger(log_level=args.log_level)
logger.info(f'torch2onnx: \n\tmodel_cfg: {args.model_cfg} '
f'\n\tdeploy_cfg: {args.deploy_cfg}')
os.makedirs(args.work_dir, exist_ok=True)
# load deploy_cfg
deploy_cfg = load_config(args.deploy_cfg)[0]
save_file = get_ir_config(deploy_cfg)['save_file']
torch2onnx(
args.img,
args.work_dir,
save_file,
deploy_cfg=args.deploy_cfg,
model_cfg=args.model_cfg,
model_checkpoint=args.checkpoint,
device=args.device)
# partition model
partition_cfgs = get_partition_config(deploy_cfg)
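# a partition config is either an explicit 'partition_cfg' list or a
# predefined partition type resolved via get_predefined_partition_cfg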
if partition_cfgs is not None:
if 'partition_cfg' in partition_cfgs:
partition_cfgs = partition_cfgs.get('partition_cfg', None)
else:
assert 'type' in partition_cfgs
partition_cfgs = get_predefined_partition_cfg(
deploy_cfg, partition_cfgs['type'])
origin_ir_file = osp.join(args.work_dir, save_file)
for partition_cfg in partition_cfgs:
save_file = partition_cfg['save_file']
save_path = osp.join(args.work_dir, save_file)
start = partition_cfg['start']
end = partition_cfg['end']
dynamic_axes = partition_cfg.get('dynamic_axes', None)
extract_model(
origin_ir_file,
start,
end,
dynamic_axes=dynamic_axes,
save_file=save_path)
logger.info(f'torch2onnx finished. Results saved to {args.work_dir}')
if __name__ == '__main__':
main()