Unverified commit 32a4328b authored by Wenwei Zhang, committed by GitHub

Bump version to V1.0.0rc0

parents 86cc487c a8817998
# Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import numpy as np
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet3d.apis import inference_detector, init_model
from mmdet3d.core.points import get_points_type
class MMdet3dHandler(BaseHandler):
"""MMDetection3D Handler used in TorchServe.
    This handler loads models from MMDetection3D and processes input data to
    produce predicted results. For now, it only supports SECOND.
"""
threshold = 0.5
load_dim = 4
use_dim = [0, 1, 2, 3]
coord_type = 'LIDAR'
attribute_dims = None
def initialize(self, context):
"""Initialize function loads the model in MMDetection3D.
Args:
            context (context): A JSON object containing information about the
                model artifacts and their parameters.
"""
properties = context.system_properties
self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = torch.device(
            self.map_location + ':' + str(properties.get('gpu_id'))
            if torch.cuda.is_available() else self.map_location)
self.manifest = context.manifest
model_dir = properties.get('model_dir')
serialized_file = self.manifest['model']['serializedFile']
checkpoint = os.path.join(model_dir, serialized_file)
self.config_file = os.path.join(model_dir, 'config.py')
self.model = init_model(self.config_file, checkpoint, self.device)
self.initialized = True
def preprocess(self, data):
"""Preprocess function converts data into LiDARPoints class.
Args:
data (List): Input data from the request.
Returns:
            `LiDARPoints`: The input point cloud data wrapped in a
                `LiDARPoints` instance.
"""
for row in data:
# Compat layer: normally the envelope should just return the data
# directly, but older versions of Torchserve didn't have envelope.
pts = row.get('data') or row.get('body')
if isinstance(pts, str):
pts = base64.b64decode(pts)
points = np.frombuffer(pts, dtype=np.float32)
points = points.reshape(-1, self.load_dim)
points = points[:, self.use_dim]
points_class = get_points_type(self.coord_type)
points = points_class(
points,
points_dim=points.shape[-1],
attribute_dims=self.attribute_dims)
return points
def inference(self, data):
"""Inference Function.
This function is used to make a prediction call on the
given input request.
Args:
data (`LiDARPoints`): LiDARPoints class passed to make
the inference request.
Returns:
            List[dict]: The predicted results.
"""
results, _ = inference_detector(self.model, data)
return results
def postprocess(self, data):
"""Postprocess function.
This function makes use of the output from the inference and
converts it into a torchserve supported response output.
Args:
data (List[dict]): The data received from the prediction
output of the model.
Returns:
            List: A list of the predicted outputs, one entry per input
                sample.
"""
output = []
for pts_index, result in enumerate(data):
output.append([])
if 'pts_bbox' in result.keys():
pred_bboxes = result['pts_bbox']['boxes_3d'].tensor.numpy()
pred_scores = result['pts_bbox']['scores_3d'].numpy()
else:
pred_bboxes = result['boxes_3d'].tensor.numpy()
pred_scores = result['scores_3d'].numpy()
index = pred_scores > self.threshold
bbox_coords = pred_bboxes[index].tolist()
score = pred_scores[index].tolist()
output[pts_index].append({'3dbbox': bbox_coords, 'score': score})
return output
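# A minimal client sketch for the handler above, assuming a SECOND model has
# been archived and is served by TorchServe under the hypothetical name
# `second3d` on 127.0.0.1:8080, and that `demo.bin` holds float32 points with
# `load_dim` values per point.
import requests

with open('demo.bin', 'rb') as f:
    raw_points = f.read()
# preprocess() above also accepts a base64-encoded string as the request body.
response = requests.post(
    'http://127.0.0.1:8080/predictions/second3d', data=raw_points)
print(response.json())  # -> [{'3dbbox': [[...], ...], 'score': [...]}]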
from argparse import ArgumentParser
import numpy as np
import requests
from mmdet3d.apis import inference_detector, init_model
def parse_args():
parser = ArgumentParser()
parser.add_argument('pcd', help='Point cloud file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('model_name', help='The model name in the server')
parser.add_argument(
'--inference-addr',
default='127.0.0.1:8080',
help='Address and port of the inference server')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.5, help='3d bbox score threshold')
args = parser.parse_args()
return args
def parse_result(input):
    """Parse the server response body into an array of predicted 3D bboxes."""
bbox = input[0]['3dbbox']
result = np.array(bbox)
return result
def main(args):
# build the model from a config file and a checkpoint file
model = init_model(args.config, args.checkpoint, device=args.device)
# test a single point cloud file
model_result, _ = inference_detector(model, args.pcd)
    # filter the 3D bboxes whose scores exceed the threshold
    # (0.5 by default, matching the handler's threshold)
if 'pts_bbox' in model_result[0].keys():
pred_bboxes = model_result[0]['pts_bbox']['boxes_3d'].tensor.numpy()
pred_scores = model_result[0]['pts_bbox']['scores_3d'].numpy()
else:
pred_bboxes = model_result[0]['boxes_3d'].tensor.numpy()
pred_scores = model_result[0]['scores_3d'].numpy()
    model_result = pred_bboxes[pred_scores > args.score_thr]
url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
with open(args.pcd, 'rb') as points:
response = requests.post(url, points)
server_result = parse_result(response.json())
assert np.allclose(model_result, server_result)
if __name__ == '__main__':
args = parse_args()
main(args)
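# Example invocation of the test script above (paths, config and model name are
# placeholders; the script location is assumed to be tools/deployment/):
#   python tools/deployment/test_torchserver.py demo.bin \
#       configs/second/hv_second_secfpn_6x8_80e_kitti-3d-car.py \
#       checkpoints/second.pth second3d --score-thr 0.5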
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import warnings
from mmcv import Config, DictAction, mkdir_or_exist, track_iter_progress
from os import path as osp
from pathlib import Path
import mmcv
import numpy as np
from mmcv import Config, DictAction, mkdir_or_exist
from mmdet3d.core.bbox import (Box3DMode, CameraInstance3DBoxes, Coord3DMode,
DepthInstance3DBoxes, LiDARInstance3DBoxes)
@@ -31,6 +34,10 @@ def parse_args():
type=str,
choices=['det', 'seg', 'multi_modality-det', 'mono-det'],
help='Determine the visualization method depending on the task.')
parser.add_argument(
'--aug',
action='store_true',
        help='Whether to visualize the augmented data instead of the '
        'original data.')
parser.add_argument(
'--online',
action='store_true',
@@ -50,8 +57,9 @@ def parse_args():
return args
def build_data_cfg(config_path, skip_type, cfg_options):
def build_data_cfg(config_path, skip_type, aug, cfg_options):
"""Build data config for loading visualization data."""
cfg = Config.fromfile(config_path)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
@@ -63,10 +71,17 @@ def build_data_cfg(config_path, skip_type, cfg_options):
if cfg.data.train['type'] == 'ConcatDataset':
cfg.data.train = cfg.data.train.datasets[0]
train_data_cfg = cfg.data.train
# eval_pipeline purely consists of loading functions
# use eval_pipeline for data loading
if aug:
show_pipeline = cfg.train_pipeline
else:
show_pipeline = cfg.eval_pipeline
for i in range(len(cfg.train_pipeline)):
if cfg.train_pipeline[i]['type'] == 'LoadAnnotations3D':
show_pipeline.insert(i, cfg.train_pipeline[i])
train_data_cfg['pipeline'] = [
x for x in cfg.eval_pipeline if x['type'] not in skip_type
x for x in show_pipeline if x['type'] not in skip_type
]
return cfg
@@ -83,13 +98,14 @@ def to_depth_mode(points, bboxes):
return points, bboxes
def show_det_data(idx, dataset, out_dir, filename, show=False):
def show_det_data(input, out_dir, show=False):
"""Visualize 3D point cloud and 3D bboxes."""
example = dataset.prepare_train_data(idx)
points = example['points']._data.numpy()
gt_bboxes = dataset.get_ann_info(idx)['gt_bboxes_3d'].tensor
if dataset.box_mode_3d != Box3DMode.DEPTH:
img_metas = input['img_metas']._data
points = input['points']._data.numpy()
gt_bboxes = input['gt_bboxes_3d']._data.tensor
if img_metas['box_mode_3d'] != Box3DMode.DEPTH:
points, gt_bboxes = to_depth_mode(points, gt_bboxes)
filename = osp.splitext(osp.basename(img_metas['pts_filename']))[0]
show_result(
points,
gt_bboxes.clone(),
@@ -100,42 +116,35 @@ def show_det_data(idx, dataset, out_dir, filename, show=False):
snapshot=True)
def show_seg_data(idx, dataset, out_dir, filename, show=False):
def show_seg_data(input, out_dir, show=False):
"""Visualize 3D point cloud and segmentation mask."""
example = dataset.prepare_train_data(idx)
points = example['points']._data.numpy()
gt_seg = example['pts_semantic_mask']._data.numpy()
img_metas = input['img_metas']._data
points = input['points']._data.numpy()
gt_seg = input['pts_semantic_mask']._data.numpy()
filename = osp.splitext(osp.basename(img_metas['pts_filename']))[0]
show_seg_result(
points,
gt_seg.copy(),
None,
out_dir,
filename,
np.array(dataset.PALETTE),
dataset.ignore_index,
np.array(img_metas['PALETTE']),
img_metas['ignore_index'],
show=show,
snapshot=True)
def show_proj_bbox_img(idx,
dataset,
out_dir,
filename,
show=False,
is_nus_mono=False):
def show_proj_bbox_img(input, out_dir, show=False, is_nus_mono=False):
"""Visualize 3D bboxes on 2D image by projection."""
try:
example = dataset.prepare_train_data(idx)
except AttributeError: # for Mono-3D datasets
example = dataset.prepare_train_img(idx)
gt_bboxes = dataset.get_ann_info(idx)['gt_bboxes_3d']
img_metas = example['img_metas']._data
img = example['img']._data.numpy()
gt_bboxes = input['gt_bboxes_3d']._data
img_metas = input['img_metas']._data
img = input['img']._data.numpy()
# need to transpose channel to first dim
img = img.transpose(1, 2, 0)
# no 3D gt bboxes, just show img
if gt_bboxes.tensor.shape[0] == 0:
gt_bboxes = None
filename = Path(img_metas['filename']).name
if isinstance(gt_bboxes, DepthInstance3DBoxes):
show_multi_modality_result(
img,
@@ -183,53 +192,34 @@ def main():
if args.output_dir is not None:
mkdir_or_exist(args.output_dir)
cfg = build_data_cfg(args.config, args.skip_type, args.cfg_options)
cfg = build_data_cfg(args.config, args.skip_type, args.aug,
args.cfg_options)
try:
dataset = build_dataset(
cfg.data.train, default_args=dict(filter_empty_gt=False))
except TypeError: # seg dataset doesn't have `filter_empty_gt` key
dataset = build_dataset(cfg.data.train)
data_infos = dataset.data_infos
dataset_type = cfg.dataset_type
dataset_type = cfg.dataset_type
# configure visualization mode
vis_task = args.task # 'det', 'seg', 'multi_modality-det', 'mono-det'
progress_bar = mmcv.ProgressBar(len(dataset))
for idx, data_info in enumerate(track_iter_progress(data_infos)):
if dataset_type in ['KittiDataset', 'WaymoDataset']:
data_path = data_info['point_cloud']['velodyne_path']
elif dataset_type in [
'ScanNetDataset', 'SUNRGBDDataset', 'ScanNetSegDataset',
'S3DISSegDataset', 'S3DISDataset'
]:
data_path = data_info['pts_path']
elif dataset_type in ['NuScenesDataset', 'LyftDataset']:
data_path = data_info['lidar_path']
elif dataset_type in ['NuScenesMonoDataset']:
data_path = data_info['file_name']
else:
raise NotImplementedError(
f'unsupported dataset type {dataset_type}')
file_name = osp.splitext(osp.basename(data_path))[0]
for input in dataset:
if vis_task in ['det', 'multi_modality-det']:
# show 3D bboxes on 3D point clouds
show_det_data(
idx, dataset, args.output_dir, file_name, show=args.online)
show_det_data(input, args.output_dir, show=args.online)
if vis_task in ['multi_modality-det', 'mono-det']:
# project 3D bboxes to 2D image
show_proj_bbox_img(
idx,
dataset,
input,
args.output_dir,
file_name,
show=args.online,
is_nus_mono=(dataset_type == 'NuScenesMonoDataset'))
elif vis_task in ['seg']:
# show 3D segmentation mask on 3D point clouds
show_seg_data(
idx, dataset, args.output_dir, file_name, show=args.online)
show_seg_data(input, args.output_dir, show=args.online)
progress_bar.update()
if __name__ == '__main__':
......
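# Example invocation of the browse script above (the config path is a
# placeholder; the script location is assumed to be tools/misc/). With --aug,
# the augmented train_pipeline is visualized instead of the eval_pipeline:
#   python tools/misc/browse_dataset.py configs/_base_/datasets/kitti-3d-3class.py \
#       --task det --aug --output-dir ./browse_out --online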
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import torch
from mmcv.runner import save_checkpoint
from torch import nn as nn
......
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from mmcv import Config, DictAction
......
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmcv
from mmcv import Config
......
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import tempfile
import torch
from mmcv import Config
from mmcv.runner import load_state_dict
......
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import tempfile
import torch
from mmcv import Config
from mmcv.runner import load_state_dict
......
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
import torch
......
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import torch
from collections import OrderedDict
import torch
def convert_stem(model_key, model_weight, state_dict, converted_names):
new_key = model_key.replace('stem.conv', 'conv1')
......
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmcv
import os
import torch
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
@@ -16,6 +17,13 @@ from mmdet3d.models import build_model
from mmdet.apis import multi_gpu_test, set_random_seed
from mmdet.datasets import replace_ImageToTensor
try:
# If mmdet version > 2.20.0, setup_multi_processes would be imported and
# used from mmdet instead of mmdet3d.
from mmdet.utils import setup_multi_processes
except ImportError:
from mmdet3d.utils import setup_multi_processes
def parse_args():
parser = argparse.ArgumentParser(
@@ -129,6 +137,10 @@ def main():
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set multi-process settings
setup_multi_processes(cfg)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
......
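# Hedged sketch of the optional multi-process settings that
# setup_multi_processes is expected to read from the config (key names assumed
# from the mmdet 2.x implementation; both may be omitted):
mp_start_method = 'fork'  # assumed key: start method for worker processes
opencv_num_threads = 0  # assumed key: disable OpenCV threads in dataloaders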
# Copyright (c) OpenMMLab. All rights reserved.
from __future__ import division
import argparse
import copy
import mmcv
import os
import time
import torch
import warnings
from os import path as osp
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from os import path as osp
from mmdet import __version__ as mmdet_version
from mmdet3d import __version__ as mmdet3d_version
from mmdet3d.apis import train_model
from mmdet3d.apis import init_random_seed, train_model
from mmdet3d.datasets import build_dataset
from mmdet3d.models import build_model
from mmdet3d.utils import collect_env, get_root_logger
from mmdet.apis import set_random_seed
from mmseg import __version__ as mmseg_version
try:
# If mmdet version > 2.20.0, setup_multi_processes would be imported and
# used from mmdet instead of mmdet3d.
from mmdet.utils import setup_multi_processes
except ImportError:
from mmdet3d.utils import setup_multi_processes
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
@@ -104,6 +111,9 @@ def main():
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set multi-process settings
setup_multi_processes(cfg)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
@@ -180,12 +190,12 @@ def main():
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
seed = init_random_seed(args.seed)
logger.info(f'Set random seed to {seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(seed, deterministic=args.deterministic)
cfg.seed = seed
meta['seed'] = seed
meta['exp_name'] = osp.basename(args.config)
model = build_model(
......
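# Sketch of the seeding flow introduced above: init_random_seed(None) is
# expected to draw a fresh seed (and synchronize it across ranks under
# distributed training), while an explicit --seed value passes through
# unchanged. For example:
#   seed = init_random_seed(None)  # e.g. 2021, identical on every rank
#   set_random_seed(seed, deterministic=True)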
import argparse
import time
from os import path as osp
import mmcv
import numpy as np
from mmdet3d.core.bbox import limit_period
def update_sunrgbd_infos(root_dir, out_dir, pkl_files):
print(f'{pkl_files} will be modified because '
f'of the refactor of the Depth coordinate system.')
if root_dir == out_dir:
print(f'Warning, you are overwriting '
f'the original data under {root_dir}.')
time.sleep(3)
for pkl_file in pkl_files:
in_path = osp.join(root_dir, pkl_file)
print(f'Reading from input file: {in_path}.')
a = mmcv.load(in_path)
print('Start updating:')
for item in mmcv.track_iter_progress(a):
if 'rotation_y' in item['annos']:
item['annos']['rotation_y'] = -item['annos']['rotation_y']
item['annos']['gt_boxes_upright_depth'][:, -1:] = \
-item['annos']['gt_boxes_upright_depth'][:, -1:]
out_path = osp.join(out_dir, pkl_file)
print(f'Writing to output file: {out_path}.')
mmcv.dump(a, out_path, 'pkl')
def update_outdoor_dbinfos(root_dir, out_dir, pkl_files):
print(f'{pkl_files} will be modified because '
f'of the refactor of the LIDAR coordinate system.')
if root_dir == out_dir:
print(f'Warning, you are overwriting '
f'the original data under {root_dir}.')
time.sleep(3)
for pkl_file in pkl_files:
in_path = osp.join(root_dir, pkl_file)
print(f'Reading from input file: {in_path}.')
a = mmcv.load(in_path)
print('Start updating:')
for k in a.keys():
print(f'Updating samples of class {k}:')
for item in mmcv.track_iter_progress(a[k]):
boxes = item['box3d_lidar'].copy()
# swap l, w (or dx, dy)
item['box3d_lidar'][3] = boxes[4]
item['box3d_lidar'][4] = boxes[3]
# change yaw
item['box3d_lidar'][6] = -boxes[6] - np.pi / 2
item['box3d_lidar'][6] = limit_period(
item['box3d_lidar'][6], period=np.pi * 2)
out_path = osp.join(out_dir, pkl_file)
print(f'Writing to output file: {out_path}.')
mmcv.dump(a, out_path, 'pkl')
def update_nuscenes_or_lyft_infos(root_dir, out_dir, pkl_files):
print(f'{pkl_files} will be modified because '
f'of the refactor of the LIDAR coordinate system.')
if root_dir == out_dir:
print(f'Warning, you are overwriting '
f'the original data under {root_dir}.')
time.sleep(3)
for pkl_file in pkl_files:
in_path = osp.join(root_dir, pkl_file)
print(f'Reading from input file: {in_path}.')
a = mmcv.load(in_path)
print('Start updating:')
for item in mmcv.track_iter_progress(a['infos']):
boxes = item['gt_boxes'].copy()
# swap l, w (or dx, dy)
item['gt_boxes'][:, 3] = boxes[:, 4]
item['gt_boxes'][:, 4] = boxes[:, 3]
# change yaw
item['gt_boxes'][:, 6] = -boxes[:, 6] - np.pi / 2
item['gt_boxes'][:, 6] = limit_period(
item['gt_boxes'][:, 6], period=np.pi * 2)
out_path = osp.join(out_dir, pkl_file)
print(f'Writing to output file: {out_path}.')
mmcv.dump(a, out_path, 'pkl')
parser = argparse.ArgumentParser(
    description='Update dataset info files after the coordinate '
    'system refactoring.')
parser.add_argument('dataset', metavar='kitti', help='name of the dataset')
parser.add_argument(
'--root-dir',
type=str,
default='./data/kitti',
help='specify the root dir of dataset')
parser.add_argument(
'--version',
type=str,
default='v1.0',
required=False,
help='specify the dataset version, no need for kitti')
parser.add_argument(
'--out-dir',
type=str,
default=None,
required=False,
    help='output directory for the updated pkl files (defaults to --root-dir)')
args = parser.parse_args()
if __name__ == '__main__':
if args.out_dir is None:
args.out_dir = args.root_dir
if args.dataset == 'kitti':
# KITTI infos is in CAM coord sys (unchanged)
# KITTI dbinfos is in LIDAR coord sys (changed)
# so we only update dbinfos
pkl_files = ['kitti_dbinfos_train.pkl']
update_outdoor_dbinfos(
root_dir=args.root_dir, out_dir=args.out_dir, pkl_files=pkl_files)
elif args.dataset == 'nuscenes':
# nuScenes infos is in LIDAR coord sys (changed)
# nuScenes dbinfos is in LIDAR coord sys (changed)
# so we update both infos and dbinfos
pkl_files = ['nuscenes_infos_val.pkl']
if args.version != 'v1.0-mini':
pkl_files.append('nuscenes_infos_train.pkl')
else:
pkl_files.append('nuscenes_infos_train_tiny.pkl')
update_nuscenes_or_lyft_infos(
root_dir=args.root_dir, out_dir=args.out_dir, pkl_files=pkl_files)
if args.version != 'v1.0-mini':
pkl_files = ['nuscenes_dbinfos_train.pkl']
update_outdoor_dbinfos(
root_dir=args.root_dir,
out_dir=args.out_dir,
pkl_files=pkl_files)
elif args.dataset == 'lyft':
# Lyft infos is in LIDAR coord sys (changed)
# Lyft has no dbinfos
# so we update infos
pkl_files = ['lyft_infos_train.pkl', 'lyft_infos_val.pkl']
update_nuscenes_or_lyft_infos(
root_dir=args.root_dir, out_dir=args.out_dir, pkl_files=pkl_files)
elif args.dataset == 'waymo':
# Waymo infos is in CAM coord sys (unchanged)
# Waymo dbinfos is in LIDAR coord sys (changed)
# so we only update dbinfos
pkl_files = ['waymo_dbinfos_train.pkl']
update_outdoor_dbinfos(
root_dir=args.root_dir, out_dir=args.out_dir, pkl_files=pkl_files)
elif args.dataset == 'scannet':
# ScanNet infos is in DEPTH coord sys (changed)
# but bbox is without yaw
# so ScanNet is unaffected
pass
elif args.dataset == 's3dis':
# Segmentation datasets are not affected
pass
elif args.dataset == 'sunrgbd':
# SUNRGBD infos is in DEPTH coord sys (changed)
# and bbox is with yaw
# so we update infos
pkl_files = ['sunrgbd_infos_train.pkl', 'sunrgbd_infos_val.pkl']
update_sunrgbd_infos(
root_dir=args.root_dir, out_dir=args.out_dir, pkl_files=pkl_files)
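# Worked example for update_outdoor_dbinfos above (values are illustrative):
# an old-style box [x, y, z, dx, dy, dz, yaw] = [1.0, 2.0, 0.0, 1.6, 3.9, 1.5, 0.3]
# has dims 3 and 4 swapped and its yaw remapped as yaw -> -yaw - pi / 2, giving
# [1.0, 2.0, 0.0, 3.9, 1.6, 1.5, -0.3 - pi / 2] before limit_period wraps the
# angle back into a single 2 * pi period.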
#!/usr/bin/env bash
set -x
export PYTHONPATH=`pwd`:$PYTHONPATH
PARTITION=$1
DATASET=$2
GPUS=${GPUS:-1}
GPUS_PER_NODE=${GPUS_PER_NODE:-1}
SRUN_ARGS=${SRUN_ARGS:-""}
JOB_NAME=update_data_coords
srun -p ${PARTITION} \
--job-name=${JOB_NAME} \
--gres=gpu:${GPUS_PER_NODE} \
--ntasks=${GPUS} \
--ntasks-per-node=${GPUS_PER_NODE} \
--kill-on-bad-exit=1 \
${SRUN_ARGS} \
python -u tools/update_data_coords.py ${DATASET} \
--root-dir ./data/${DATASET} \
--out-dir ./data/${DATASET}
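# Example (assumed invocation; argument order matches PARTITION and DATASET above):
#   GPUS=1 GPUS_PER_NODE=1 ./tools/update_data_coords.sh my_partition kitti
# This runs tools/update_data_coords.py on ./data/kitti and writes the updated
# pkl files back to the same directory.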