# Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser

ZCMax's avatar
ZCMax committed
4
import mmcv
5

ZCMax's avatar
ZCMax committed
6
7
from mmdet3d.apis import inference_multi_modality_detector, init_model
from mmdet3d.registry import VISUALIZERS


def parse_args():
    """Parse command-line arguments for the multi-modality 3D detection demo.

    Positional arguments name the input sample (point cloud, image,
    annotation file) and the model (config, checkpoint); optional flags
    control the device, camera view, score filtering and visualization.

    Returns:
        argparse.Namespace: Parsed arguments.
    """
    parser = ArgumentParser()
    parser.add_argument('pcd', help='Point cloud file')
    parser.add_argument('img', help='image file')
    parser.add_argument('ann', help='ann file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--cam-type',
        type=str,
        default='CAM_FRONT',
        help='choose camera type to inference')
    parser.add_argument(
        '--score-thr', type=float, default=0.0, help='bbox score threshold')
    parser.add_argument(
        '--out-dir', type=str, default='demo', help='dir to save results')
    parser.add_argument(
        '--show',
        action='store_true',
        help='show online visualization results')
    parser.add_argument(
        '--snapshot',
        action='store_true',
        help='whether to save online visualization results')
    args = parser.parse_args()
    return args


def main(args):
    """Run multi-modality (point cloud + image) 3D detection on one sample.

    Builds the detector from config/checkpoint, runs inference on the given
    point cloud and image, then visualizes the predictions.

    Args:
        args (argparse.Namespace): Arguments produced by ``parse_args``.
    """
    # build the model from a config file and a checkpoint file
    model = init_model(args.config, args.checkpoint, device=args.device)

    # init visualizer
    visualizer = VISUALIZERS.build(model.cfg.visualizer)
    visualizer.dataset_meta = model.dataset_meta

    # test a single image and point cloud sample
    result, data = inference_multi_modality_detector(model, args.pcd, args.img,
                                                     args.ann, args.cam_type)
    points = data['inputs']['points']
    # A multi-view sample carries a list of image paths; load every view.
    # Images are read as BGR by mmcv and converted to RGB for visualization.
    if isinstance(result.img_path, list):
        img = []
        for img_path in result.img_path:
            single_img = mmcv.imread(img_path)
            single_img = mmcv.imconvert(single_img, 'bgr', 'rgb')
            img.append(single_img)
    else:
        img = mmcv.imread(result.img_path)
        img = mmcv.imconvert(img, 'bgr', 'rgb')
    data_input = dict(points=points, img=img)

    # show the results
    # NOTE(review): args.snapshot is parsed but never used here — confirm
    # whether the visualizer is expected to consume it.
    visualizer.add_datasample(
        'result',
        data_input,
        data_sample=result,
        draw_gt=False,
        show=args.show,
        wait_time=-1,
        out_file=args.out_dir,
        pred_score_thr=args.score_thr,
        vis_task='multi-modality_det')


# Script entry point: parse CLI arguments and run the demo.
if __name__ == '__main__':
    args = parse_args()
    main(args)