# Copyright (c) OpenMMLab. All rights reserved.
import os
from argparse import ArgumentParser

import mmcv
import numpy as np

from mmdet3d.apis import inference_multi_modality_detector, init_model
from mmdet3d.registry import VISUALIZERS
from mmdet3d.utils import register_all_modules

def parse_args():
    """Build the demo's command-line interface and parse ``sys.argv``.

    Returns:
        argparse.Namespace: parsed options (pcd, image, ann, config,
        checkpoint, device, score_thr, out_dir, show, snapshot).
    """
    parser = ArgumentParser()
    # Required positional inputs: the data sample plus model config/weights.
    for arg_name, arg_help in (('pcd', 'Point cloud file'),
                               ('image', 'image file'),
                               ('ann', 'ann file'),
                               ('config', 'Config file'),
                               ('checkpoint', 'Checkpoint file')):
        parser.add_argument(arg_name, help=arg_help)
    # Optional settings controlling inference and visualization output.
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--score-thr', type=float, default=0.0, help='bbox score threshold')
    parser.add_argument(
        '--out-dir', type=str, default='demo', help='dir to save results')
    parser.add_argument(
        '--show',
        action='store_true',
        help='show online visualization results')
    parser.add_argument(
        '--snapshot',
        action='store_true',
        help='whether to save online visualization results')
    return parser.parse_args()


def main(args):
    """Run multi-modality (point cloud + image) 3D detection on one sample
    and visualize the predictions.

    Args:
        args (argparse.Namespace): options produced by :func:`parse_args`.
    """
    # register all modules in mmdet into the registries
    register_all_modules()

    # build the model from a config file and a checkpoint file
    model = init_model(args.config, args.checkpoint, device=args.device)

    # init visualizer
    visualizer = VISUALIZERS.build(model.cfg.visualizer)
    visualizer.dataset_meta = {
        'CLASSES': model.CLASSES,
        'PALETTE': model.PALETTE
    }

    # test a single image and point cloud sample
    result, data = inference_multi_modality_detector(model, args.pcd,
                                                     args.image, args.ann)

    points = np.fromfile(args.pcd, dtype=np.float32)
    # BUG FIX: the parser defines the positional argument ``image``;
    # ``args.img`` raised AttributeError at runtime.
    img = mmcv.imread(args.image)
    img = mmcv.imconvert(img, 'bgr', 'rgb')

    data_input = dict(points=points, img=img)

    # BUG FIX: there is no ``--out-file`` option (``args.out_file`` was
    # undefined); derive the output path from ``--out-dir`` instead.
    os.makedirs(args.out_dir, exist_ok=True)
    out_file = os.path.join(args.out_dir, 'result.png')

    # show the results
    # NOTE(review): ``--snapshot`` is declared by the parser but still unused
    # here — confirm intended semantics before wiring it up.
    visualizer.add_datasample(
        'result',
        data_input,
        pred_sample=result,
        # BUG FIX: respect the --show flag instead of hard-coding True.
        show=args.show,
        wait_time=0,
        out_file=out_file,
        pred_score_thr=args.score_thr,
        vis_task='multi_modality-det')


if __name__ == '__main__':
    # Entry point: parse CLI options, then run the demo pipeline.
    main(parse_args())