# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import trimesh
from os import path as osp

from .image_vis import (draw_camera_bbox3d_on_img, draw_depth_bbox3d_on_img,
                        draw_lidar_bbox3d_on_img)


def _write_obj(points, out_filename):
    """Write points into ``obj`` format for meshlab visualization.

    Args:
        points (np.ndarray): Points in shape (N, dim).
        out_filename (str): Filename to be saved.
    """
    N = points.shape[0]
    with open(out_filename, 'w') as fout:
        for i in range(N):
            if points.shape[1] == 6:
                # write 'v x y z r g b' when per-point colors are provided
                c = points[i, 3:].astype(int)
                fout.write(
                    'v %f %f %f %d %d %d\n' %
                    (points[i, 0], points[i, 1], points[i, 2], c[0], c[1],
                     c[2]))
            else:
                fout.write('v %f %f %f\n' %
                           (points[i, 0], points[i, 1], points[i, 2]))
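

def _demo_write_obj():
    """Illustrative usage sketch for ``_write_obj``.

    Not part of the public API: the random points and the output path below
    are placeholders for a real point cloud and result file.
    """
    xyz = np.random.rand(100, 3)
    rgb = np.random.randint(0, 255, size=(100, 3))
    # colors are cast to int inside `_write_obj`, coordinates stay float
    _write_obj(np.concatenate([xyz, rgb], axis=1), '/tmp/demo_points.obj')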


def _write_oriented_bbox(scene_bbox, out_filename):
    """Export oriented (around Z axis) scene bbox to meshes.

    Args:
        scene_bbox (list[ndarray] or np.ndarray): xyz position of the box
            center, the 3 box sizes (x_size, y_size, z_size) and the heading
            angle around the Z axis.
            Y forward, X right, Z upward. The heading angle of positive X is
            0, and the heading angle of positive Y is 90 degrees.
        out_filename (str): Filename to be saved.
    """

    def heading2rotmat(heading_angle):
        # rotation matrix for a rotation of ``heading_angle`` around +Z
        rotmat = np.zeros((3, 3))
        rotmat[2, 2] = 1
        cosval = np.cos(heading_angle)
        sinval = np.sin(heading_angle)
        rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])
        return rotmat

    def convert_oriented_box_to_trimesh_fmt(box):
        # box layout: (cx, cy, cz, x_size, y_size, z_size, heading_angle)
        ctr = box[:3]
        lengths = box[3:6]
        trns = np.eye(4)
        trns[0:3, 3] = ctr
        trns[3, 3] = 1.0
        trns[0:3, 0:3] = heading2rotmat(box[6])
        box_trimesh_fmt = trimesh.creation.box(lengths, trns)
        return box_trimesh_fmt

    if len(scene_bbox) == 0:
        scene_bbox = np.zeros((1, 7))
    scene = trimesh.scene.Scene()
    for box in scene_bbox:
        scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))

    mesh_list = trimesh.util.concatenate(scene.dump())
    # save to obj file
    trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='obj')

    return
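

def _demo_write_oriented_bbox():
    """Illustrative usage sketch for ``_write_oriented_bbox``.

    Not part of the public API: the single box below (4 x 2 x 1.5, heading of
    30 degrees) and the output path are placeholders.
    """
    # (cx, cy, cz, x_size, y_size, z_size, heading_angle)
    box = np.array([[0., 0., 0., 4., 2., 1.5, np.pi / 6]])
    _write_oriented_bbox(box, '/tmp/demo_bbox.obj')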


def show_result(points,
                gt_bboxes,
                pred_bboxes,
                out_dir,
                filename,
                show=False,
                snapshot=False,
                pred_labels=None):
    """Convert results into format that is directly readable for meshlab.

    Args:
        points (np.ndarray): Points.
        gt_bboxes (np.ndarray): Ground truth boxes.
        pred_bboxes (np.ndarray): Predicted boxes.
        out_dir (str): Path of output directory.
        filename (str): Filename of the current frame.
        show (bool, optional): Visualize the results online. Defaults to False.
        snapshot (bool, optional): Whether to save the online results.
            Defaults to False.
        pred_labels (np.ndarray or torch.Tensor, optional): Predicted labels
            of boxes. Defaults to None.
    """
    result_path = osp.join(out_dir, filename)
    mmcv.mkdir_or_exist(result_path)

    if show:
        from .open3d_vis import Visualizer

        vis = Visualizer(points)
        if pred_bboxes is not None:
            if pred_labels is None:
                vis.add_bboxes(bbox3d=pred_bboxes)
            else:
                # draw the boxes of each class in a random per-class color
                palette = np.random.randint(
                    0, 255, size=(pred_labels.max() + 1, 3)) / 256
                label_dict = {}
                for j in range(len(pred_labels)):
                    i = int(pred_labels[j])
                    if label_dict.get(i) is None:
                        label_dict[i] = []
                    label_dict[i].append(pred_bboxes[j])
                for i in label_dict:
                    vis.add_bboxes(
                        bbox3d=np.array(label_dict[i]),
                        bbox_color=palette[i],
                        points_in_box_color=palette[i])

        if gt_bboxes is not None:
            vis.add_bboxes(bbox3d=gt_bboxes, bbox_color=(0, 0, 1))
        show_path = osp.join(result_path,
                             f'{filename}_online.png') if snapshot else None
        vis.show(show_path)

    if points is not None:
        _write_obj(points, osp.join(result_path, f'{filename}_points.obj'))

    if gt_bboxes is not None:
        # bottom center to gravity center
        gt_bboxes[..., 2] += gt_bboxes[..., 5] / 2

        _write_oriented_bbox(gt_bboxes,
                             osp.join(result_path, f'{filename}_gt.obj'))

    if pred_bboxes is not None:
        # bottom center to gravity center
        pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2

        _write_oriented_bbox(pred_bboxes,
                             osp.join(result_path, f'{filename}_pred.obj'))
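

def _demo_show_result():
    """Illustrative usage sketch for ``show_result``.

    Not part of the public API: the points, the box and the output directory
    are placeholders. Boxes use the bottom-center
    (x, y, z, x_size, y_size, z_size, yaw) layout expected above.
    """
    points = np.random.rand(1000, 3)
    gt_bboxes = np.array([[0.5, 0.5, 0., 0.2, 0.2, 0.2, 0.]])
    # writes demo_out/frame_000000/frame_000000_{points,gt}.obj
    show_result(points, gt_bboxes, None, './demo_out', 'frame_000000')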


def show_seg_result(points,
                    gt_seg,
                    pred_seg,
                    out_dir,
                    filename,
                    palette,
                    ignore_index=None,
                    show=False,
                    snapshot=False):
    """Convert results into format that is directly readable for meshlab.

    Args:
        points (np.ndarray): Points.
        gt_seg (np.ndarray): Ground truth segmentation mask.
        pred_seg (np.ndarray): Predicted segmentation mask.
        out_dir (str): Path of output directory.
        filename (str): Filename of the current frame.
        palette (np.ndarray): Mapping between class labels and colors.
        ignore_index (int, optional): The label index to be ignored, e.g.
            unannotated points. Defaults to None.
        show (bool, optional): Visualize the results online. Defaults to False.
        snapshot (bool, optional): Whether to save the online results.
            Defaults to False.
    """
    # we need 3D coordinates to visualize segmentation mask
    if gt_seg is not None or pred_seg is not None:
        assert points is not None, \
            '3D coordinates are required for segmentation visualization'

    # filter out ignored points
    if gt_seg is not None and ignore_index is not None:
        if points is not None:
            points = points[gt_seg != ignore_index]
        if pred_seg is not None:
            pred_seg = pred_seg[gt_seg != ignore_index]
        gt_seg = gt_seg[gt_seg != ignore_index]

    if gt_seg is not None:
        gt_seg_color = palette[gt_seg]
        gt_seg_color = np.concatenate([points[:, :3], gt_seg_color], axis=1)
    if pred_seg is not None:
        pred_seg_color = palette[pred_seg]
        pred_seg_color = np.concatenate([points[:, :3], pred_seg_color],
                                        axis=1)

    result_path = osp.join(out_dir, filename)
    mmcv.mkdir_or_exist(result_path)

    # online visualization of segmentation mask
    # we show three masks in a row, scene_points, gt_mask, pred_mask
    if show:
        from .open3d_vis import Visualizer
        mode = 'xyzrgb' if points.shape[1] == 6 else 'xyz'
        vis = Visualizer(points, mode=mode)
        if gt_seg is not None:
            vis.add_seg_mask(gt_seg_color)
        if pred_seg is not None:
            vis.add_seg_mask(pred_seg_color)
        show_path = osp.join(result_path,
                             f'{filename}_online.png') if snapshot else None
        vis.show(show_path)

    if points is not None:
        _write_obj(points, osp.join(result_path, f'{filename}_points.obj'))

    if gt_seg is not None:
        _write_obj(gt_seg_color, osp.join(result_path, f'{filename}_gt.obj'))

    if pred_seg is not None:
        _write_obj(pred_seg_color, osp.join(result_path,
                                            f'{filename}_pred.obj'))
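

def _demo_show_seg_result():
    """Illustrative usage sketch for ``show_seg_result``.

    Not part of the public API: the points, labels, palette and output
    directory are placeholders. The palette maps each class id to a color
    in [0, 255].
    """
    points = np.random.rand(500, 3)
    gt_seg = np.random.randint(0, 3, 500)
    palette = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]])
    # writes demo_out/scene_demo/scene_demo_{points,gt}.obj
    show_seg_result(points, gt_seg, None, './demo_out', 'scene_demo', palette)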


def show_multi_modality_result(img,
                               gt_bboxes,
                               pred_bboxes,
                               proj_mat,
                               out_dir,
                               filename,
                               box_mode='lidar',
                               img_metas=None,
                               show=False,
                               gt_bbox_color=(61, 102, 255),
                               pred_bbox_color=(241, 101, 72)):
    """Convert multi-modality detection results into 2D results.

    Project the 3D bboxes onto the 2D image plane and visualize them.

    Args:
        img (np.ndarray): The numpy array of image in cv2 fashion.
        gt_bboxes (:obj:`BaseInstance3DBoxes`): Ground truth boxes.
        pred_bboxes (:obj:`BaseInstance3DBoxes`): Predicted boxes.
        proj_mat (np.ndarray, shape=[4, 4]): The projection matrix
            according to the camera intrinsic parameters.
        out_dir (str): Path of output directory.
        filename (str): Filename of the current frame.
        box_mode (str, optional): Coordinate system the boxes are in.
            Should be one of 'depth', 'lidar' and 'camera'.
            Defaults to 'lidar'.
        img_metas (dict, optional): Used in projecting depth bbox.
            Defaults to None.
        show (bool, optional): Visualize the results online. Defaults to False.
        gt_bbox_color (str or tuple(int), optional): Color of bbox lines.
            The tuple of color should be in BGR order.
            Defaults to (61, 102, 255).
        pred_bbox_color (str or tuple(int), optional): Color of bbox lines.
            The tuple of color should be in BGR order.
            Defaults to (241, 101, 72).
    """
    if box_mode == 'depth':
        draw_bbox = draw_depth_bbox3d_on_img
    elif box_mode == 'lidar':
        draw_bbox = draw_lidar_bbox3d_on_img
    elif box_mode == 'camera':
        draw_bbox = draw_camera_bbox3d_on_img
    else:
        raise NotImplementedError(f'unsupported box mode {box_mode}')

    result_path = osp.join(out_dir, filename)
    mmcv.mkdir_or_exist(result_path)

    if show:
        show_img = img.copy()
        if gt_bboxes is not None:
            show_img = draw_bbox(
                gt_bboxes, show_img, proj_mat, img_metas, color=gt_bbox_color)
        if pred_bboxes is not None:
            show_img = draw_bbox(
                pred_bboxes,
                show_img,
                proj_mat,
                img_metas,
                color=pred_bbox_color)
        mmcv.imshow(show_img, win_name='project_bbox3d_img', wait_time=0)

    if img is not None:
        mmcv.imwrite(img, osp.join(result_path, f'{filename}_img.png'))

    if gt_bboxes is not None:
        gt_img = draw_bbox(
            gt_bboxes, img, proj_mat, img_metas, color=gt_bbox_color)
        mmcv.imwrite(gt_img, osp.join(result_path, f'{filename}_gt.png'))

    if pred_bboxes is not None:
        pred_img = draw_bbox(
            pred_bboxes, img, proj_mat, img_metas, color=pred_bbox_color)
        mmcv.imwrite(pred_img, osp.join(result_path, f'{filename}_pred.png'))
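

def _demo_show_multi_modality_result():
    """Illustrative usage sketch for ``show_multi_modality_result``.

    Not part of the public API: the blank image, the box, the intrinsic
    matrix and the output directory are placeholders for the image and
    calibration of a real frame.
    """
    # lazy import of the camera box class from mmdet3d.core.bbox
    from ..bbox import CameraInstance3DBoxes
    img = np.zeros((375, 1242, 3), dtype=np.uint8)
    # one camera-frame box: (x, y, z, x_size, y_size, z_size, ry)
    cam_boxes = CameraInstance3DBoxes(
        np.array([[0.0, 1.0, 8.0, 1.6, 1.5, 4.0, 0.0]]))
    cam2img = np.array([[721.5, 0.0, 609.6], [0.0, 721.5, 172.9],
                        [0.0, 0.0, 1.0]])
    # writes demo_out/000008/000008_{img,pred}.png
    show_multi_modality_result(
        img,
        None,
        cam_boxes,
        cam2img,
        './demo_out',
        '000008',
        box_mode='camera')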