import mmcv
import numpy as np
import trimesh
from os import path as osp

from .image_vis import (draw_camera_bbox3d_on_img, draw_depth_bbox3d_on_img,
                        draw_lidar_bbox3d_on_img)


def _write_obj(points, out_filename):
    """Write points into ``obj`` format for meshlab visualization.

    Args:
        points (np.ndarray): Points in shape (N, dim).
        out_filename (str): Filename to be saved.
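
    Example:
        A minimal sketch (the array and the output path below are only
        illustrative placeholders):

        >>> import numpy as np
        >>> pts = np.random.rand(100, 3)  # xyz coordinates only
        >>> _write_obj(pts, '/tmp/points.obj')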
    """
    N = points.shape[0]
    fout = open(out_filename, 'w')
    for i in range(N):
        if points.shape[1] == 6:
            # the last three channels hold the rgb color of each point
            c = points[i, 3:].astype(int)
            fout.write(
                'v %f %f %f %d %d %d\n' %
                (points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2]))
        else:
            # points carry xyz coordinates only
            fout.write('v %f %f %f\n' %
                       (points[i, 0], points[i, 1], points[i, 2]))
    fout.close()


def _write_oriented_bbox(scene_bbox, out_filename):
    """Export oriented (around Z axis) scene bbox to meshes.

    Args:
        scene_bbox (list[ndarray] or np.ndarray): xyz position of the box
            center, 3 lengths (dx, dy, dz) and heading angle around the Z
            axis. Y forward, X right, Z upward. The heading angle of the
            positive X direction is 0, and that of the positive Y direction
            is 90 degrees.
        out_filename (str): Filename to be saved.
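
    Example:
        A minimal sketch (the box values and the output path below are only
        illustrative placeholders):

        >>> import numpy as np
        >>> # one box: center (0, 0, 1), size (2, 1, 1.5), heading pi / 4
        >>> boxes = np.array([[0., 0., 1., 2., 1., 1.5, np.pi / 4]])
        >>> _write_oriented_bbox(boxes, '/tmp/boxes.obj')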
    """

    def heading2rotmat(heading_angle):
        rotmat = np.zeros((3, 3))
        rotmat[2, 2] = 1
        cosval = np.cos(heading_angle)
        sinval = np.sin(heading_angle)
        rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])
        return rotmat

    def convert_oriented_box_to_trimesh_fmt(box):
        ctr = box[:3]
        lengths = box[3:6]
        trns = np.eye(4)
        trns[0:3, 3] = ctr
        trns[3, 3] = 1.0
        trns[0:3, 0:3] = heading2rotmat(box[6])
        box_trimesh_fmt = trimesh.creation.box(lengths, trns)
        return box_trimesh_fmt

    if len(scene_bbox) == 0:
        scene_bbox = np.zeros((1, 7))
    scene = trimesh.scene.Scene()
    for box in scene_bbox:
        scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))

    mesh_list = trimesh.util.concatenate(scene.dump())
    # save to obj file
    trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='obj')

    return


def show_result(points,
                gt_bboxes,
                pred_bboxes,
                out_dir,
                filename,
                show=True,
                snapshot=False):
    """Convert results into format that is directly readable for meshlab.

    Args:
        points (np.ndarray): Points.
        gt_bboxes (np.ndarray): Ground truth boxes.
        pred_bboxes (np.ndarray): Predicted boxes.
        out_dir (str): Path of output directory
        filename (str): Filename of the current frame.
89
90
        show (bool): Visualize the results online. Defaults to False.
        snapshot (bool): Whether to save the online results. Defaults to False.
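
    Example:
        A minimal offline sketch (the arrays and paths below are only
        illustrative placeholders):

        >>> import numpy as np
        >>> points = np.random.rand(1000, 3)
        >>> # boxes as (x, y, z, dx, dy, dz, yaw) with bottom centers
        >>> gt = np.array([[0., 0., 0., 1., 1., 1., 0.]])
        >>> pred = np.array([[0.1, 0., 0., 1., 1., 1., 0.1]])
        >>> show_result(points, gt, pred, './vis_out', 'sample_0',
        ...             show=False)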
    """
    result_path = osp.join(out_dir, filename)
    mmcv.mkdir_or_exist(result_path)

    if show:
        from .open3d_vis import Visualizer

        vis = Visualizer(points)
        if pred_bboxes is not None:
            vis.add_bboxes(bbox3d=pred_bboxes)
        if gt_bboxes is not None:
            vis.add_bboxes(bbox3d=gt_bboxes, bbox_color=(0, 0, 1))
        show_path = osp.join(result_path,
                             f'{filename}_online.png') if snapshot else None
        vis.show(show_path)

    if points is not None:
        _write_obj(points, osp.join(result_path, f'{filename}_points.obj'))

    if gt_bboxes is not None:
        # bottom center to gravity center
        gt_bboxes[..., 2] += gt_bboxes[..., 5] / 2
        # the positive direction for yaw in meshlab is clockwise
        gt_bboxes[:, 6] *= -1
        _write_oriented_bbox(gt_bboxes,
                             osp.join(result_path, f'{filename}_gt.obj'))

    if pred_bboxes is not None:
        # bottom center to gravity center
        pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
        # the positive direction for yaw in meshlab is clockwise
        pred_bboxes[:, 6] *= -1
        _write_oriented_bbox(pred_bboxes,
                             osp.join(result_path, f'{filename}_pred.obj'))


def show_seg_result(points,
                    gt_seg,
                    pred_seg,
                    out_dir,
                    filename,
                    palette,
                    ignore_index=None,
                    show=True,
                    snapshot=False):
    """Convert results into format that is directly readable for meshlab.

    Args:
        points (np.ndarray): Points.
        gt_seg (np.ndarray): Ground truth segmentation mask.
        pred_seg (np.ndarray): Predicted segmentation mask.
        out_dir (str): Path of output directory
        filename (str): Filename of the current frame.
        palette (np.ndarray): Mapping between class labels and colors.
        ignore_index (int, optional): The label index to be ignored, e.g. \
            unannotated points. Defaults to None.
        show (bool, optional): Visualize the results online. Defaults to False.
148
149
        snapshot (bool, optional): Whether to save the online results. \
            Defaults to False.
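
    Example:
        A minimal offline sketch (the arrays, the palette and the paths
        below are only illustrative placeholders):

        >>> import numpy as np
        >>> points = np.random.rand(500, 3)
        >>> gt_seg = np.random.randint(0, 3, 500)
        >>> pred_seg = np.random.randint(0, 3, 500)
        >>> palette = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]])
        >>> show_seg_result(points, gt_seg, pred_seg, './vis_out',
        ...                 'scene_0', palette, show=False)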
    """
    # we need 3D coordinates to visualize segmentation mask
    if gt_seg is not None or pred_seg is not None:
        assert points is not None, \
            '3D coordinates are required for segmentation visualization'

    # filter out ignored points
    if gt_seg is not None and ignore_index is not None:
        if points is not None:
            points = points[gt_seg != ignore_index]
        if pred_seg is not None:
            pred_seg = pred_seg[gt_seg != ignore_index]
        gt_seg = gt_seg[gt_seg != ignore_index]

    if gt_seg is not None:
        gt_seg_color = palette[gt_seg]
        gt_seg_color = np.concatenate([points[:, :3], gt_seg_color], axis=1)
    if pred_seg is not None:
        pred_seg_color = palette[pred_seg]
        pred_seg_color = np.concatenate([points[:, :3], pred_seg_color],
                                        axis=1)

    result_path = osp.join(out_dir, filename)
    mmcv.mkdir_or_exist(result_path)

    # online visualization of segmentation mask
    # we show three masks in a row, scene_points, gt_mask, pred_mask
    if show:
        from .open3d_vis import Visualizer
        mode = 'xyzrgb' if points.shape[1] == 6 else 'xyz'
        vis = Visualizer(points, mode=mode)
        if gt_seg is not None:
            vis.add_seg_mask(gt_seg_color)
        if pred_seg is not None:
            vis.add_seg_mask(pred_seg_color)
        show_path = osp.join(result_path,
                             f'{filename}_online.png') if snapshot else None
        vis.show(show_path)

    if points is not None:
        _write_obj(points, osp.join(result_path, f'{filename}_points.obj'))

    if gt_seg is not None:
        _write_obj(gt_seg_color, osp.join(result_path, f'{filename}_gt.obj'))

    if pred_seg is not None:
        _write_obj(pred_seg_color, osp.join(result_path,
                                            f'{filename}_pred.obj'))


def show_multi_modality_result(img,
                               gt_bboxes,
                               pred_bboxes,
                               proj_mat,
                               out_dir,
                               filename,
                               box_mode,
                               img_metas=None,
                               show=True,
                               gt_bbox_color=(61, 102, 255),
                               pred_bbox_color=(241, 101, 72)):
    """Convert multi-modality detection results into 2D results.

    Project the predicted 3D bbox to 2D image plane and visualize them.

    Args:
        img (np.ndarray): The numpy array of image in cv2 fashion.
217
218
        gt_bboxes (:obj:`BaseInstance3DBoxes`): Ground truth boxes.
        pred_bboxes (:obj:`BaseInstance3DBoxes`): Predicted boxes.
219
220
        proj_mat (numpy.array, shape=[4, 4]): The projection matrix
            according to the camera intrinsic parameters.
221
        out_dir (str): Path of output directory.
222
        filename (str): Filename of the current frame.
223
224
225
        box_mode (str): Coordinate system the boxes are in.
            Should be one of 'depth', 'lidar' and 'camera'.
        img_metas (dict): Used in projecting depth bbox.
226
        show (bool): Visualize the results online. Defaults to False.
227
228
229
230
231
        gt_bbox_color (str or tuple(int)): Color of bbox lines.
           The tuple of color should be in BGR order. Default: (255, 102, 61)
        pred_bbox_color (str or tuple(int)): Color of bbox lines.
           The tuple of color should be in BGR order. Default: (72, 101, 241)
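
    Example:
        A rough offline sketch (the image, box values, projection matrix
        and paths below are only illustrative placeholders):

        >>> import numpy as np
        >>> from mmdet3d.core.bbox import CameraInstance3DBoxes
        >>> img = np.zeros((480, 640, 3), dtype=np.uint8)
        >>> boxes = CameraInstance3DBoxes(
        ...     np.array([[0., 0., 5., 1., 1., 1., 0.]]))
        >>> proj_mat = np.array([[500., 0., 320., 0.],
        ...                      [0., 500., 240., 0.],
        ...                      [0., 0., 1., 0.],
        ...                      [0., 0., 0., 1.]])
        >>> show_multi_modality_result(img, boxes, None, proj_mat,
        ...                            './vis_out', 'sample_0', 'camera',
        ...                            show=False)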
    """
    if box_mode == 'depth':
        draw_bbox = draw_depth_bbox3d_on_img
    elif box_mode == 'lidar':
        draw_bbox = draw_lidar_bbox3d_on_img
    elif box_mode == 'camera':
        draw_bbox = draw_camera_bbox3d_on_img
    else:
        raise NotImplementedError(f'unsupported box mode {box_mode}')

    result_path = osp.join(out_dir, filename)
    mmcv.mkdir_or_exist(result_path)

    if show:
        show_img = img.copy()
        if gt_bboxes is not None:
            show_img = draw_bbox(
                gt_bboxes, show_img, proj_mat, img_metas, color=gt_bbox_color)
        if pred_bboxes is not None:
            show_img = draw_bbox(
                pred_bboxes,
                show_img,
                proj_mat,
                img_metas,
                color=pred_bbox_color)
        mmcv.imshow(show_img, win_name='project_bbox3d_img', wait_time=0)

    if img is not None:
        mmcv.imwrite(img, osp.join(result_path, f'{filename}_img.png'))

    if gt_bboxes is not None:
        gt_img = draw_bbox(
            gt_bboxes, img, proj_mat, img_metas, color=gt_bbox_color)
        mmcv.imwrite(gt_img, osp.join(result_path, f'{filename}_gt.png'))

    if pred_bboxes is not None:
        pred_img = draw_bbox(
            pred_bboxes, img, proj_mat, img_metas, color=pred_bbox_color)
        mmcv.imwrite(pred_img, osp.join(result_path, f'{filename}_pred.png'))