# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import warnings
from typing import Optional, Sequence

import mmcv
import numpy as np
from mmengine.fileio import get
from mmengine.hooks import Hook
from mmengine.logging import print_log
from mmengine.runner import Runner
from mmengine.utils import mkdir_or_exist
from mmengine.visualization import Visualizer

from mmdet3d.registry import HOOKS
from mmdet3d.structures import Det3DDataSample


@HOOKS.register_module()
class Det3DVisualizationHook(Hook):
    """Detection Visualization Hook. Used to visualize validation and testing
    process prediction results.

    In the testing phase:

    1. If ``show`` is True, it means that only the prediction results are
        visualized without storing data, so ``vis_backends`` needs to
        be excluded.
    2. If ``test_out_dir`` is specified, it means that the prediction results
        need to be saved to ``test_out_dir``. To avoid ``vis_backends`` also
        storing the data, ``vis_backends`` needs to be excluded.
    3. ``vis_backends`` takes effect if the user specifies neither ``show``
        nor ``test_out_dir``. You can set ``vis_backends`` to WandbVisBackend
        or TensorboardVisBackend to store the prediction result in Wandb or
        Tensorboard.

    Args:
        draw (bool): Whether to draw prediction results. If it is False,
            no drawing will be done. Defaults to False.
        interval (int): The interval of visualization. Defaults to 50.
        score_thr (float): The threshold to visualize the bboxes
            and masks. Defaults to 0.3.
        show (bool): Whether to display the drawn image. Defaults to False.
        vis_task (str): Visualization task. Defaults to 'mono_det'.
        wait_time (float): The interval of show (s). Defaults to 0.
        test_out_dir (str, optional): Directory where painted images
            will be saved in the testing process. Defaults to None.
        draw_gt (bool): Whether to draw ground truth. Defaults to True.
        draw_pred (bool): Whether to draw prediction results.
            Defaults to True.
        backend_args (dict, optional): Arguments to instantiate the
            corresponding backend. Defaults to None.
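
    A minimal config sketch for enabling this hook in a test run (the
    ``default_hooks`` layout follows the usual MMEngine convention and the
    directory name is only an example)::

        default_hooks = dict(
            visualization=dict(
                type='Det3DVisualizationHook',
                draw=True,
                vis_task='lidar_det',
                test_out_dir='vis_results'))

    Note that ``test_out_dir`` is resolved relative to the runner's work
    directory and timestamp.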
    """

    def __init__(self,
                 draw: bool = False,
                 interval: int = 50,
                 score_thr: float = 0.3,
                 show: bool = False,
                 vis_task: str = 'mono_det',
                 wait_time: float = 0.,
                 test_out_dir: Optional[str] = None,
                 draw_gt: bool = True,
                 draw_pred: bool = True,
                 backend_args: Optional[dict] = None):
        self._visualizer: Visualizer = Visualizer.get_current_instance()
        self.interval = interval
        self.score_thr = score_thr
        self.show = show
        if self.show:
            # No need to think about vis backends.
            self._visualizer._vis_backends = {}
            warnings.warn('`show` is True, which means only the prediction '
                          'results will be visualized without storing data, '
                          'so `vis_backends` has been cleared.')
        self.vis_task = vis_task

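        # A ``wait_time`` of -1 switches the display window to manual
        # stepping; any non-negative value autoplays with that delay (in
        # seconds) between samples.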
        if wait_time == -1:
            print_log(
                'Manual control mode, press [Right] to next sample.',
                logger='current')
        else:
            print_log(
                'Autoplay mode, press [SPACE] to pause.', logger='current')
        self.wait_time = wait_time
        self.backend_args = backend_args
        self.draw = draw
        self.test_out_dir = test_out_dir
        self._test_index = 0
        self.draw_gt = draw_gt
        self.draw_pred = draw_pred

    def after_val_iter(self, runner: Runner, batch_idx: int, data_batch: dict,
                       outputs: Sequence[Det3DDataSample]) -> None:
        """Run after every ``self.interval`` validation iterations.

        Args:
            runner (:obj:`Runner`): The runner of the validation process.
            batch_idx (int): The index of the current batch in the val loop.
            data_batch (dict): Data from dataloader.
            outputs (Sequence[:obj:`Det3DDataSample`]): A batch of data
                samples that contain annotations and predictions.
        """
        if self.draw is False:
            return

        # There is no guarantee that the same batch of images
        # is visualized for each evaluation.
        total_curr_iter = runner.iter + batch_idx

        data_input = dict()

        # Visualize only the first data sample in each batch.
        if self.vis_task in [
                'mono_det', 'multi-view_det', 'multi-modality_det'
        ]:
            assert 'img_path' in outputs[0], 'img_path is not in outputs[0]'
            img_path = outputs[0].img_path
            if isinstance(img_path, list):
                img = []
                for single_img_path in img_path:
                    img_bytes = get(
                        single_img_path, backend_args=self.backend_args)
                    single_img = mmcv.imfrombytes(
                        img_bytes, channel_order='rgb')
                    img.append(single_img)
            else:
                img_bytes = get(img_path, backend_args=self.backend_args)
                img = mmcv.imfrombytes(img_bytes, channel_order='rgb')
            data_input['img'] = img

        if self.vis_task in ['lidar_det', 'multi-modality_det', 'lidar_seg']:
            assert 'lidar_path' in outputs[0], \
                'lidar_path is not in outputs[0]'
            lidar_path = outputs[0].lidar_path
            num_pts_feats = outputs[0].num_pts_feats
            pts_bytes = get(lidar_path, backend_args=self.backend_args)
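            # The point cloud is assumed to be a raw float32 binary blob;
            # reshape the flat buffer into (num_points, num_pts_feats),
            # e.g. (N, 4) for x, y, z and intensity.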
            points = np.frombuffer(pts_bytes, dtype=np.float32)
            points = points.reshape(-1, num_pts_feats)
            data_input['points'] = points

        if total_curr_iter % self.interval == 0:
            self._visualizer.add_datasample(
                'val sample',
                data_input,
                data_sample=outputs[0],
                show=self.show,
                vis_task=self.vis_task,
                wait_time=self.wait_time,
                pred_score_thr=self.score_thr,
                step=total_curr_iter)

    def after_test_iter(self, runner: Runner, batch_idx: int, data_batch: dict,
                        outputs: Sequence[Det3DDataSample]) -> None:
        """Run after every testing iterations.

        Args:
            runner (:obj:`Runner`): The runner of the testing process.
            batch_idx (int): The index of the current batch in the test loop.
            data_batch (dict): Data from dataloader.
            outputs (Sequence[:obj:`Det3DDataSample`]): A batch of data
                samples that contain annotations and predictions.
        """
        if self.draw is False:
            return

        if self.test_out_dir is not None:
            self.test_out_dir = osp.join(runner.work_dir, runner.timestamp,
                                         self.test_out_dir)
            mkdir_or_exist(self.test_out_dir)

        for data_sample in outputs:
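            # ``_test_index`` increases monotonically across the whole test
            # run and is used as the global step for the visualizer.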
            self._test_index += 1

            data_input = dict()
            assert 'img_path' in data_sample or 'lidar_path' in data_sample, \
                "'data_sample' must contain 'img_path' or 'lidar_path'"

            out_file = o3d_save_path = None

            if self.vis_task in [
                    'mono_det', 'multi-view_det', 'multi-modality_det'
            ]:
                assert 'img_path' in data_sample, \
                    'img_path is not in data_sample'
                img_path = data_sample.img_path
                if isinstance(img_path, list):
                    img = []
                    for single_img_path in img_path:
                        img_bytes = get(
                            single_img_path, backend_args=self.backend_args)
                        single_img = mmcv.imfrombytes(
                            img_bytes, channel_order='rgb')
                        img.append(single_img)
                else:
                    img_bytes = get(img_path, backend_args=self.backend_args)
                    img = mmcv.imfrombytes(img_bytes, channel_order='rgb')
                data_input['img'] = img
                if self.test_out_dir is not None:
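                    # For multi-view input, name the saved image after the
                    # first camera's image file.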
                    if isinstance(img_path, list):
                        img_path = img_path[0]
                    out_file = osp.basename(img_path)
                    out_file = osp.join(self.test_out_dir, out_file)

            if self.vis_task in [
                    'lidar_det', 'multi-modality_det', 'lidar_seg'
            ]:
                assert 'lidar_path' in data_sample, \
                    'lidar_path is not in data_sample'
                lidar_path = data_sample.lidar_path
                num_pts_feats = data_sample.num_pts_feats
                pts_bytes = get(lidar_path, backend_args=self.backend_args)
                points = np.frombuffer(pts_bytes, dtype=np.float32)
                points = points.reshape(-1, num_pts_feats)
                data_input['points'] = points
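                # ``o3d_save_path`` is where the visualizer saves the
                # rendered 3D view, as a PNG named after the lidar file.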
                if self.test_out_dir is not None:
                    o3d_save_path = osp.basename(lidar_path).split(
                        '.')[0] + '.png'
                    o3d_save_path = osp.join(self.test_out_dir, o3d_save_path)

            self._visualizer.add_datasample(
                'test sample',
                data_input,
                data_sample=data_sample,
                draw_gt=self.draw_gt,
                draw_pred=self.draw_pred,
                show=self.show,
                vis_task=self.vis_task,
                wait_time=self.wait_time,
                pred_score_thr=self.score_thr,
                out_file=out_file,
                o3d_save_path=o3d_save_path,
                step=self._test_index)