# Copyright (c) OpenMMLab. All rights reserved.
import cv2
import numpy as np
import random
import warnings
from mmcv import is_tuple_of
from mmcv.utils import build_from_cfg

from mmdet3d.core import VoxelGenerator
from mmdet3d.core.bbox import (CameraInstance3DBoxes, DepthInstance3DBoxes,
                               LiDARInstance3DBoxes, box_np_ops)
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import RandomFlip
from ..builder import OBJECTSAMPLERS
from .data_augment_utils import noise_per_object_v3_


@PIPELINES.register_module()
class RandomDropPointsColor(object):
    r"""Randomly set the color of points to all zeros.

    Once this transform is executed, all the points' color will be dropped.
    Refer to `PAConv <https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/
    util/transform.py#L223>`_ for more details.

    Args:
        drop_ratio (float, optional): The probability of dropping point colors.
            Defaults to 0.2.
    """

    def __init__(self, drop_ratio=0.2):
        assert isinstance(drop_ratio, (int, float)) and 0 <= drop_ratio <= 1, \
            f'invalid drop_ratio value {drop_ratio}'
        self.drop_ratio = drop_ratio

    def __call__(self, input_dict):
        """Call function to drop point colors.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after color dropping,
                'points' key is updated in the result dict.
        """
        points = input_dict['points']
        assert points.attribute_dims is not None and \
            'color' in points.attribute_dims, \
            'Expect points have color attribute'

        # this if-expression is a bit strange
        # `RandomDropPointsColor` is used in training the 3D segmentor PAConv
        # we discovered in our experiments that using
        # `if np.random.rand() > 1.0 - self.drop_ratio` consistently leads to
        # better results than using `if np.random.rand() < self.drop_ratio`
        # so we keep this hack in our codebase
        if np.random.rand() > 1.0 - self.drop_ratio:
            points.color = points.color * 0.0
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(drop_ratio={self.drop_ratio})'
        return repr_str
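
# Illustrative usage sketch (not part of the module's API): `DepthPoints` is
# one concrete `BasePoints` subclass; the color channels are assumed to sit in
# columns 3-5 of an (N, 6) array.
#
#     from mmdet3d.core.points import DepthPoints
#     points = DepthPoints(np.random.rand(100, 6), points_dim=6,
#                          attribute_dims=dict(color=[3, 4, 5]))
#     results = RandomDropPointsColor(drop_ratio=0.2)(dict(points=points))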


@PIPELINES.register_module()
class RandomFlip3D(RandomFlip):
    """Flip the points & bbox.

    If the input dict contains the key "flip", then the flag will be used,
    otherwise it will be randomly decided by a ratio specified in the init
    method.

    Args:
        sync_2d (bool, optional): Whether to apply flip according to the 2D
            images. If True, it will apply the same flip as the one applied to
            the 2D images. If False, it will decide whether to flip randomly
            and independently of the 2D images. Defaults to True.
        flip_ratio_bev_horizontal (float, optional): The flipping probability
            in horizontal direction. Defaults to 0.0.
        flip_ratio_bev_vertical (float, optional): The flipping probability
            in vertical direction. Defaults to 0.0.
    """

    def __init__(self,
                 sync_2d=True,
                 flip_ratio_bev_horizontal=0.0,
                 flip_ratio_bev_vertical=0.0,
                 **kwargs):
        super(RandomFlip3D, self).__init__(
            flip_ratio=flip_ratio_bev_horizontal, **kwargs)
        self.sync_2d = sync_2d
        self.flip_ratio_bev_vertical = flip_ratio_bev_vertical
        if flip_ratio_bev_horizontal is not None:
            assert isinstance(
                flip_ratio_bev_horizontal,
                (int, float)) and 0 <= flip_ratio_bev_horizontal <= 1
        if flip_ratio_bev_vertical is not None:
            assert isinstance(
                flip_ratio_bev_vertical,
                (int, float)) and 0 <= flip_ratio_bev_vertical <= 1

    def random_flip_data_3d(self, input_dict, direction='horizontal'):
        """Flip 3D data randomly.

        Args:
            input_dict (dict): Result dict from loading pipeline.
            direction (str, optional): Flip direction.
                Default: 'horizontal'.

        Returns:
            dict: Flipped results, 'points', 'bbox3d_fields' keys are
                updated in the result dict.
        """
        assert direction in ['horizontal', 'vertical']
        if len(input_dict['bbox3d_fields']) == 0:  # test mode
            input_dict['bbox3d_fields'].append('empty_box3d')
            input_dict['empty_box3d'] = input_dict['box_type_3d'](
                np.array([], dtype=np.float32))
        assert len(input_dict['bbox3d_fields']) == 1
        for key in input_dict['bbox3d_fields']:
            if 'points' in input_dict:
                input_dict['points'] = input_dict[key].flip(
                    direction, points=input_dict['points'])
            else:
                input_dict[key].flip(direction)
        if 'centers2d' in input_dict:
            assert self.sync_2d is True and direction == 'horizontal', \
                'Only support sync_2d=True and horizontal flip with images'
            w = input_dict['ori_shape'][1]
            input_dict['centers2d'][..., 0] = \
                w - input_dict['centers2d'][..., 0]
            # need to modify the horizontal position of camera center
            # along u-axis in the image (flip like centers2d)
            # ['cam2img'][0][2] = c_u
            # see more details and examples at
            # https://github.com/open-mmlab/mmdetection3d/pull/744
            input_dict['cam2img'][0][2] = w - input_dict['cam2img'][0][2]

    def __call__(self, input_dict):
        """Call function to flip points, values in the ``bbox3d_fields`` and
        also flip 2D image and its annotations.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Flipped results, 'flip', 'flip_direction',
                'pcd_horizontal_flip' and 'pcd_vertical_flip' keys are added
                into result dict.
        """
        # flip 2D image and its annotations
        super(RandomFlip3D, self).__call__(input_dict)

        if self.sync_2d:
            input_dict['pcd_horizontal_flip'] = input_dict['flip']
            input_dict['pcd_vertical_flip'] = False
        else:
            if 'pcd_horizontal_flip' not in input_dict:
                flip_horizontal = True if np.random.rand(
                ) < self.flip_ratio else False
                input_dict['pcd_horizontal_flip'] = flip_horizontal
            if 'pcd_vertical_flip' not in input_dict:
                flip_vertical = True if np.random.rand(
                ) < self.flip_ratio_bev_vertical else False
                input_dict['pcd_vertical_flip'] = flip_vertical

        if 'transformation_3d_flow' not in input_dict:
            input_dict['transformation_3d_flow'] = []

        if input_dict['pcd_horizontal_flip']:
            self.random_flip_data_3d(input_dict, 'horizontal')
            input_dict['transformation_3d_flow'].extend(['HF'])
        if input_dict['pcd_vertical_flip']:
            self.random_flip_data_3d(input_dict, 'vertical')
            input_dict['transformation_3d_flow'].extend(['VF'])
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(sync_2d={self.sync_2d},'
        repr_str += f' flip_ratio_bev_vertical={self.flip_ratio_bev_vertical})'
        return repr_str
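
# Illustrative pipeline config sketch (ratios are hypothetical): the transform
# is normally built from a config dict via the PIPELINES registry rather than
# instantiated directly.
#
#     train_pipeline = [
#         dict(type='RandomFlip3D',
#              sync_2d=False,
#              flip_ratio_bev_horizontal=0.5,
#              flip_ratio_bev_vertical=0.5),
#     ]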

@PIPELINES.register_module()
class RandomJitterPoints(object):
    """Randomly jitter point coordinates.

    Different from the global translation in ``GlobalRotScaleTrans``, here we
        apply different noises to each point in a scene.

    Args:
        jitter_std (list[float]): The standard deviation of jittering noise.
            This applies random noise to all points in a 3D scene, which is
            sampled from a gaussian distribution whose standard deviation is
            set by ``jitter_std``. Defaults to [0.01, 0.01, 0.01].
        clip_range (list[float]): Clip the randomly generated jitter
            noise into this range. If None is given, don't perform clipping.
            Defaults to [-0.05, 0.05].

    Note:
        This transform should only be used in point cloud segmentation tasks
            because we don't transform ground-truth bboxes accordingly.
        For a similar transform in detection tasks, refer to `ObjectNoise`.
    """

    def __init__(self,
                 jitter_std=[0.01, 0.01, 0.01],
                 clip_range=[-0.05, 0.05]):
        seq_types = (list, tuple, np.ndarray)
        if not isinstance(jitter_std, seq_types):
            assert isinstance(jitter_std, (int, float)), \
                f'unsupported jitter_std type {type(jitter_std)}'
            jitter_std = [jitter_std, jitter_std, jitter_std]
        self.jitter_std = jitter_std

        if clip_range is not None:
            if not isinstance(clip_range, seq_types):
                assert isinstance(clip_range, (int, float)), \
                    f'unsupported clip_range type {type(clip_range)}'
                clip_range = [-clip_range, clip_range]
        self.clip_range = clip_range

    def __call__(self, input_dict):
        """Call function to jitter all the points in the scene.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after adding noise to each point,
                'points' key is updated in the result dict.
        """
        points = input_dict['points']
        jitter_std = np.array(self.jitter_std, dtype=np.float32)
        jitter_noise = \
            np.random.randn(points.shape[0], 3) * jitter_std[None, :]
        if self.clip_range is not None:
            jitter_noise = np.clip(jitter_noise, self.clip_range[0],
                                   self.clip_range[1])

        points.translate(jitter_noise)
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(jitter_std={self.jitter_std},'
        repr_str += f' clip_range={self.clip_range})'
        return repr_str
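
# Illustrative config sketch (hypothetical values): per-point Gaussian noise
# with std 0.01 on each axis, clipped to [-0.05, 0.05], typically used when
# training point cloud segmentors.
#
#     dict(type='RandomJitterPoints',
#          jitter_std=[0.01, 0.01, 0.01],
#          clip_range=[-0.05, 0.05])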


@PIPELINES.register_module()
class ObjectSample(object):
    """Sample GT objects to the data.

    Args:
        db_sampler (dict): Config dict of the database sampler.
        sample_2d (bool): Whether to also paste 2D image patch to the images.
            This should be true when applying multi-modality cut-and-paste.
            Defaults to False.
    """

    def __init__(self, db_sampler, sample_2d=False):
        self.sampler_cfg = db_sampler
        self.sample_2d = sample_2d
        if 'type' not in db_sampler.keys():
            db_sampler['type'] = 'DataBaseSampler'
        self.db_sampler = build_from_cfg(db_sampler, OBJECTSAMPLERS)

    @staticmethod
    def remove_points_in_boxes(points, boxes):
        """Remove the points in the sampled bounding boxes.

        Args:
            points (:obj:`BasePoints`): Input point cloud array.
            boxes (np.ndarray): Sampled ground truth boxes.

        Returns:
            :obj:`BasePoints`: Points with those in the boxes removed.
        """
        masks = box_np_ops.points_in_rbbox(points.coord.numpy(), boxes)
        points = points[np.logical_not(masks.any(-1))]
        return points

    def __call__(self, input_dict):
        """Call function to sample ground truth objects to the data.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after object sampling augmentation,
                'points', 'gt_bboxes_3d', 'gt_labels_3d' keys are updated
                in the result dict.
        """
        gt_bboxes_3d = input_dict['gt_bboxes_3d']
        gt_labels_3d = input_dict['gt_labels_3d']

        # change to float for blending operation
        points = input_dict['points']
        if self.sample_2d:
            img = input_dict['img']
            gt_bboxes_2d = input_dict['gt_bboxes']
            # Assume for now 3D & 2D bboxes are the same
            sampled_dict = self.db_sampler.sample_all(
                gt_bboxes_3d.tensor.numpy(),
                gt_labels_3d,
                gt_bboxes_2d=gt_bboxes_2d,
                img=img)
        else:
            sampled_dict = self.db_sampler.sample_all(
                gt_bboxes_3d.tensor.numpy(), gt_labels_3d, img=None)

        if sampled_dict is not None:
            sampled_gt_bboxes_3d = sampled_dict['gt_bboxes_3d']
            sampled_points = sampled_dict['points']
            sampled_gt_labels = sampled_dict['gt_labels_3d']

            gt_labels_3d = np.concatenate([gt_labels_3d, sampled_gt_labels],
                                          axis=0)
            gt_bboxes_3d = gt_bboxes_3d.new_box(
                np.concatenate(
                    [gt_bboxes_3d.tensor.numpy(), sampled_gt_bboxes_3d]))

            points = self.remove_points_in_boxes(points, sampled_gt_bboxes_3d)
            # check the points dimension
            points = points.cat([sampled_points, points])

            if self.sample_2d:
                sampled_gt_bboxes_2d = sampled_dict['gt_bboxes_2d']
                gt_bboxes_2d = np.concatenate(
                    [gt_bboxes_2d, sampled_gt_bboxes_2d]).astype(np.float32)

                input_dict['gt_bboxes'] = gt_bboxes_2d
                input_dict['img'] = sampled_dict['img']
        input_dict['gt_bboxes_3d'] = gt_bboxes_3d
        # `np.long` is a deprecated alias removed in recent NumPy; use int64
        input_dict['gt_labels_3d'] = gt_labels_3d.astype(np.int64)
        input_dict['points'] = points

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f' sample_2d={self.sample_2d},'
        repr_str += f' data_root={self.sampler_cfg.data_root},'
        repr_str += f' info_path={self.sampler_cfg.info_path},'
        repr_str += f' rate={self.sampler_cfg.rate},'
        repr_str += f' prepare={self.sampler_cfg.prepare},'
        repr_str += f' classes={self.sampler_cfg.classes},'
        repr_str += f' sample_groups={self.sampler_cfg.sample_groups}'
        return repr_str
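
# Illustrative config sketch (paths and class names are hypothetical): the
# `db_sampler` dict configures the ground-truth database that objects are
# pasted from.
#
#     db_sampler = dict(
#         data_root='data/kitti/',
#         info_path='data/kitti/kitti_dbinfos_train.pkl',
#         rate=1.0,
#         prepare=dict(filter_by_difficulty=[-1],
#                      filter_by_min_points=dict(Car=5)),
#         classes=['Car'],
#         sample_groups=dict(Car=15))
#     dict(type='ObjectSample', db_sampler=db_sampler)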


@PIPELINES.register_module()
class ObjectNoise(object):
    """Apply noise to each GT objects in the scene.

    Args:
        translation_std (list[float], optional): Standard deviation of the
            distribution where translation noise is sampled from.
            Defaults to [0.25, 0.25, 0.25].
        global_rot_range (list[float], optional): Global rotation to the scene.
            Defaults to [0.0, 0.0].
        rot_range (list[float], optional): Object rotation range.
            Defaults to [-0.15707963267, 0.15707963267].
        num_try (int, optional): Number of times to try if the noise applied is
            invalid. Defaults to 100.
    """

    def __init__(self,
                 translation_std=[0.25, 0.25, 0.25],
                 global_rot_range=[0.0, 0.0],
                 rot_range=[-0.15707963267, 0.15707963267],
                 num_try=100):
        self.translation_std = translation_std
        self.global_rot_range = global_rot_range
        self.rot_range = rot_range
        self.num_try = num_try

    def __call__(self, input_dict):
        """Call function to apply noise to each ground truth in the scene.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after adding noise to each object,
                'points', 'gt_bboxes_3d' keys are updated in the result dict.
        """
        gt_bboxes_3d = input_dict['gt_bboxes_3d']
        points = input_dict['points']

        # TODO: this is an in-place operation
        numpy_box = gt_bboxes_3d.tensor.numpy()
        numpy_points = points.tensor.numpy()

        noise_per_object_v3_(
            numpy_box,
            numpy_points,
            rotation_perturb=self.rot_range,
            center_noise_std=self.translation_std,
            global_random_rot_range=self.global_rot_range,
            num_try=self.num_try)

        input_dict['gt_bboxes_3d'] = gt_bboxes_3d.new_box(numpy_box)
        input_dict['points'] = points.new_point(numpy_points)
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(num_try={self.num_try},'
        repr_str += f' translation_std={self.translation_std},'
        repr_str += f' global_rot_range={self.global_rot_range},'
        repr_str += f' rot_range={self.rot_range})'
        return repr_str
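
# Illustrative config sketch (the values follow a common KITTI-style recipe
# and are an assumption, not a requirement):
#
#     dict(type='ObjectNoise',
#          num_try=100,
#          translation_std=[0.25, 0.25, 0.25],
#          global_rot_range=[0.0, 0.0],
#          rot_range=[-0.15707963267, 0.15707963267])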


@PIPELINES.register_module()
class GlobalAlignment(object):
    """Apply global alignment to 3D scene points by rotation and translation.

    Args:
        rotation_axis (int): Rotation axis for points and bboxes rotation.

    Note:
        We do not record the applied rotation and translation as in
            GlobalRotScaleTrans, because we usually do not need to reverse
            the alignment step.
        For example, ScanNet 3D detection task uses aligned ground-truth
            bounding boxes for evaluation.
    """

    def __init__(self, rotation_axis):
        self.rotation_axis = rotation_axis

    def _trans_points(self, input_dict, trans_factor):
        """Private function to translate points.

        Args:
            input_dict (dict): Result dict from loading pipeline.
            trans_factor (np.ndarray): Translation vector to be applied.

        Returns:
            dict: Results after translation, 'points' is updated in the dict.
        """
        input_dict['points'].translate(trans_factor)

    def _rot_points(self, input_dict, rot_mat):
        """Private function to rotate bounding boxes and points.

        Args:
            input_dict (dict): Result dict from loading pipeline.
            rot_mat (np.ndarray): Rotation matrix to be applied.

        Returns:
            dict: Results after rotation, 'points' is updated in the dict.
        """
        # input should be rot_mat_T so I transpose it here
        input_dict['points'].rotate(rot_mat.T)

    def _check_rot_mat(self, rot_mat):
        """Check if rotation matrix is valid for self.rotation_axis.

        Args:
            rot_mat (np.ndarray): Rotation matrix to be applied.
        """
        is_valid = np.allclose(np.linalg.det(rot_mat), 1.0)
        valid_array = np.zeros(3)
        valid_array[self.rotation_axis] = 1.0
        is_valid &= (rot_mat[self.rotation_axis, :] == valid_array).all()
        is_valid &= (rot_mat[:, self.rotation_axis] == valid_array).all()
        assert is_valid, f'invalid rotation matrix {rot_mat}'

    def __call__(self, input_dict):
        """Call function to shuffle points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after global alignment, 'points' and keys in
                input_dict['bbox3d_fields'] are updated in the result dict.
        """
        assert 'axis_align_matrix' in input_dict['ann_info'].keys(), \
            'axis_align_matrix is not provided in GlobalAlignment'

        axis_align_matrix = input_dict['ann_info']['axis_align_matrix']
        assert axis_align_matrix.shape == (4, 4), \
            f'invalid shape {axis_align_matrix.shape} for axis_align_matrix'
        rot_mat = axis_align_matrix[:3, :3]
        trans_vec = axis_align_matrix[:3, -1]

        self._check_rot_mat(rot_mat)
        self._rot_points(input_dict, rot_mat)
        self._trans_points(input_dict, trans_vec)

        return input_dict

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(rotation_axis={self.rotation_axis})'
        return repr_str
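
# Illustrative usage sketch: for ScanNet-style depth data the 4x4
# `axis_align_matrix` ships with the annotations, so the transform only needs
# the index of the up axis (z, i.e. 2):
#
#     dict(type='GlobalAlignment', rotation_axis=2)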


@PIPELINES.register_module()
class GlobalRotScaleTrans(object):
    """Apply global rotation, scaling and translation to a 3D scene.

    Args:
        rot_range (list[float], optional): Range of rotation angle.
            Defaults to [-0.78539816, 0.78539816] (close to [-pi/4, pi/4]).
        scale_ratio_range (list[float], optional): Range of scale ratio.
            Defaults to [0.95, 1.05].
        translation_std (list[float], optional): The standard deviation of
            translation noise applied to a scene, which
            is sampled from a gaussian distribution whose standard deviation
            is set by ``translation_std``. Defaults to [0, 0, 0].
        shift_height (bool, optional): Whether to shift height
            (the fourth dimension of indoor points) when scaling.
            Defaults to False.
    """

    def __init__(self,
                 rot_range=[-0.78539816, 0.78539816],
                 scale_ratio_range=[0.95, 1.05],
                 translation_std=[0, 0, 0],
                 shift_height=False):
        seq_types = (list, tuple, np.ndarray)
        if not isinstance(rot_range, seq_types):
            assert isinstance(rot_range, (int, float)), \
                f'unsupported rot_range type {type(rot_range)}'
            rot_range = [-rot_range, rot_range]
        self.rot_range = rot_range

        assert isinstance(scale_ratio_range, seq_types), \
            f'unsupported scale_ratio_range type {type(scale_ratio_range)}'
        self.scale_ratio_range = scale_ratio_range

        if not isinstance(translation_std, seq_types):
            assert isinstance(translation_std, (int, float)), \
                f'unsupported translation_std type {type(translation_std)}'
            translation_std = [
                translation_std, translation_std, translation_std
            ]
        assert all([std >= 0 for std in translation_std]), \
            'translation_std should be non-negative'
        self.translation_std = translation_std
        self.shift_height = shift_height

    def _trans_bbox_points(self, input_dict):
        """Private function to translate bounding boxes and points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after translation, 'points', 'pcd_trans'
                and keys in input_dict['bbox3d_fields'] are updated
                in the result dict.
        """
        translation_std = np.array(self.translation_std, dtype=np.float32)
        trans_factor = np.random.normal(scale=translation_std, size=3).T

        input_dict['points'].translate(trans_factor)
        input_dict['pcd_trans'] = trans_factor
        for key in input_dict['bbox3d_fields']:
            input_dict[key].translate(trans_factor)

    def _rot_bbox_points(self, input_dict):
        """Private function to rotate bounding boxes and points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after rotation, 'points', 'pcd_rotation'
                and keys in input_dict['bbox3d_fields'] are updated
                in the result dict.
        """
        rotation = self.rot_range
        noise_rotation = np.random.uniform(rotation[0], rotation[1])

        # if no bbox in input_dict, only rotate points
        if len(input_dict['bbox3d_fields']) == 0:
            rot_mat_T = input_dict['points'].rotate(noise_rotation)
            input_dict['pcd_rotation'] = rot_mat_T
            input_dict['pcd_rotation_angle'] = noise_rotation
            return

        # rotate points with bboxes
        for key in input_dict['bbox3d_fields']:
            if len(input_dict[key].tensor) != 0:
                points, rot_mat_T = input_dict[key].rotate(
                    noise_rotation, input_dict['points'])
                input_dict['points'] = points
                input_dict['pcd_rotation'] = rot_mat_T
                input_dict['pcd_rotation_angle'] = noise_rotation

    def _scale_bbox_points(self, input_dict):
        """Private function to scale bounding boxes and points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after scaling, 'points' and keys in
                input_dict['bbox3d_fields'] are updated in the result dict.
        """
        scale = input_dict['pcd_scale_factor']
        points = input_dict['points']
        points.scale(scale)
        if self.shift_height:
            assert 'height' in points.attribute_dims.keys(), \
                'setting shift_height=True but points have no height attribute'
            points.tensor[:, points.attribute_dims['height']] *= scale
        input_dict['points'] = points

        for key in input_dict['bbox3d_fields']:
            input_dict[key].scale(scale)

    def _random_scale(self, input_dict):
        """Private function to randomly set the scale factor.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after scaling, 'pcd_scale_factor' is updated
                in the result dict.
        """
        scale_factor = np.random.uniform(self.scale_ratio_range[0],
                                         self.scale_ratio_range[1])
        input_dict['pcd_scale_factor'] = scale_factor

    def __call__(self, input_dict):
        """Call function to rotate, scale and translate bounding boxes and
        points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after scaling, 'points', 'pcd_rotation',
                'pcd_scale_factor', 'pcd_trans' and keys in
                input_dict['bbox3d_fields'] are updated in the result dict.
        """
        if 'transformation_3d_flow' not in input_dict:
            input_dict['transformation_3d_flow'] = []

        self._rot_bbox_points(input_dict)

        if 'pcd_scale_factor' not in input_dict:
            self._random_scale(input_dict)
        self._scale_bbox_points(input_dict)

        self._trans_bbox_points(input_dict)

        input_dict['transformation_3d_flow'].extend(['R', 'S', 'T'])
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(rot_range={self.rot_range},'
        repr_str += f' scale_ratio_range={self.scale_ratio_range},'
        repr_str += f' translation_std={self.translation_std},'
        repr_str += f' shift_height={self.shift_height})'
        return repr_str
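
# Illustrative config sketch (hypothetical ranges): rotate the whole scene by
# up to ~pi/4, rescale by up to 5% and add zero-mean translation noise.
#
#     dict(type='GlobalRotScaleTrans',
#          rot_range=[-0.78539816, 0.78539816],
#          scale_ratio_range=[0.95, 1.05],
#          translation_std=[0.1, 0.1, 0.1])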


@PIPELINES.register_module()
class PointShuffle(object):
    """Shuffle input points."""
    def __call__(self, input_dict):
        """Call function to shuffle points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after shuffling, 'points', 'pts_instance_mask'
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        idx = input_dict['points'].shuffle()
        idx = idx.numpy()

        pts_instance_mask = input_dict.get('pts_instance_mask', None)
        pts_semantic_mask = input_dict.get('pts_semantic_mask', None)

        if pts_instance_mask is not None:
            input_dict['pts_instance_mask'] = pts_instance_mask[idx]

        if pts_semantic_mask is not None:
            input_dict['pts_semantic_mask'] = pts_semantic_mask[idx]

        return input_dict

    def __repr__(self):
        return self.__class__.__name__
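
# Illustrative usage sketch: PointShuffle takes no arguments, so the pipeline
# entry is just the type name.
#
#     dict(type='PointShuffle')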


@PIPELINES.register_module()
class ObjectRangeFilter(object):
    """Filter objects by the range.

    Args:
        point_cloud_range (list[float]): Point cloud range.
    """

    def __init__(self, point_cloud_range):
        self.pcd_range = np.array(point_cloud_range, dtype=np.float32)

    def __call__(self, input_dict):
        """Call function to filter objects by the range.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d'
                keys are updated in the result dict.
        """
        # Check points instance type and initialise bev_range
        if isinstance(input_dict['gt_bboxes_3d'],
                      (LiDARInstance3DBoxes, DepthInstance3DBoxes)):
            bev_range = self.pcd_range[[0, 1, 3, 4]]
        elif isinstance(input_dict['gt_bboxes_3d'], CameraInstance3DBoxes):
            bev_range = self.pcd_range[[0, 2, 3, 5]]

        gt_bboxes_3d = input_dict['gt_bboxes_3d']
        gt_labels_3d = input_dict['gt_labels_3d']
        mask = gt_bboxes_3d.in_range_bev(bev_range)
        gt_bboxes_3d = gt_bboxes_3d[mask]
        # mask is a torch tensor but gt_labels_3d is still numpy array
        # using mask to index gt_labels_3d will cause bug when
        # len(gt_labels_3d) == 1, where mask=1 will be interpreted
        # as gt_labels_3d[1] and cause out of index error
        # `np.bool` is a deprecated alias removed in recent NumPy; use bool
        gt_labels_3d = gt_labels_3d[mask.numpy().astype(bool)]

        # limit rad to [-pi, pi]
        gt_bboxes_3d.limit_yaw(offset=0.5, period=2 * np.pi)
        input_dict['gt_bboxes_3d'] = gt_bboxes_3d
        input_dict['gt_labels_3d'] = gt_labels_3d

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(point_cloud_range={self.pcd_range.tolist()})'
        return repr_str
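
# Illustrative config sketch (a hypothetical KITTI-like range, given as
# [x_min, y_min, z_min, x_max, y_max, z_max]):
#
#     dict(type='ObjectRangeFilter',
#          point_cloud_range=[0, -40, -3, 70.4, 40, 1])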


@PIPELINES.register_module()
class PointsRangeFilter(object):
    """Filter points by the range.

    Args:
        point_cloud_range (list[float]): Point cloud range.
    """

    def __init__(self, point_cloud_range):
        self.pcd_range = np.array(point_cloud_range, dtype=np.float32)

    def __call__(self, input_dict):
        """Call function to filter points by the range.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after filtering, 'points', 'pts_instance_mask'
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = input_dict['points']
        points_mask = points.in_range_3d(self.pcd_range)
        clean_points = points[points_mask]
        input_dict['points'] = clean_points
        points_mask = points_mask.numpy()

        pts_instance_mask = input_dict.get('pts_instance_mask', None)
        pts_semantic_mask = input_dict.get('pts_semantic_mask', None)

        if pts_instance_mask is not None:
            input_dict['pts_instance_mask'] = pts_instance_mask[points_mask]

        if pts_semantic_mask is not None:
            input_dict['pts_semantic_mask'] = pts_semantic_mask[points_mask]

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(point_cloud_range={self.pcd_range.tolist()})'
        return repr_str
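
# Illustrative config sketch: PointsRangeFilter usually shares its
# `point_cloud_range` with ObjectRangeFilter so points and boxes stay
# consistent.
#
#     dict(type='PointsRangeFilter',
#          point_cloud_range=[0, -40, -3, 70.4, 40, 1])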


@PIPELINES.register_module()
class ObjectNameFilter(object):
    """Filter GT objects by their names.
    Args:
        classes (list[str]): List of class names to be kept for training.
    """

    def __init__(self, classes):
        self.classes = classes
        self.labels = list(range(len(self.classes)))

    def __call__(self, input_dict):
        """Call function to filter objects by their names.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d'
                keys are updated in the result dict.
        """
        gt_labels_3d = input_dict['gt_labels_3d']
        gt_bboxes_mask = np.array([n in self.labels for n in gt_labels_3d],
                                  dtype=np.bool_)
        input_dict['gt_bboxes_3d'] = input_dict['gt_bboxes_3d'][gt_bboxes_mask]
        input_dict['gt_labels_3d'] = input_dict['gt_labels_3d'][gt_bboxes_mask]

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(classes={self.classes})'
        return repr_str
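
# Illustrative config sketch (class names are hypothetical and must match the
# dataset's label mapping):
#
#     dict(type='ObjectNameFilter',
#          classes=['Car', 'Pedestrian', 'Cyclist'])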


@PIPELINES.register_module()
class PointSample(object):
    """Point sample.

    Sampling data to a certain number.

    Args:
        num_points (int): Number of points to be sampled.
        sample_range (float, optional): The range where to sample points.
            If not None, the points with depth larger than `sample_range` are
            given higher priority when sampling. Defaults to None.
        replace (bool, optional): Whether the sampling is with or without
            replacement. Defaults to False.
    """

    def __init__(self, num_points, sample_range=None, replace=False):
        self.num_points = num_points
        self.sample_range = sample_range
        self.replace = replace

    def _points_random_sampling(self,
                                points,
                                num_samples,
                                sample_range=None,
                                replace=False,
                                return_choices=False):
        """Points random sampling.

        Sample points to a certain number.

        Args:
            points (np.ndarray | :obj:`BasePoints`): 3D Points.
            num_samples (int): Number of samples to be sampled.
            sample_range (float, optional): Indicating the range where the
                points will be sampled. Defaults to None.
            replace (bool, optional): Sampling with or without replacement.
                Defaults to False.
            return_choices (bool, optional): Whether to return choices.
                Defaults to False.
        Returns:
            tuple[np.ndarray] | np.ndarray:
                - points (np.ndarray | :obj:`BasePoints`): 3D Points.
                - choices (np.ndarray, optional): The generated random samples.
        """
        if not replace:
            replace = (points.shape[0] < num_samples)
        point_range = range(len(points))
        if sample_range is not None and not replace:
            # Only sampling the near points when len(points) >= num_samples
            depth = np.linalg.norm(points.tensor, axis=1)
            far_inds = np.where(depth >= sample_range)[0]
            near_inds = np.where(depth < sample_range)[0]
            # in case there are too many far points
            if len(far_inds) > num_samples:
                far_inds = np.random.choice(
                    far_inds, num_samples, replace=False)
            point_range = near_inds
            num_samples -= len(far_inds)
        choices = np.random.choice(point_range, num_samples, replace=replace)
        if sample_range is not None and not replace:
            choices = np.concatenate((far_inds, choices))
            # Shuffle points after sampling
            np.random.shuffle(choices)
        if return_choices:
            return points[choices], choices
        else:
            return points[choices]

    def __call__(self, results):
        """Call function to sample points to in indoor scenes.

        Args:
            input_dict (dict): Result dict from loading pipeline.
        Returns:
            dict: Results after sampling, 'points', 'pts_instance_mask'
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = results['points']
        points, choices = self._points_random_sampling(
            points,
            self.num_points,
            self.sample_range,
            self.replace,
            return_choices=True)
        results['points'] = points

        pts_instance_mask = results.get('pts_instance_mask', None)
        pts_semantic_mask = results.get('pts_semantic_mask', None)

        if pts_instance_mask is not None:
            pts_instance_mask = pts_instance_mask[choices]
            results['pts_instance_mask'] = pts_instance_mask

        if pts_semantic_mask is not None:
            pts_semantic_mask = pts_semantic_mask[choices]
            results['pts_semantic_mask'] = pts_semantic_mask

        return results

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(num_points={self.num_points},'
        repr_str += f' sample_range={self.sample_range},'
        repr_str += f' replace={self.replace})'
        return repr_str
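
# Illustrative config sketch: subsample each cloud to a fixed size; with
# `sample_range` set, points beyond that depth are preferentially kept.
#
#     dict(type='PointSample', num_points=16384, sample_range=40.0)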


@PIPELINES.register_module()
class IndoorPointSample(PointSample):
    """Indoor point sample.

    Sampling data to a certain number.
    NOTE: IndoorPointSample is deprecated in favor of PointSample

    Args:
        num_points (int): Number of points to be sampled.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'IndoorPointSample is deprecated in favor of PointSample')
        super(IndoorPointSample, self).__init__(*args, **kwargs)


@PIPELINES.register_module()
class IndoorPatchPointSample(object):
    r"""Indoor point sample within a patch. Modified from `PointNet++ <https://
    github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py>`_.

    Sampling data to a certain number for semantic segmentation.

    Args:
        num_points (int): Number of points to be sampled.
        block_size (float, optional): Size of a block to sample points from.
            Defaults to 1.5.
        sample_rate (float, optional): Stride used in sliding patch generation.
            This parameter is unused in `IndoorPatchPointSample` and thus has
            been deprecated. We plan to remove it in the future.
            Defaults to None.
        ignore_index (int, optional): Label index that won't be used for the
            segmentation task. This is set in PointSegClassMapping as neg_cls.
            If not None, will be used as a patch selection criterion.
            Defaults to None.
        use_normalized_coord (bool, optional): Whether to use normalized xyz as
            additional features. Defaults to False.
        num_try (int, optional): Number of times to try if the patch selected
            is invalid. Defaults to 10.
        enlarge_size (float, optional): Enlarge the sampled patch to
            [-block_size / 2 - enlarge_size, block_size / 2 + enlarge_size] as
            an augmentation. If None, set it as 0. Defaults to 0.2.
        min_unique_num (int, optional): Minimum number of unique points
            the sampled patch should contain. If None, use PointNet++'s method
            to judge uniqueness. Defaults to None.
        eps (float, optional): A value added to patch boundary to guarantee
            points coverage. Defaults to 1e-2.

    Note:
        This transform should only be used in the training process of point
            cloud segmentation tasks. For the sliding patch generation and
            inference process in testing, please refer to the `slide_inference`
            function of `EncoderDecoder3D` class.
    """

    def __init__(self,
                 num_points,
                 block_size=1.5,
                 sample_rate=None,
                 ignore_index=None,
                 use_normalized_coord=False,
                 num_try=10,
                 enlarge_size=0.2,
                 min_unique_num=None,
                 eps=1e-2):
        self.num_points = num_points
        self.block_size = block_size
        self.ignore_index = ignore_index
        self.use_normalized_coord = use_normalized_coord
        self.num_try = num_try
        self.enlarge_size = enlarge_size if enlarge_size is not None else 0.0
        self.min_unique_num = min_unique_num
        self.eps = eps

        if sample_rate is not None:
            warnings.warn(
                "'sample_rate' has been deprecated and will be removed in "
                'the future. Please remove them from your code.')

    def _input_generation(self, coords, patch_center, coord_max, attributes,
                          attribute_dims, point_type):
        """Generating model input.

        Generate input by subtracting patch center and adding additional
            features. Currently colors and normalized xyz are supported.

        Args:
            coords (np.ndarray): Sampled 3D Points.
            patch_center (np.ndarray): Center coordinate of the selected patch.
            coord_max (np.ndarray): Max coordinate of all 3D Points.
            attributes (np.ndarray): Features of input points.
            attribute_dims (dict): Dictionary to indicate the meaning of extra
                dimension.
            point_type (type): Class of input points inherited from BasePoints.

        Returns:
            :obj:`BasePoints`: The generated input data.
        """
        # subtract patch center, the z dimension is not centered
        centered_coords = coords.copy()
        centered_coords[:, 0] -= patch_center[0]
        centered_coords[:, 1] -= patch_center[1]

        if self.use_normalized_coord:
            normalized_coord = coords / coord_max
            attributes = np.concatenate([attributes, normalized_coord], axis=1)
            if attribute_dims is None:
                attribute_dims = dict()
            attribute_dims.update(
                dict(normalized_coord=[
                    attributes.shape[1], attributes.shape[1] +
                    1, attributes.shape[1] + 2
                ]))

        points = np.concatenate([centered_coords, attributes], axis=1)
        points = point_type(
            points, points_dim=points.shape[1], attribute_dims=attribute_dims)

        return points

    def _patch_points_sampling(self, points, sem_mask):
        """Patch points sampling.

        First sample a valid patch.
        Then sample points within that patch to a certain number.

        Args:
            points (:obj:`BasePoints`): 3D Points.
            sem_mask (np.ndarray): Semantic segmentation mask for input points.

        Returns:
            tuple[:obj:`BasePoints`, np.ndarray] | :obj:`BasePoints`:

                - points (:obj:`BasePoints`): 3D Points.
                - choices (np.ndarray): The generated random samples.
        """
        coords = points.coord.numpy()
        attributes = points.tensor[:, 3:].numpy()
        attribute_dims = points.attribute_dims
        point_type = type(points)

        coord_max = np.amax(coords, axis=0)
        coord_min = np.amin(coords, axis=0)

        for _ in range(self.num_try):
            # random sample a point as patch center
            cur_center = coords[np.random.choice(coords.shape[0])]

            # boundary of a patch, which would be enlarged by
            # `self.enlarge_size` as an augmentation
            cur_max = cur_center + np.array(
                [self.block_size / 2.0, self.block_size / 2.0, 0.0])
            cur_min = cur_center - np.array(
                [self.block_size / 2.0, self.block_size / 2.0, 0.0])
            cur_max[2] = coord_max[2]
            cur_min[2] = coord_min[2]
            cur_choice = np.sum(
                (coords >= (cur_min - self.enlarge_size)) *
                (coords <= (cur_max + self.enlarge_size)),
                axis=1) == 3

            if not cur_choice.any():  # no points in this patch
                continue

            cur_coords = coords[cur_choice, :]
            cur_sem_mask = sem_mask[cur_choice]
            point_idxs = np.where(cur_choice)[0]
            mask = np.sum(
                (cur_coords >= (cur_min - self.eps)) * (cur_coords <=
                                                        (cur_max + self.eps)),
                axis=1) == 3

            # two criteria for patch sampling, adopted from PointNet++
            # 1. selected patch should contain enough unique points
            if self.min_unique_num is None:
                # use PointNet++'s method as default
                # [31, 31, 62] are just some big values used to transform
                # coords from 3d array to 1d and then check their uniqueness
                # this is used in all the ScanNet code following PointNet++
                vidx = np.ceil(
                    (cur_coords[mask, :] - cur_min) / (cur_max - cur_min) *
                    np.array([31.0, 31.0, 62.0]))
                vidx = np.unique(vidx[:, 0] * 31.0 * 62.0 + vidx[:, 1] * 62.0 +
                                 vidx[:, 2])
                flag1 = len(vidx) / 31.0 / 31.0 / 62.0 >= 0.02
            else:
                # if `min_unique_num` is provided, directly compare with it
                flag1 = mask.sum() >= self.min_unique_num

            # 2. selected patch should contain enough annotated points
            if self.ignore_index is None:
                flag2 = True
            else:
                flag2 = np.sum(cur_sem_mask != self.ignore_index) / \
                               len(cur_sem_mask) >= 0.7

            if flag1 and flag2:
                break

        # sample idx to `self.num_points`
        if point_idxs.size >= self.num_points:
            # no duplicate in sub-sampling
            choices = np.random.choice(
                point_idxs, self.num_points, replace=False)
        else:
            # do not use random choice here to avoid some points not counted
            dup = np.random.choice(point_idxs.size,
                                   self.num_points - point_idxs.size)
            idx_dup = np.concatenate(
                [np.arange(point_idxs.size),
                 np.array(dup)], 0)
            choices = point_idxs[idx_dup]

        # construct model input
        points = self._input_generation(coords[choices], cur_center, coord_max,
                                        attributes[choices], attribute_dims,
                                        point_type)

        return points, choices

    def __call__(self, results):
        """Call function to sample points to in indoor scenes.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after sampling, 'points', 'pts_instance_mask'
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = results['points']

        assert 'pts_semantic_mask' in results.keys(), \
            'semantic mask should be provided in training and evaluation'
        pts_semantic_mask = results['pts_semantic_mask']

        points, choices = self._patch_points_sampling(points,
                                                      pts_semantic_mask)

        results['points'] = points
        results['pts_semantic_mask'] = pts_semantic_mask[choices]
        pts_instance_mask = results.get('pts_instance_mask', None)
        if pts_instance_mask is not None:
            results['pts_instance_mask'] = pts_instance_mask[choices]

        return results

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(num_points={self.num_points},'
        repr_str += f' block_size={self.block_size},'
        repr_str += f' ignore_index={self.ignore_index},'
        repr_str += f' use_normalized_coord={self.use_normalized_coord},'
        repr_str += f' num_try={self.num_try},'
        repr_str += f' enlarge_size={self.enlarge_size},'
        repr_str += f' min_unique_num={self.min_unique_num},'
        repr_str += f' eps={self.eps})'
        return repr_str


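# A minimal usage sketch for the patch sampler above. The class name
# `IndoorPatchPointSample` and the config values are assumptions for
# illustration; adapt them to the actual pipeline config.
def _example_indoor_patch_point_sample(points, sem_mask):
    # `points` is a :obj:`BasePoints` instance and `sem_mask` holds the
    # per-point semantic labels produced by the loading pipeline
    sampler = IndoorPatchPointSample(num_points=4096, block_size=1.5)
    results = sampler(dict(points=points, pts_semantic_mask=sem_mask))
    # exactly `num_points` points sampled from one block_size x block_size
    # patch of the scene
    return results['points'], results['pts_semantic_mask']

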
@PIPELINES.register_module()
class BackgroundPointsFilter(object):
    """Filter background points near the bounding box.

    Args:
        bbox_enlarge_range (tuple[float] | float): Bbox enlarge range.
    """

    def __init__(self, bbox_enlarge_range):
        assert (is_tuple_of(bbox_enlarge_range, float)
                and len(bbox_enlarge_range) == 3) \
            or isinstance(bbox_enlarge_range, float), \
            f'Invalid arguments bbox_enlarge_range {bbox_enlarge_range}'

        if isinstance(bbox_enlarge_range, float):
            bbox_enlarge_range = [bbox_enlarge_range] * 3
        self.bbox_enlarge_range = np.array(
            bbox_enlarge_range, dtype=np.float32)[np.newaxis, :]

    def __call__(self, input_dict):
        """Call function to filter points by the range.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after filtering, 'points', 'pts_instance_mask'
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = input_dict['points']
        gt_bboxes_3d = input_dict['gt_bboxes_3d']

        # avoid groundtruth being modified
        gt_bboxes_3d_np = gt_bboxes_3d.tensor.clone().numpy()
        gt_bboxes_3d_np[:, :3] = gt_bboxes_3d.gravity_center.clone().numpy()

        enlarged_gt_bboxes_3d = gt_bboxes_3d_np.copy()
        enlarged_gt_bboxes_3d[:, 3:6] += self.bbox_enlarge_range
        points_numpy = points.tensor.clone().numpy()
        foreground_masks = box_np_ops.points_in_rbbox(
            points_numpy, gt_bboxes_3d_np, origin=(0.5, 0.5, 0.5))
        enlarge_foreground_masks = box_np_ops.points_in_rbbox(
            points_numpy, enlarged_gt_bboxes_3d, origin=(0.5, 0.5, 0.5))
        foreground_masks = foreground_masks.max(1)
        enlarge_foreground_masks = enlarge_foreground_masks.max(1)
        # a point is dropped only if it falls in an enlarged box but not in
        # the original box, i.e. it is background close to an object
        valid_masks = ~np.logical_and(~foreground_masks,
                                      enlarge_foreground_masks)

        input_dict['points'] = points[valid_masks]
        pts_instance_mask = input_dict.get('pts_instance_mask', None)
        if pts_instance_mask is not None:
            input_dict['pts_instance_mask'] = pts_instance_mask[valid_masks]

        pts_semantic_mask = input_dict.get('pts_semantic_mask', None)
        if pts_semantic_mask is not None:
            input_dict['pts_semantic_mask'] = pts_semantic_mask[valid_masks]
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(bbox_enlarge_range={self.bbox_enlarge_range.tolist()})'
        return repr_str


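# A minimal usage sketch for `BackgroundPointsFilter`; the enlarge range
# below is an illustrative value, not a recommended setting.
def _example_background_points_filter(input_dict):
    # `input_dict` must contain 'points' and 'gt_bboxes_3d'
    bg_filter = BackgroundPointsFilter(bbox_enlarge_range=(0.5, 2.0, 0.5))
    # points inside the enlarged boxes but outside the original ones
    # are dropped
    return bg_filter(input_dict)

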
@PIPELINES.register_module()
class VoxelBasedPointSampler(object):
    """Voxel based point sampler.

    Apply voxel sampling to multiple sweep points.

    Args:
        cur_sweep_cfg (dict): Config for sampling current points.
        prev_sweep_cfg (dict): Config for sampling previous points.
        time_dim (int): Index that indicates the time dimension
            for input points.
    """

    def __init__(self, cur_sweep_cfg, prev_sweep_cfg=None, time_dim=3):
        self.cur_voxel_generator = VoxelGenerator(**cur_sweep_cfg)
        self.cur_voxel_num = self.cur_voxel_generator._max_voxels
        self.time_dim = time_dim
        if prev_sweep_cfg is not None:
            assert prev_sweep_cfg['max_num_points'] == \
                cur_sweep_cfg['max_num_points']
            self.prev_voxel_generator = VoxelGenerator(**prev_sweep_cfg)
            self.prev_voxel_num = self.prev_voxel_generator._max_voxels
        else:
            self.prev_voxel_generator = None
            self.prev_voxel_num = 0

    def _sample_points(self, points, sampler, point_dim):
        """Sample points for each points subset.

        Args:
            points (np.ndarray): Points subset to be sampled.
            sampler (VoxelGenerator): Voxel based sampler for
                each points subset.
            point_dim (int): The dimension of each point.

        Returns:
            np.ndarray: Sampled points.
        """
        voxels, coors, num_points_per_voxel = sampler.generate(points)
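        # if fewer voxels than `_max_voxels` were generated, pad the output
        # with copies of the first voxel to keep a fixed shape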
        if voxels.shape[0] < sampler._max_voxels:
            padding_points = np.zeros([
                sampler._max_voxels - voxels.shape[0], sampler._max_num_points,
                point_dim
            ],
                                      dtype=points.dtype)
            padding_points[:] = voxels[0]
            sample_points = np.concatenate([voxels, padding_points], axis=0)
        else:
            sample_points = voxels

        return sample_points

    def __call__(self, results):
        """Call function to sample points from multiple sweeps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after sampling, 'points', 'pts_instance_mask'
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = results['points']
        original_dim = points.shape[1]

        # TODO: process instance and semantic mask while _max_num_points
        # is larger than 1
        # Extend points with seg and mask fields
        map_fields2dim = []
        start_dim = original_dim
        points_numpy = points.tensor.numpy()
        extra_channel = [points_numpy]
        for idx, key in enumerate(results['pts_mask_fields']):
            map_fields2dim.append((key, idx + start_dim))
            extra_channel.append(results[key][..., None])

        start_dim += len(results['pts_mask_fields'])
        for idx, key in enumerate(results['pts_seg_fields']):
            map_fields2dim.append((key, idx + start_dim))
            extra_channel.append(results[key][..., None])

        points_numpy = np.concatenate(extra_channel, axis=-1)

        # Split points into two part, current sweep points and
        # previous sweeps points.
        # TODO: support different sampling methods for next sweeps points
        # and previous sweeps points.
        cur_points_flag = (points_numpy[:, self.time_dim] == 0)
        cur_sweep_points = points_numpy[cur_points_flag]
        prev_sweeps_points = points_numpy[~cur_points_flag]
        # if there are no previous sweeps, fall back to the current sweep
        if prev_sweeps_points.shape[0] == 0:
            prev_sweeps_points = cur_sweep_points

        # Shuffle points before sampling
        np.random.shuffle(cur_sweep_points)
        np.random.shuffle(prev_sweeps_points)

        cur_sweep_points = self._sample_points(cur_sweep_points,
                                               self.cur_voxel_generator,
                                               points_numpy.shape[1])
        if self.prev_voxel_generator is not None:
            prev_sweeps_points = self._sample_points(prev_sweeps_points,
                                                     self.prev_voxel_generator,
                                                     points_numpy.shape[1])

            points_numpy = np.concatenate(
                [cur_sweep_points, prev_sweeps_points], 0)
        else:
            points_numpy = cur_sweep_points

        if self.cur_voxel_generator._max_num_points == 1:
            points_numpy = points_numpy.squeeze(1)
        results['points'] = points.new_point(points_numpy[..., :original_dim])

        # Restore the corresponding seg and mask fields
        for key, dim_index in map_fields2dim:
            results[key] = points_numpy[..., dim_index]

        return results

    def __repr__(self):
        """str: Return a string that describes the module."""

        def _auto_indent(repr_str, indent):
            repr_str = repr_str.split('\n')
            repr_str = [' ' * indent + t + '\n' for t in repr_str]
            repr_str = ''.join(repr_str)[:-1]
            return repr_str

        repr_str = self.__class__.__name__
        indent = 4
        repr_str += '(\n'
        repr_str += ' ' * indent + f'num_cur_sweep={self.cur_voxel_num},\n'
        repr_str += ' ' * indent + f'num_prev_sweep={self.prev_voxel_num},\n'
        repr_str += ' ' * indent + f'time_dim={self.time_dim},\n'
        repr_str += ' ' * indent + 'cur_voxel_generator=\n'
        repr_str += f'{_auto_indent(repr(self.cur_voxel_generator), 8)},\n'
        repr_str += ' ' * indent + 'prev_voxel_generator=\n'
        repr_str += f'{_auto_indent(repr(self.prev_voxel_generator), 8)})'
        return repr_str


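# A minimal usage sketch for `VoxelBasedPointSampler`. The voxel sizes,
# ranges and voxel numbers below are illustrative assumptions only.
def _example_voxel_based_point_sampler(results):
    # `results` must contain 'points' plus the 'pts_mask_fields' and
    # 'pts_seg_fields' lists (possibly empty)
    sampler = VoxelBasedPointSampler(
        cur_sweep_cfg=dict(
            voxel_size=[0.1, 0.1, 0.1],
            point_cloud_range=[-50, -50, -5, 50, 50, 3],
            max_num_points=1,
            max_voxels=65536),
        prev_sweep_cfg=dict(
            voxel_size=[0.1, 0.1, 0.1],
            point_cloud_range=[-50, -50, -5, 50, 50, 3],
            max_num_points=1,
            max_voxels=65536),
        time_dim=3)
    return sampler(results)

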
@PIPELINES.register_module()
class AffineResize(object):
    """Get the affine transform matrices to the target size.

    Different from :class:`RandomAffine` in MMDetection, this class can
    calculate the affine transform matrices while resizing the input image
    to a fixed size. The affine transform matrices include: 1) matrix
    transforming original image to the network input image size. 2) matrix
    transforming original image to the network output feature map size.

    Args:
        img_scale (tuple): Image scales for resizing.
        down_ratio (int): The down ratio of the feature map,
            which should be >= 1.
        bbox_clip_border (bool, optional): Whether clip the objects
            outside the border of the image. Defaults to True.
    """

    def __init__(self, img_scale, down_ratio, bbox_clip_border=True):

        self.img_scale = img_scale
        self.down_ratio = down_ratio
        self.bbox_clip_border = bbox_clip_border

    def __call__(self, results):
        """Call function to do affine transform to input image and labels.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after affine resize, 'affine_aug', 'trans_mat'
                keys are added in the result dict.
        """
        # The results have gone through RandomShiftScale before AffineResize
        if 'center' not in results:
            img = results['img']
            height, width = img.shape[:2]
            center = np.array([width / 2, height / 2], dtype=np.float32)
            size = np.array([width, height], dtype=np.float32)
            results['affine_aug'] = False
        else:
            # The results did not go through RandomShiftScale before
            # AffineResize
            img = results['img']
            center = results['center']
            size = results['size']

        trans_affine = self._get_transform_matrix(center, size, self.img_scale)

        img = cv2.warpAffine(img, trans_affine[:2, :], self.img_scale)

        if isinstance(self.down_ratio, tuple):
            trans_mat = [
                self._get_transform_matrix(
                    center, size,
                    (self.img_scale[0] // ratio, self.img_scale[1] // ratio))
                for ratio in self.down_ratio
            ]  # (3, 3)
        else:
            trans_mat = self._get_transform_matrix(
                center, size, (self.img_scale[0] // self.down_ratio,
                               self.img_scale[1] // self.down_ratio))

        results['img'] = img
        results['img_shape'] = img.shape
        results['pad_shape'] = img.shape
        results['trans_mat'] = trans_mat

        self._affine_bboxes(results, trans_affine)

        if 'centers2d' in results:
            centers2d = self._affine_transform(results['centers2d'],
                                               trans_affine)
            valid_index = ((centers2d[:, 0] > 0) &
                           (centers2d[:, 0] < self.img_scale[0]) &
                           (centers2d[:, 1] > 0) &
                           (centers2d[:, 1] < self.img_scale[1]))
            results['centers2d'] = centers2d[valid_index]

            for key in results.get('bbox_fields', []):
                if key in ['gt_bboxes']:
                    results[key] = results[key][valid_index]
                    if 'gt_labels' in results:
                        results['gt_labels'] = results['gt_labels'][
                            valid_index]
                    if 'gt_masks' in results:
                        raise NotImplementedError(
                            'AffineResize only supports bbox.')

            for key in results.get('bbox3d_fields', []):
                if key in ['gt_bboxes_3d']:
                    results[key].tensor = results[key].tensor[valid_index]
                    if 'gt_labels_3d' in results:
                        results['gt_labels_3d'] = results['gt_labels_3d'][
                            valid_index]

            results['depths'] = results['depths'][valid_index]

        return results

    def _affine_bboxes(self, results, matrix):
        """Affine transform bboxes to input image.

        Args:
            results (dict): Result dict from loading pipeline.
            matrix (np.ndarray): Matrix transforming original
                image to the network input image size.
                shape: (3, 3)
        """

        for key in results.get('bbox_fields', []):
            bboxes = results[key]
            bboxes[:, :2] = self._affine_transform(bboxes[:, :2], matrix)
            bboxes[:, 2:] = self._affine_transform(bboxes[:, 2:], matrix)
            if self.bbox_clip_border:
                bboxes[:,
                       [0, 2]] = bboxes[:,
                                        [0, 2]].clip(0, self.img_scale[0] - 1)
                bboxes[:,
                       [1, 3]] = bboxes[:,
                                        [1, 3]].clip(0, self.img_scale[1] - 1)
            results[key] = bboxes

    def _affine_transform(self, points, matrix):
        """Affine transform bbox points to input image.

        Args:
            points (np.ndarray): Points to be transformed.
                shape: (N, 2)
            matrix (np.ndarray): Affine transform matrix.
                shape: (3, 3)

        Returns:
            np.ndarray: Transformed points.
        """
        num_points = points.shape[0]
        hom_points_2d = np.concatenate((points, np.ones((num_points, 1))),
                                       axis=1)
        hom_points_2d = hom_points_2d.T
        affined_points = np.matmul(matrix, hom_points_2d).T
        return affined_points[:, :2]

    def _get_transform_matrix(self, center, scale, output_scale):
        """Get affine transform matrix.

        Args:
            center (tuple): Center of current image.
            scale (tuple): Scale of current image.
            output_scale (tuple[float]): The transform target image scales.

        Returns:
            np.ndarray: Affine transform matrix.
        """
        # TODO: further add rot and shift here.
        src_w = scale[0]
        dst_w = output_scale[0]
        dst_h = output_scale[1]

        src_dir = np.array([0, src_w * -0.5])
        dst_dir = np.array([0, dst_w * -0.5])

        src = np.zeros((3, 2), dtype=np.float32)
        dst = np.zeros((3, 2), dtype=np.float32)
        src[0, :] = center
        src[1, :] = center + src_dir
        dst[0, :] = np.array([dst_w * 0.5, dst_h * 0.5])
        dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir

        src[2, :] = self._get_ref_point(src[0, :], src[1, :])
        dst[2, :] = self._get_ref_point(dst[0, :], dst[1, :])

        get_matrix = cv2.getAffineTransform(src, dst)

        matrix = np.concatenate((get_matrix, [[0., 0., 1.]]))

        return matrix.astype(np.float32)

    def _get_ref_point(self, ref_point1, ref_point2):
        """Get reference point to calculate affine transform matrix.

        While using opencv to calculate the affine matrix, we need at least
        three corresponding points separately on original image and target
        image. Here we use two points to get the third reference point.
        """
        d = ref_point1 - ref_point2
        ref_point3 = ref_point2 + np.array([-d[1], d[0]])
        return ref_point3

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(img_scale={self.img_scale}, '
        repr_str += f'down_ratio={self.down_ratio}, '
        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
        return repr_str


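# A minimal usage sketch for `AffineResize`; the image scale and down
# ratio are illustrative assumptions.
def _example_affine_resize(results):
    # `results` must contain at least 'img'; 'center' and 'size' are
    # optional and would normally be produced by `RandomShiftScale`
    resize = AffineResize(img_scale=(1280, 384), down_ratio=4)
    results = resize(results)
    # 'trans_mat' now maps original-image points to the down-sampled
    # feature map
    return results

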
@PIPELINES.register_module()
class RandomShiftScale(object):
    """Random shift scale.

    Different from the normal shift and scale function, it doesn't
    directly shift or scale the image. Instead, it records the shift and
    scale info in the result dict. It's designed to be used with
    AffineResize together.

    Args:
        shift_scale (tuple[float]): Shift and scale range.
        aug_prob (float): The shifting and scaling probability.
    """

    def __init__(self, shift_scale, aug_prob):

        self.shift_scale = shift_scale
        self.aug_prob = aug_prob

    def __call__(self, results):
        """Call function to record random shift and scale infos.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after random shift and scale, 'center', 'size'
                and 'affine_aug' keys are added in the result dict.
        """
        img = results['img']

        height, width = img.shape[:2]

        center = np.array([width / 2, height / 2], dtype=np.float32)
        size = np.array([width, height], dtype=np.float32)

        if random.random() < self.aug_prob:
            shift, scale = self.shift_scale[0], self.shift_scale[1]
            shift_ranges = np.arange(-shift, shift + 0.1, 0.1)
            center[0] += size[0] * random.choice(shift_ranges)
            center[1] += size[1] * random.choice(shift_ranges)
            scale_ranges = np.arange(1 - scale, 1 + scale + 0.1, 0.1)
            size *= random.choice(scale_ranges)
            results['affine_aug'] = True
        else:
            results['affine_aug'] = False

        results['center'] = center
        results['size'] = size

        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(shift_scale={self.shift_scale}, '
        repr_str += f'aug_prob={self.aug_prob})'
        return repr_str
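

# A minimal sketch showing how `RandomShiftScale` and `AffineResize` are
# chained; the parameter values are illustrative assumptions.
def _example_shift_scale_then_resize(results):
    # record a random center/size perturbation, then let `AffineResize`
    # consume the recorded 'center' and 'size'
    shift_scale = RandomShiftScale(shift_scale=(0.2, 0.4), aug_prob=0.3)
    resize = AffineResize(img_scale=(1280, 384), down_ratio=4)
    return resize(shift_scale(results))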