"src/targets/vscode:/vscode.git/clone" did not exist on "307c2024b0dce309015311e7ad97c49c1f3fe731"
transforms_3d.py 64.6 KB
Newer Older
dingchang's avatar
dingchang committed
# Copyright (c) OpenMMLab. All rights reserved.
import random
import warnings

import cv2
import numpy as np
from mmcv import is_tuple_of
from mmcv.utils import build_from_cfg

from mmdet3d.core import VoxelGenerator
from mmdet3d.core.bbox import (CameraInstance3DBoxes, DepthInstance3DBoxes,
                               LiDARInstance3DBoxes, box_np_ops)
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import RandomFlip
from ..builder import OBJECTSAMPLERS
from .data_augment_utils import noise_per_object_v3_


@PIPELINES.register_module()
class RandomDropPointsColor(object):
    r"""Randomly set the color of points to all zeros.

    Once this transform is executed, all the points' color will be dropped.
    Refer to `PAConv <https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/
    util/transform.py#L223>`_ for more details.

    Args:
        drop_ratio (float, optional): The probability of dropping point colors.
            Defaults to 0.2.
    """

    def __init__(self, drop_ratio=0.2):
        assert isinstance(drop_ratio, (int, float)) and 0 <= drop_ratio <= 1, \
            f'invalid drop_ratio value {drop_ratio}'
        self.drop_ratio = drop_ratio

    def __call__(self, input_dict):
        """Call function to drop point colors.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after color dropping,
                'points' key is updated in the result dict.
        """
        points = input_dict['points']
        assert points.attribute_dims is not None and \
            'color' in points.attribute_dims, \
            'Expect points have color attribute'

        # this if-expression is a bit strange
        # `RandomDropPointsColor` is used when training the 3D segmentor PAConv
        # we discovered in our experiments that using
        # `if np.random.rand() > 1.0 - self.drop_ratio` consistently leads to
        # better results than using `if np.random.rand() < self.drop_ratio`
        # so we keep this hack in our codebase
        if np.random.rand() > 1.0 - self.drop_ratio:
            points.color = points.color * 0.0
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(drop_ratio={self.drop_ratio})'
        return repr_str
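

# A minimal usage sketch (illustrative only, not used by this module): in an
# mmdet3d-style training pipeline config, the transform above is enabled by a
# dict entry like the one below, which build_from_cfg / the PIPELINES registry
# turns into a RandomDropPointsColor instance. The value restates the default.
_example_drop_color_cfg = dict(type='RandomDropPointsColor', drop_ratio=0.2)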


@PIPELINES.register_module()
class RandomFlip3D(RandomFlip):
    """Flip the points & bbox.

    If the input dict contains the key "flip", then the flag will be used,
    otherwise it will be randomly decided by a ratio specified in the init
    method.

    Args:
        sync_2d (bool, optional): Whether to apply flip according to the 2D
            images. If True, it will apply the same flip as that applied to
            the 2D images. If False, it will decide whether to flip randomly
            and independently of the 2D images. Defaults to True.
        flip_ratio_bev_horizontal (float, optional): The flipping probability
            in the horizontal direction. Defaults to 0.0.
        flip_ratio_bev_vertical (float, optional): The flipping probability
            in the vertical direction. Defaults to 0.0.
    """

    def __init__(self,
                 sync_2d=True,
                 flip_ratio_bev_horizontal=0.0,
                 flip_ratio_bev_vertical=0.0,
                 **kwargs):
        super(RandomFlip3D, self).__init__(
            flip_ratio=flip_ratio_bev_horizontal, **kwargs)
        self.sync_2d = sync_2d
        self.flip_ratio_bev_vertical = flip_ratio_bev_vertical
        if flip_ratio_bev_horizontal is not None:
            assert isinstance(
                flip_ratio_bev_horizontal,
                (int, float)) and 0 <= flip_ratio_bev_horizontal <= 1
        if flip_ratio_bev_vertical is not None:
            assert isinstance(
                flip_ratio_bev_vertical,
                (int, float)) and 0 <= flip_ratio_bev_vertical <= 1

    def random_flip_data_3d(self, input_dict, direction='horizontal'):
        """Flip 3D data randomly.

        Args:
            input_dict (dict): Result dict from loading pipeline.
            direction (str, optional): Flip direction.
                Default: 'horizontal'.

        Returns:
            dict: Flipped results, 'points', 'bbox3d_fields' keys are
                updated in the result dict.
        """
        assert direction in ['horizontal', 'vertical']
        # for semantic segmentation task, only points will be flipped.
        if 'bbox3d_fields' not in input_dict:
            input_dict['points'].flip(direction)
            return
        if len(input_dict['bbox3d_fields']) == 0:  # test mode
            input_dict['bbox3d_fields'].append('empty_box3d')
            input_dict['empty_box3d'] = input_dict['box_type_3d'](
                np.array([], dtype=np.float32))
        assert len(input_dict['bbox3d_fields']) == 1
        for key in input_dict['bbox3d_fields']:
            if 'points' in input_dict:
                input_dict['points'] = input_dict[key].flip(
                    direction, points=input_dict['points'])
            else:
                input_dict[key].flip(direction)
        if 'centers2d' in input_dict:
            assert self.sync_2d is True and direction == 'horizontal', \
                'Only support sync_2d=True and horizontal flip with images'
            w = input_dict['ori_shape'][1]
            input_dict['centers2d'][..., 0] = \
                w - input_dict['centers2d'][..., 0]
            # need to modify the horizontal position of camera center
            # along u-axis in the image (flip like centers2d)
            # ['cam2img'][0][2] = c_u
            # see more details and examples at
            # https://github.com/open-mmlab/mmdetection3d/pull/744
            input_dict['cam2img'][0][2] = w - input_dict['cam2img'][0][2]

    def __call__(self, input_dict):
        """Call function to flip points, values in the ``bbox3d_fields`` and
        also flip 2D image and its annotations.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Flipped results, 'flip', 'flip_direction',
                'pcd_horizontal_flip' and 'pcd_vertical_flip' keys are added
                into the result dict.
        """
        # flip 2D image and its annotations
        super(RandomFlip3D, self).__call__(input_dict)

        if self.sync_2d:
            input_dict['pcd_horizontal_flip'] = input_dict['flip']
            input_dict['pcd_vertical_flip'] = False
        else:
            if 'pcd_horizontal_flip' not in input_dict:
                flip_horizontal = bool(np.random.rand() < self.flip_ratio)
                input_dict['pcd_horizontal_flip'] = flip_horizontal
            if 'pcd_vertical_flip' not in input_dict:
                flip_vertical = bool(
                    np.random.rand() < self.flip_ratio_bev_vertical)
                input_dict['pcd_vertical_flip'] = flip_vertical

        if 'transformation_3d_flow' not in input_dict:
            input_dict['transformation_3d_flow'] = []

        if input_dict['pcd_horizontal_flip']:
            self.random_flip_data_3d(input_dict, 'horizontal')
            input_dict['transformation_3d_flow'].extend(['HF'])
        if input_dict['pcd_vertical_flip']:
            self.random_flip_data_3d(input_dict, 'vertical')
            input_dict['transformation_3d_flow'].extend(['VF'])
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(sync_2d={self.sync_2d},'
        repr_str += f' flip_ratio_bev_vertical={self.flip_ratio_bev_vertical})'
        return repr_str
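

# Illustrative config sketch (values are placeholders, not class defaults):
# for LiDAR-only training one typically decouples 3D flipping from the 2D
# images by setting sync_2d=False and giving both BEV flip probabilities
# explicitly, e.g.:
_example_flip_cfg = dict(
    type='RandomFlip3D',
    sync_2d=False,
    flip_ratio_bev_horizontal=0.5,
    flip_ratio_bev_vertical=0.5)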


@PIPELINES.register_module()
class RandomJitterPoints(object):
    """Randomly jitter point coordinates.

    Different from the global translation in ``GlobalRotScaleTrans``, here we
    apply different noises to each point in a scene.

    Args:
        jitter_std (list[float]): The standard deviation of jittering noise.
            This applies random noise to all points in a 3D scene, which is
            sampled from a gaussian distribution whose standard deviation is
            set by ``jitter_std``. Defaults to [0.01, 0.01, 0.01].
        clip_range (list[float]): Clip the randomly generated jitter
            noise into this range. If None is given, don't perform clipping.
            Defaults to [-0.05, 0.05].

    Note:
        This transform should only be used in point cloud segmentation tasks
        because we don't transform ground-truth bboxes accordingly.
        For a similar transform in detection tasks, please refer to
        `ObjectNoise`.
    """

    def __init__(self,
                 jitter_std=[0.01, 0.01, 0.01],
                 clip_range=[-0.05, 0.05]):
        seq_types = (list, tuple, np.ndarray)
        if not isinstance(jitter_std, seq_types):
            assert isinstance(jitter_std, (int, float)), \
                f'unsupported jitter_std type {type(jitter_std)}'
            jitter_std = [jitter_std, jitter_std, jitter_std]
        self.jitter_std = jitter_std

        if clip_range is not None:
            if not isinstance(clip_range, seq_types):
                assert isinstance(clip_range, (int, float)), \
                    f'unsupported clip_range type {type(clip_range)}'
                clip_range = [-clip_range, clip_range]
        self.clip_range = clip_range

    def __call__(self, input_dict):
        """Call function to jitter all the points in the scene.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after adding noise to each point,
                'points' key is updated in the result dict.
        """
        points = input_dict['points']
        jitter_std = np.array(self.jitter_std, dtype=np.float32)
        jitter_noise = \
            np.random.randn(points.shape[0], 3) * jitter_std[None, :]
        if self.clip_range is not None:
            jitter_noise = np.clip(jitter_noise, self.clip_range[0],
                                   self.clip_range[1])

        points.translate(jitter_noise)
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(jitter_std={self.jitter_std},'
        repr_str += f' clip_range={self.clip_range})'
        return repr_str
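

# A numpy-only sketch of the noise model used above (illustrative helper,
# not called by the pipeline): each point receives an independent gaussian
# offset per axis with standard deviation `std`, clipped to [-clip, clip],
# mirroring __call__ with the class defaults.
def _demo_jitter_noise(num_points=4, std=0.01, clip=0.05):
    noise = np.random.randn(num_points, 3) * std  # per-point gaussian offset
    return np.clip(noise, -clip, clip)  # bound the perturbation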


@PIPELINES.register_module()
class ObjectSample(object):
    """Sample GT objects to the data.

    Args:
        db_sampler (dict): Config dict of the database sampler.
        sample_2d (bool): Whether to also paste 2D image patches to the
            images. This should be True when applying multi-modality
            cut-and-paste. Defaults to False.
    """

    def __init__(self, db_sampler, sample_2d=False):
        self.sampler_cfg = db_sampler
        self.sample_2d = sample_2d
        if 'type' not in db_sampler.keys():
            db_sampler['type'] = 'DataBaseSampler'
        self.db_sampler = build_from_cfg(db_sampler, OBJECTSAMPLERS)

    @staticmethod
    def remove_points_in_boxes(points, boxes):
        """Remove the points in the sampled bounding boxes.

        Args:
            points (:obj:`BasePoints`): Input point cloud array.
            boxes (np.ndarray): Sampled ground truth boxes.

        Returns:
            np.ndarray: Points with those in the boxes removed.
        """
        masks = box_np_ops.points_in_rbbox(points.coord.numpy(), boxes)
        points = points[np.logical_not(masks.any(-1))]
        return points

    def __call__(self, input_dict):
        """Call function to sample ground truth objects to the data.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after object sampling augmentation,
                'points', 'gt_bboxes_3d', 'gt_labels_3d' keys are updated
                in the result dict.
        """
        gt_bboxes_3d = input_dict['gt_bboxes_3d']
        gt_labels_3d = input_dict['gt_labels_3d']

        # change to float for blending operation
        points = input_dict['points']
        if self.sample_2d:
            img = input_dict['img']
            gt_bboxes_2d = input_dict['gt_bboxes']
            # Assume for now 3D & 2D bboxes are the same
            sampled_dict = self.db_sampler.sample_all(
                gt_bboxes_3d.tensor.numpy(),
                gt_labels_3d,
                gt_bboxes_2d=gt_bboxes_2d,
                img=img)
        else:
            sampled_dict = self.db_sampler.sample_all(
                gt_bboxes_3d.tensor.numpy(), gt_labels_3d, img=None)

        if sampled_dict is not None:
            sampled_gt_bboxes_3d = sampled_dict['gt_bboxes_3d']
            sampled_points = sampled_dict['points']
            sampled_gt_labels = sampled_dict['gt_labels_3d']

            gt_labels_3d = np.concatenate([gt_labels_3d, sampled_gt_labels],
                                          axis=0)
            gt_bboxes_3d = gt_bboxes_3d.new_box(
                np.concatenate(
                    [gt_bboxes_3d.tensor.numpy(), sampled_gt_bboxes_3d]))

            points = self.remove_points_in_boxes(points, sampled_gt_bboxes_3d)
            # check the points dimension
            points = points.cat([sampled_points, points])

            if self.sample_2d:
                sampled_gt_bboxes_2d = sampled_dict['gt_bboxes_2d']
                gt_bboxes_2d = np.concatenate(
                    [gt_bboxes_2d, sampled_gt_bboxes_2d]).astype(np.float32)

                input_dict['gt_bboxes'] = gt_bboxes_2d
                input_dict['img'] = sampled_dict['img']

        input_dict['gt_bboxes_3d'] = gt_bboxes_3d
        input_dict['gt_labels_3d'] = gt_labels_3d.astype(np.int64)
        input_dict['points'] = points

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(sample_2d={self.sample_2d},'
        repr_str += f' data_root={self.sampler_cfg.data_root},'
        repr_str += f' info_path={self.sampler_cfg.info_path},'
        repr_str += f' rate={self.sampler_cfg.rate},'
        repr_str += f' prepare={self.sampler_cfg.prepare},'
        repr_str += f' classes={self.sampler_cfg.classes},'
        repr_str += f' sample_groups={self.sampler_cfg.sample_groups})'
        return repr_str
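

# Illustrative db_sampler sketch (paths, classes and counts are placeholders
# assuming a KITTI-style GT database): ObjectSample builds a DataBaseSampler
# from such a dict and pastes the sampled GT objects into each scene.
_example_db_sampler = dict(
    data_root='data/kitti/',
    info_path='data/kitti/kitti_dbinfos_train.pkl',
    rate=1.0,
    prepare=dict(
        filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)),
    classes=['Car'],
    sample_groups=dict(Car=15))
_example_object_sample_cfg = dict(
    type='ObjectSample', db_sampler=_example_db_sampler)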


@PIPELINES.register_module()
class ObjectNoise(object):
    """Apply noise to each GT object in the scene.

    Args:
        translation_std (list[float], optional): Standard deviation of the
            distribution where translation noise is sampled from.
            Defaults to [0.25, 0.25, 0.25].
        global_rot_range (list[float], optional): Global rotation to the scene.
            Defaults to [0.0, 0.0].
        rot_range (list[float], optional): Object rotation range.
            Defaults to [-0.15707963267, 0.15707963267].
        num_try (int, optional): Number of times to try if the noise applied
            is invalid. Defaults to 100.
    """

    def __init__(self,
                 translation_std=[0.25, 0.25, 0.25],
                 global_rot_range=[0.0, 0.0],
                 rot_range=[-0.15707963267, 0.15707963267],
                 num_try=100):
        self.translation_std = translation_std
        self.global_rot_range = global_rot_range
        self.rot_range = rot_range
        self.num_try = num_try

    def __call__(self, input_dict):
        """Call function to apply noise to each ground truth in the scene.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after adding noise to each object,
                'points', 'gt_bboxes_3d' keys are updated in the result dict.
        """
        gt_bboxes_3d = input_dict['gt_bboxes_3d']
        points = input_dict['points']

        # TODO: this is an in-place operation
        numpy_box = gt_bboxes_3d.tensor.numpy()
        numpy_points = points.tensor.numpy()

        noise_per_object_v3_(
            numpy_box,
            numpy_points,
            rotation_perturb=self.rot_range,
            center_noise_std=self.translation_std,
            global_random_rot_range=self.global_rot_range,
            num_try=self.num_try)

        input_dict['gt_bboxes_3d'] = gt_bboxes_3d.new_box(numpy_box)
        input_dict['points'] = points.new_point(numpy_points)
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(num_try={self.num_try},'
        repr_str += f' translation_std={self.translation_std},'
        repr_str += f' global_rot_range={self.global_rot_range},'
        repr_str += f' rot_range={self.rot_range})'
        return repr_str
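

# Illustrative usage sketch restating the class defaults: each GT box is
# perturbed with translation noise of std 0.25 m per axis and a yaw
# perturbation of roughly +-0.157 rad (about 9 degrees).
_example_object_noise_cfg = dict(
    type='ObjectNoise',
    translation_std=[0.25, 0.25, 0.25],
    global_rot_range=[0.0, 0.0],
    rot_range=[-0.15707963267, 0.15707963267])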


@PIPELINES.register_module()
class GlobalAlignment(object):
    """Apply global alignment to 3D scene points by rotation and translation.

    Args:
        rotation_axis (int): Rotation axis for points and bboxes rotation.

    Note:
        We do not record the applied rotation and translation as in
            GlobalRotScaleTrans, because usually we do not need to reverse
            the alignment step.
        For example, the ScanNet 3D detection task uses aligned ground-truth
            bounding boxes for evaluation.
    """

    def __init__(self, rotation_axis):
        self.rotation_axis = rotation_axis

    def _trans_points(self, input_dict, trans_factor):
        """Private function to translate points.

        Args:
            input_dict (dict): Result dict from loading pipeline.
            trans_factor (np.ndarray): Translation vector to be applied.

        Returns:
            dict: Results after translation, 'points' is updated in the dict.
        """
        input_dict['points'].translate(trans_factor)

    def _rot_points(self, input_dict, rot_mat):
        """Private function to rotate bounding boxes and points.

        Args:
            input_dict (dict): Result dict from loading pipeline.
            rot_mat (np.ndarray): Rotation matrix to be applied.

        Returns:
            dict: Results after rotation, 'points' is updated in the dict.
        """
        # input should be rot_mat_T so I transpose it here
        input_dict['points'].rotate(rot_mat.T)

    def _check_rot_mat(self, rot_mat):
        """Check if rotation matrix is valid for self.rotation_axis.

        Args:
            rot_mat (np.ndarray): Rotation matrix to be applied.
        """
        is_valid = np.allclose(np.linalg.det(rot_mat), 1.0)
        valid_array = np.zeros(3)
        valid_array[self.rotation_axis] = 1.0
        is_valid &= (rot_mat[self.rotation_axis, :] == valid_array).all()
        is_valid &= (rot_mat[:, self.rotation_axis] == valid_array).all()
        assert is_valid, f'invalid rotation matrix {rot_mat}'

    def __call__(self, input_dict):
        """Call function to shuffle points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after global alignment, 'points' and keys in
                input_dict['bbox3d_fields'] are updated in the result dict.
        """
        assert 'axis_align_matrix' in input_dict['ann_info'].keys(), \
            'axis_align_matrix is not provided in GlobalAlignment'

        axis_align_matrix = input_dict['ann_info']['axis_align_matrix']
        assert axis_align_matrix.shape == (4, 4), \
            f'invalid shape {axis_align_matrix.shape} for axis_align_matrix'
        rot_mat = axis_align_matrix[:3, :3]
        trans_vec = axis_align_matrix[:3, -1]

        self._check_rot_mat(rot_mat)
        self._rot_points(input_dict, rot_mat)
        self._trans_points(input_dict, trans_vec)

        return input_dict

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(rotation_axis={self.rotation_axis})'
        return repr_str
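

# Illustrative usage sketch: indoor datasets that provide an axis-align
# matrix (e.g. ScanNet) rotate about the z-axis, so configs typically enable
# this transform with rotation_axis=2 (the index of the z-axis).
_example_global_alignment_cfg = dict(type='GlobalAlignment', rotation_axis=2)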


@PIPELINES.register_module()
class GlobalRotScaleTrans(object):
    """Apply global rotation, scaling and translation to a 3D scene.

    Args:
        rot_range (list[float], optional): Range of rotation angle.
            Defaults to [-0.78539816, 0.78539816] (close to [-pi/4, pi/4]).
        scale_ratio_range (list[float], optional): Range of scale ratio.
            Defaults to [0.95, 1.05].
        translation_std (list[float], optional): The standard deviation of
            translation noise applied to a scene, which is sampled from a
            gaussian distribution whose standard deviation is set by
            ``translation_std``. Defaults to [0, 0, 0].
        shift_height (bool, optional): Whether to shift height
            (the fourth dimension of indoor points) when scaling.
            Defaults to False.
    """

    def __init__(self,
                 rot_range=[-0.78539816, 0.78539816],
                 scale_ratio_range=[0.95, 1.05],
                 translation_std=[0, 0, 0],
                 shift_height=False):
        seq_types = (list, tuple, np.ndarray)
        if not isinstance(rot_range, seq_types):
            assert isinstance(rot_range, (int, float)), \
                f'unsupported rot_range type {type(rot_range)}'
            rot_range = [-rot_range, rot_range]
        self.rot_range = rot_range

        assert isinstance(scale_ratio_range, seq_types), \
            f'unsupported scale_ratio_range type {type(scale_ratio_range)}'
        self.scale_ratio_range = scale_ratio_range

        if not isinstance(translation_std, seq_types):
            assert isinstance(translation_std, (int, float)), \
                f'unsupported translation_std type {type(translation_std)}'
            translation_std = [
                translation_std, translation_std, translation_std
            ]
        assert all([std >= 0 for std in translation_std]), \
            'translation_std should be non-negative'
        self.translation_std = translation_std
        self.shift_height = shift_height

    def _trans_bbox_points(self, input_dict):
        """Private function to translate bounding boxes and points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after translation, 'points', 'pcd_trans'
                and keys in input_dict['bbox3d_fields'] are updated
                in the result dict.
        """
        translation_std = np.array(self.translation_std, dtype=np.float32)
        trans_factor = np.random.normal(scale=translation_std, size=3).T

        input_dict['points'].translate(trans_factor)
        input_dict['pcd_trans'] = trans_factor
        for key in input_dict['bbox3d_fields']:
            input_dict[key].translate(trans_factor)

    def _rot_bbox_points(self, input_dict):
        """Private function to rotate bounding boxes and points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after rotation, 'points', 'pcd_rotation'
                and keys in input_dict['bbox3d_fields'] are updated
                in the result dict.
        """
        rotation = self.rot_range
        noise_rotation = np.random.uniform(rotation[0], rotation[1])

        # if no bbox in input_dict, only rotate points
        if len(input_dict['bbox3d_fields']) == 0:
            rot_mat_T = input_dict['points'].rotate(noise_rotation)
            input_dict['pcd_rotation'] = rot_mat_T
            input_dict['pcd_rotation_angle'] = noise_rotation
            return

        # rotate points with bboxes
        for key in input_dict['bbox3d_fields']:
            if len(input_dict[key].tensor) != 0:
                points, rot_mat_T = input_dict[key].rotate(
                    noise_rotation, input_dict['points'])
                input_dict['points'] = points
                input_dict['pcd_rotation'] = rot_mat_T
                input_dict['pcd_rotation_angle'] = noise_rotation

    def _scale_bbox_points(self, input_dict):
        """Private function to scale bounding boxes and points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after scaling, 'points' and keys in
                input_dict['bbox3d_fields'] are updated in the result dict.
        """
        scale = input_dict['pcd_scale_factor']
        points = input_dict['points']
        points.scale(scale)
        if self.shift_height:
            assert 'height' in points.attribute_dims.keys(), \
                'setting shift_height=True but points have no height attribute'
            points.tensor[:, points.attribute_dims['height']] *= scale
        input_dict['points'] = points

        for key in input_dict['bbox3d_fields']:
            input_dict[key].scale(scale)

    def _random_scale(self, input_dict):
        """Private function to randomly set the scale factor.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after scaling, 'pcd_scale_factor' is updated
                in the result dict.
        """
        scale_factor = np.random.uniform(self.scale_ratio_range[0],
                                         self.scale_ratio_range[1])
        input_dict['pcd_scale_factor'] = scale_factor

    def __call__(self, input_dict):
        """Call function to rotate, scale and translate bounding boxes and
        points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after scaling, 'points', 'pcd_rotation',
                'pcd_scale_factor', 'pcd_trans' and keys in
                input_dict['bbox3d_fields'] are updated in the result dict.
        """
        if 'transformation_3d_flow' not in input_dict:
            input_dict['transformation_3d_flow'] = []

        self._rot_bbox_points(input_dict)

        if 'pcd_scale_factor' not in input_dict:
            self._random_scale(input_dict)
        self._scale_bbox_points(input_dict)

        self._trans_bbox_points(input_dict)

        input_dict['transformation_3d_flow'].extend(['R', 'S', 'T'])
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(rot_range={self.rot_range},'
        repr_str += f' scale_ratio_range={self.scale_ratio_range},'
        repr_str += f' translation_std={self.translation_std},'
        repr_str += f' shift_height={self.shift_height})'
        return repr_str
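

# Illustrative usage sketch restating the class defaults: rotate the whole
# scene by a yaw angle sampled from roughly [-pi/4, pi/4], scale it by a
# factor in [0.95, 1.05] and leave translation noise disabled (std 0).
_example_global_rot_scale_trans_cfg = dict(
    type='GlobalRotScaleTrans',
    rot_range=[-0.78539816, 0.78539816],
    scale_ratio_range=[0.95, 1.05],
    translation_std=[0, 0, 0])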


@PIPELINES.register_module()
class PointShuffle(object):
    """Shuffle input points."""

    def __call__(self, input_dict):
        """Call function to shuffle points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after shuffling, 'points', 'pts_instance_mask'
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        idx = input_dict['points'].shuffle()
        idx = idx.numpy()

        pts_instance_mask = input_dict.get('pts_instance_mask', None)
        pts_semantic_mask = input_dict.get('pts_semantic_mask', None)

        if pts_instance_mask is not None:
            input_dict['pts_instance_mask'] = pts_instance_mask[idx]

        if pts_semantic_mask is not None:
            input_dict['pts_semantic_mask'] = pts_semantic_mask[idx]

        return input_dict

    def __repr__(self):
        return self.__class__.__name__


@PIPELINES.register_module()
class ObjectRangeFilter(object):
    """Filter objects by the range.

    Args:
        point_cloud_range (list[float]): Point cloud range.
    """
zhangwenwei's avatar
zhangwenwei committed
723
724
725
726
727

    def __init__(self, point_cloud_range):
        self.pcd_range = np.array(point_cloud_range, dtype=np.float32)

    def __call__(self, input_dict):
        """Call function to filter objects by the range.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d'
                keys are updated in the result dict.
        """
        # Check points instance type and initialise bev_range
        if isinstance(input_dict['gt_bboxes_3d'],
                      (LiDARInstance3DBoxes, DepthInstance3DBoxes)):
            bev_range = self.pcd_range[[0, 1, 3, 4]]
        elif isinstance(input_dict['gt_bboxes_3d'], CameraInstance3DBoxes):
            bev_range = self.pcd_range[[0, 2, 3, 5]]

        gt_bboxes_3d = input_dict['gt_bboxes_3d']
        gt_labels_3d = input_dict['gt_labels_3d']
        mask = gt_bboxes_3d.in_range_bev(bev_range)
        gt_bboxes_3d = gt_bboxes_3d[mask]
        # mask is a torch tensor but gt_labels_3d is still a numpy array
        # using mask to index gt_labels_3d will cause a bug when
        # len(gt_labels_3d) == 1, where mask=1 will be interpreted
        # as gt_labels_3d[1] and cause an out-of-index error
        gt_labels_3d = gt_labels_3d[mask.numpy().astype(np.bool_)]

        # limit rad to [-pi, pi]
        gt_bboxes_3d.limit_yaw(offset=0.5, period=2 * np.pi)
        input_dict['gt_bboxes_3d'] = gt_bboxes_3d
        input_dict['gt_labels_3d'] = gt_labels_3d

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(point_cloud_range={self.pcd_range.tolist()})'
        return repr_str
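

# Illustrative usage sketch (the range values are a common KITTI-style
# setting, used here as placeholders): boxes whose BEV projection falls
# outside the range are dropped; PointsRangeFilter below accepts the same
# point_cloud_range format for points.
_example_object_range_filter_cfg = dict(
    type='ObjectRangeFilter', point_cloud_range=[0, -40, -3, 70.4, 40, 1])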


@PIPELINES.register_module()
class PointsRangeFilter(object):
    """Filter points by the range.

    Args:
        point_cloud_range (list[float]): Point cloud range.
    """

    def __init__(self, point_cloud_range):
        self.pcd_range = np.array(point_cloud_range, dtype=np.float32)

    def __call__(self, input_dict):
        """Call function to filter points by the range.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after filtering, 'points', 'pts_instance_mask'
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = input_dict['points']
        points_mask = points.in_range_3d(self.pcd_range)
        clean_points = points[points_mask]
        input_dict['points'] = clean_points
        points_mask = points_mask.numpy()

        pts_instance_mask = input_dict.get('pts_instance_mask', None)
        pts_semantic_mask = input_dict.get('pts_semantic_mask', None)

        if pts_instance_mask is not None:
            input_dict['pts_instance_mask'] = pts_instance_mask[points_mask]

        if pts_semantic_mask is not None:
            input_dict['pts_semantic_mask'] = pts_semantic_mask[points_mask]

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(point_cloud_range={self.pcd_range.tolist()})'
        return repr_str


@PIPELINES.register_module()
class ObjectNameFilter(object):
    """Filter GT objects by their names.

    Args:
        classes (list[str]): List of class names to be kept for training.
    """

    def __init__(self, classes):
        self.classes = classes
        self.labels = list(range(len(self.classes)))

    def __call__(self, input_dict):
        """Call function to filter objects by their names.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d'
                keys are updated in the result dict.
        """
        gt_labels_3d = input_dict['gt_labels_3d']
        gt_bboxes_mask = np.array([n in self.labels for n in gt_labels_3d],
                                  dtype=np.bool_)
        input_dict['gt_bboxes_3d'] = input_dict['gt_bboxes_3d'][gt_bboxes_mask]
        input_dict['gt_labels_3d'] = input_dict['gt_labels_3d'][gt_bboxes_mask]

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(classes={self.classes})'
        return repr_str
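

# Illustrative usage sketch (class names are placeholders): only boxes whose
# labels fall inside `classes` survive; label ids are the indices of this
# list, matching self.labels above.
_example_name_filter_cfg = dict(
    type='ObjectNameFilter', classes=['Car', 'Pedestrian', 'Cyclist'])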


@PIPELINES.register_module()
class PointSample(object):
    """Point sample.

    Sampling data to a certain number.

    Args:
        num_points (int): Number of points to be sampled.
        sample_range (float, optional): The range where to sample points.
            If not None, the points with depth larger than ``sample_range``
            are prioritized during sampling. Defaults to None.
        replace (bool, optional): Whether the sampling is with or without
            replacement. Defaults to False.
    """

    def __init__(self, num_points, sample_range=None, replace=False):
        self.num_points = num_points
        self.sample_range = sample_range
        self.replace = replace

    def _points_random_sampling(self,
                                points,
                                num_samples,
                                sample_range=None,
                                replace=False,
                                return_choices=False):
        """Points random sampling.

        Sample points to a certain number.

        Args:
            points (np.ndarray | :obj:`BasePoints`): 3D Points.
            num_samples (int): Number of samples to be sampled.
            sample_range (float, optional): Indicating the range where the
                points will be sampled. Defaults to None.
            replace (bool, optional): Sampling with or without replacement.
                Defaults to False.
            return_choices (bool, optional): Whether return choice.
                Defaults to False.
        Returns:
            tuple[np.ndarray] | np.ndarray:
                - points (np.ndarray | :obj:`BasePoints`): 3D Points.
                - choices (np.ndarray, optional): The generated random samples.
        """
        if not replace:
            replace = (points.shape[0] < num_samples)
        point_range = range(len(points))
        if sample_range is not None and not replace:
            # Only sampling the near points when len(points) >= num_samples
            depth = np.linalg.norm(points.tensor, axis=1)
            far_inds = np.where(depth >= sample_range)[0]
            near_inds = np.where(depth < sample_range)[0]
            # in case there are too many far points
            if len(far_inds) > num_samples:
                far_inds = np.random.choice(
                    far_inds, num_samples, replace=False)
            point_range = near_inds
            num_samples -= len(far_inds)
        choices = np.random.choice(point_range, num_samples, replace=replace)
        if sample_range is not None and not replace:
            choices = np.concatenate((far_inds, choices))
            # Shuffle points after sampling
            np.random.shuffle(choices)
        if return_choices:
            return points[choices], choices
        else:
            return points[choices]

    def __call__(self, results):
        """Call function to sample points to in indoor scenes.

        Args:
            input_dict (dict): Result dict from loading pipeline.
        Returns:
            dict: Results after sampling, 'points', 'pts_instance_mask'
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = results['points']
        # Points in Camera coord can provide the depth information.
        # TODO: Need to support distance-based sampling for other coord system.
        if self.sample_range is not None:
            from mmdet3d.core.points import CameraPoints
            assert isinstance(points, CameraPoints), \
                'Sampling based on distance is only applicable for CAM coord'
        points, choices = self._points_random_sampling(
            points,
            self.num_points,
            self.sample_range,
            self.replace,
            return_choices=True)
        results['points'] = points

        pts_instance_mask = results.get('pts_instance_mask', None)
        pts_semantic_mask = results.get('pts_semantic_mask', None)

        if pts_instance_mask is not None:
            pts_instance_mask = pts_instance_mask[choices]
            results['pts_instance_mask'] = pts_instance_mask

        if pts_semantic_mask is not None:
            pts_semantic_mask = pts_semantic_mask[choices]
            results['pts_semantic_mask'] = pts_semantic_mask

        return results

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(num_points={self.num_points},'
        repr_str += f' sample_range={self.sample_range},'
        repr_str += f' replace={self.replace})'
        return repr_str
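

# Illustrative usage sketch (numbers are placeholders, not defaults):
# downsample every cloud to a fixed budget. When sample_range is set (camera
# coordinate points only), points farther than sample_range are kept
# preferentially and nearer points fill the remaining budget.
_example_point_sample_cfg = dict(
    type='PointSample', num_points=16384, sample_range=40.0)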


@PIPELINES.register_module()
class IndoorPointSample(PointSample):
    """Indoor point sample.

    Sampling data to a certain number.
    NOTE: IndoorPointSample is deprecated in favor of PointSample

    Args:
        num_points (int): Number of points to be sampled.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'IndoorPointSample is deprecated in favor of PointSample')
        super(IndoorPointSample, self).__init__(*args, **kwargs)


@PIPELINES.register_module()
class IndoorPatchPointSample(object):
    r"""Indoor point sample within a patch. Modified from `PointNet++ <https://
    github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py>`_.

    Sampling data to a certain number for semantic segmentation.

    Args:
        num_points (int): Number of points to be sampled.
        block_size (float, optional): Size of a block to sample points from.
            Defaults to 1.5.
        sample_rate (float, optional): Stride used in sliding patch generation.
            This parameter is unused in `IndoorPatchPointSample` and thus has
            been deprecated. We plan to remove it in the future.
            Defaults to None.
        ignore_index (int, optional): Label index that won't be used for the
            segmentation task. This is set in PointSegClassMapping as neg_cls.
            If not None, will be used as a patch selection criterion.
            Defaults to None.
        use_normalized_coord (bool, optional): Whether to use normalized xyz as
            additional features. Defaults to False.
        num_try (int, optional): Number of times to try if the patch selected
            is invalid. Defaults to 10.
        enlarge_size (float, optional): Enlarge the sampled patch to
            [-block_size / 2 - enlarge_size, block_size / 2 + enlarge_size] as
            an augmentation. If None, set it as 0. Defaults to 0.2.
        min_unique_num (int, optional): Minimum number of unique points
            the sampled patch should contain. If None, use PointNet++'s method
            to judge uniqueness. Defaults to None.
        eps (float, optional): A value added to patch boundary to guarantee
            points coverage. Defaults to 1e-2.

    Note:
        This transform should only be used in the training process of point
            cloud segmentation tasks. For the sliding patch generation and
            inference process in testing, please refer to the `slide_inference`
            function of `EncoderDecoder3D` class.
    """

    def __init__(self,
                 num_points,
                 block_size=1.5,
                 sample_rate=None,
                 ignore_index=None,
                 use_normalized_coord=False,
                 num_try=10,
                 enlarge_size=0.2,
                 min_unique_num=None,
                 eps=1e-2):
        self.num_points = num_points
        self.block_size = block_size
        self.ignore_index = ignore_index
        self.use_normalized_coord = use_normalized_coord
        self.num_try = num_try
        self.enlarge_size = enlarge_size if enlarge_size is not None else 0.0
        self.min_unique_num = min_unique_num
        self.eps = eps

        if sample_rate is not None:
            warnings.warn(
                "'sample_rate' has been deprecated and will be removed in "
                'the future. Please remove them from your code.')

    def _input_generation(self, coords, patch_center, coord_max, attributes,
                          attribute_dims, point_type):
        """Generating model input.

1049
        Generate input by subtracting patch center and adding additional
1050
1051
1052
1053
1054
1055
1056
1057
1058
            features. Currently support colors and normalized xyz as features.

        Args:
            coords (np.ndarray): Sampled 3D Points.
            patch_center (np.ndarray): Center coordinate of the selected patch.
            coord_max (np.ndarray): Max coordinate of all 3D Points.
            attributes (np.ndarray): features of input points.
            attribute_dims (dict): Dictionary to indicate the meaning of extra
                dimension.
            point_type (type): class of input points inherited from BasePoints.

        Returns:
            :obj:`BasePoints`: The generated input data.
        """
        # subtract patch center, the z dimension is not centered
        centered_coords = coords.copy()
        centered_coords[:, 0] -= patch_center[0]
        centered_coords[:, 1] -= patch_center[1]

        if self.use_normalized_coord:
            normalized_coord = coords / coord_max
            attributes = np.concatenate([attributes, normalized_coord], axis=1)
            if attribute_dims is None:
                attribute_dims = dict()
            attribute_dims.update(
                dict(normalized_coord=[
                    attributes.shape[1], attributes.shape[1] +
                    1, attributes.shape[1] + 2
                ]))

        points = np.concatenate([centered_coords, attributes], axis=1)
        points = point_type(
            points, points_dim=points.shape[1], attribute_dims=attribute_dims)

        return points

    def _patch_points_sampling(self, points, sem_mask):
        """Patch points sampling.

        First sample a valid patch.
        Then sample points within that patch to a certain number.

        Args:
            points (:obj:`BasePoints`): 3D Points.
            sem_mask (np.ndarray): semantic segmentation mask for input points.

        Returns:
            tuple[:obj:`BasePoints`, np.ndarray] | :obj:`BasePoints`:

                - points (:obj:`BasePoints`): 3D Points.
                - choices (np.ndarray): The generated random samples.
        """
        coords = points.coord.numpy()
        attributes = points.tensor[:, 3:].numpy()
        attribute_dims = points.attribute_dims
        point_type = type(points)

        coord_max = np.amax(coords, axis=0)
        coord_min = np.amin(coords, axis=0)

        for _ in range(self.num_try):
            # random sample a point as patch center
            cur_center = coords[np.random.choice(coords.shape[0])]

            # boundary of a patch, which would be enlarged by
            # `self.enlarge_size` as an augmentation
            cur_max = cur_center + np.array(
                [self.block_size / 2.0, self.block_size / 2.0, 0.0])
            cur_min = cur_center - np.array(
                [self.block_size / 2.0, self.block_size / 2.0, 0.0])
            cur_max[2] = coord_max[2]
            cur_min[2] = coord_min[2]
            cur_choice = np.sum(
                (coords >= (cur_min - self.enlarge_size)) *
                (coords <= (cur_max + self.enlarge_size)),
                axis=1) == 3

            if not cur_choice.any():  # no points in this patch
                continue

            cur_coords = coords[cur_choice, :]
            cur_sem_mask = sem_mask[cur_choice]
            point_idxs = np.where(cur_choice)[0]
            mask = np.sum(
                (cur_coords >= (cur_min - self.eps)) * (cur_coords <=
                                                        (cur_max + self.eps)),
                axis=1) == 3

            # two criteria for patch sampling, adopted from PointNet++
            # 1. selected patch should contain enough unique points
            if self.min_unique_num is None:
                # use PointNet++'s method as default
                # [31, 31, 62] are just some big values used to transform
                # coords from 3d array to 1d and then check their uniqueness
                # this is used in all the ScanNet code following PointNet++
                vidx = np.ceil(
                    (cur_coords[mask, :] - cur_min) / (cur_max - cur_min) *
                    np.array([31.0, 31.0, 62.0]))
                vidx = np.unique(vidx[:, 0] * 31.0 * 62.0 + vidx[:, 1] * 62.0 +
                                 vidx[:, 2])
                flag1 = len(vidx) / 31.0 / 31.0 / 62.0 >= 0.02
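                # e.g. a point exactly at the patch corner `cur_max` maps to
                # vidx == [31, 31, 62]; flattening as x * 31 * 62 + y * 62 + z
                # gives (nearly) unique keys per voxel, so `len(vidx)`
                # approximates the number of occupied voxels out of
                # 31 * 31 * 62 candidates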
            else:
                # if `min_unique_num` is provided, directly compare with it
                flag1 = mask.sum() >= self.min_unique_num

            # 2. selected patch should contain enough annotated points
            if self.ignore_index is None:
                flag2 = True
            else:
                flag2 = np.sum(cur_sem_mask != self.ignore_index) / \
                               len(cur_sem_mask) >= 0.7

            if flag1 and flag2:
                break

        # sample idx to `self.num_points`
        if point_idxs.size >= self.num_points:
            # no duplicate in sub-sampling
            choices = np.random.choice(
                point_idxs, self.num_points, replace=False)
        else:
            # do not use random choice here to avoid some points not counted
            dup = np.random.choice(point_idxs.size,
                                   self.num_points - point_idxs.size)
            idx_dup = np.concatenate(
                [np.arange(point_idxs.size),
                 np.array(dup)], 0)
            choices = point_idxs[idx_dup]
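            # e.g. with 3 candidate points and `self.num_points` == 5,
            # `idx_dup` might be [0, 1, 2, 1, 0]: every original point is
            # kept at least once and random duplicates fill the rest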

        # construct model input
        points = self._input_generation(coords[choices], cur_center, coord_max,
                                        attributes[choices], attribute_dims,
                                        point_type)

        return points, choices

    def __call__(self, results):
        """Call function to sample points to in indoor scenes.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
1193
            dict: Results after sampling, 'points', 'pts_instance_mask'
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = results['points']

        assert 'pts_semantic_mask' in results.keys(), \
            'semantic mask should be provided in training and evaluation'
        pts_semantic_mask = results['pts_semantic_mask']

        points, choices = self._patch_points_sampling(points,
                                                      pts_semantic_mask)

        results['points'] = points
        results['pts_semantic_mask'] = pts_semantic_mask[choices]
        pts_instance_mask = results.get('pts_instance_mask', None)
        if pts_instance_mask is not None:
            results['pts_instance_mask'] = pts_instance_mask[choices]

        return results

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(num_points={self.num_points},'
        repr_str += f' block_size={self.block_size},'
        repr_str += f' ignore_index={self.ignore_index},'
        repr_str += f' use_normalized_coord={self.use_normalized_coord},'
        repr_str += f' num_try={self.num_try},'
        repr_str += f' enlarge_size={self.enlarge_size},'
        repr_str += f' min_unique_num={self.min_unique_num},'
        repr_str += f' eps={self.eps})'
        return repr_str


@PIPELINES.register_module()
class BackgroundPointsFilter(object):
    """Filter background points near the bounding box.

    Args:
        bbox_enlarge_range (tuple[float] | float): Bbox enlarge range.
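
    Example:
        A hedged config sketch for use in a data pipeline; the enlarge
        values below are illustrative, not tuned defaults.

        >>> transform = dict(
        ...     type='BackgroundPointsFilter',
        ...     bbox_enlarge_range=(3.0, 2.0, 2.0))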
    """

    def __init__(self, bbox_enlarge_range):
        assert (is_tuple_of(bbox_enlarge_range, float)
                and len(bbox_enlarge_range) == 3) \
            or isinstance(bbox_enlarge_range, float), \
            f'Invalid arguments bbox_enlarge_range {bbox_enlarge_range}'

        if isinstance(bbox_enlarge_range, float):
            bbox_enlarge_range = [bbox_enlarge_range] * 3
        self.bbox_enlarge_range = np.array(
            bbox_enlarge_range, dtype=np.float32)[np.newaxis, :]

    def __call__(self, input_dict):
        """Call function to filter points by the range.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after filtering, 'points', 'pts_instance_mask'
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = input_dict['points']
        gt_bboxes_3d = input_dict['gt_bboxes_3d']

        # avoid groundtruth being modified
        gt_bboxes_3d_np = gt_bboxes_3d.tensor.clone().numpy()
        gt_bboxes_3d_np[:, :3] = gt_bboxes_3d.gravity_center.clone().numpy()

        enlarged_gt_bboxes_3d = gt_bboxes_3d_np.copy()
        enlarged_gt_bboxes_3d[:, 3:6] += self.bbox_enlarge_range
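        # only the box dimensions are enlarged; the gravity centers stay fixed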
        points_numpy = points.tensor.clone().numpy()
        foreground_masks = box_np_ops.points_in_rbbox(
            points_numpy, gt_bboxes_3d_np, origin=(0.5, 0.5, 0.5))
        enlarge_foreground_masks = box_np_ops.points_in_rbbox(
            points_numpy, enlarged_gt_bboxes_3d, origin=(0.5, 0.5, 0.5))
        foreground_masks = foreground_masks.max(1)
        enlarge_foreground_masks = enlarge_foreground_masks.max(1)
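        # by De Morgan's law this equals
        # `foreground_masks | ~enlarge_foreground_masks`: only points that
        # fall in an enlarged box but in no original box are dropped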
        valid_masks = ~np.logical_and(~foreground_masks,
                                      enlarge_foreground_masks)

        input_dict['points'] = points[valid_masks]
        pts_instance_mask = input_dict.get('pts_instance_mask', None)
        if pts_instance_mask is not None:
            input_dict['pts_instance_mask'] = pts_instance_mask[valid_masks]

        pts_semantic_mask = input_dict.get('pts_semantic_mask', None)
        if pts_semantic_mask is not None:
            input_dict['pts_semantic_mask'] = pts_semantic_mask[valid_masks]
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(bbox_enlarge_range={self.bbox_enlarge_range.tolist()})'
        return repr_str


@PIPELINES.register_module()
class VoxelBasedPointSampler(object):
    """Voxel based point sampler.

    Apply voxel sampling to multiple sweep points.

    Args:
        cur_sweep_cfg (dict): Config for sampling current points.
        prev_sweep_cfg (dict, optional): Config for sampling points of
            previous sweeps. Defaults to None.
        time_dim (int): Index that indicates the time dimension
            for input points. Defaults to 3.
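
    Example:
        A minimal config sketch. The sweep cfg keys are assumed to follow
        :class:`VoxelGenerator` (``voxel_size``, ``point_cloud_range``,
        ``max_num_points``, ``max_voxels``); the values are illustrative.

        >>> transform = dict(
        ...     type='VoxelBasedPointSampler',
        ...     cur_sweep_cfg=dict(
        ...         voxel_size=[0.1, 0.1, 0.1],
        ...         point_cloud_range=[-50, -50, -5, 50, 50, 3],
        ...         max_num_points=1,
        ...         max_voxels=65536),
        ...     time_dim=3)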
    """

    def __init__(self, cur_sweep_cfg, prev_sweep_cfg=None, time_dim=3):
        self.cur_voxel_generator = VoxelGenerator(**cur_sweep_cfg)
        self.cur_voxel_num = self.cur_voxel_generator._max_voxels
        self.time_dim = time_dim
        if prev_sweep_cfg is not None:
            assert prev_sweep_cfg['max_num_points'] == \
                cur_sweep_cfg['max_num_points']
            self.prev_voxel_generator = VoxelGenerator(**prev_sweep_cfg)
            self.prev_voxel_num = self.prev_voxel_generator._max_voxels
        else:
            self.prev_voxel_generator = None
            self.prev_voxel_num = 0

    def _sample_points(self, points, sampler, point_dim):
        """Sample points for each points subset.

        Args:
            points (np.ndarray): Point subset to be sampled.
            sampler (VoxelGenerator): Voxel based sampler for
                each point subset.
            point_dim (int): The dimension of each point.

        Returns:
            np.ndarray: Sampled points.
        """
        voxels, coors, num_points_per_voxel = sampler.generate(points)
        if voxels.shape[0] < sampler._max_voxels:
            padding_points = np.zeros([
                sampler._max_voxels - voxels.shape[0], sampler._max_num_points,
                point_dim
            ],
                                      dtype=points.dtype)
            padding_points[:] = voxels[0]
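            # repeat the first voxel as padding so the output always has
            # exactly `sampler._max_voxels` entries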
            sample_points = np.concatenate([voxels, padding_points], axis=0)
        else:
            sample_points = voxels

        return sample_points

    def __call__(self, results):
        """Call function to sample points from multiple sweeps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after sampling, 'points', 'pts_instance_mask'
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = results['points']
        original_dim = points.shape[1]

        # TODO: process instance and semantic mask while _max_num_points
        # is larger than 1
        # Extend points with seg and mask fields
        map_fields2dim = []
        start_dim = original_dim
        points_numpy = points.tensor.numpy()
        extra_channel = [points_numpy]
        for idx, key in enumerate(results['pts_mask_fields']):
            map_fields2dim.append((key, idx + start_dim))
            extra_channel.append(results[key][..., None])

        start_dim += len(results['pts_mask_fields'])
        for idx, key in enumerate(results['pts_seg_fields']):
            map_fields2dim.append((key, idx + start_dim))
            extra_channel.append(results[key][..., None])

        points_numpy = np.concatenate(extra_channel, axis=-1)

        # Split points into two parts: current sweep points and
        # previous sweeps points.
        # TODO: support different sampling methods for next sweeps points
        # and previous sweeps points.
        cur_points_flag = (points_numpy[:, self.time_dim] == 0)
        cur_sweep_points = points_numpy[cur_points_flag]
        prev_sweeps_points = points_numpy[~cur_points_flag]
        if prev_sweeps_points.shape[0] == 0:
            prev_sweeps_points = cur_sweep_points

        # Shuffle points before sampling
        np.random.shuffle(cur_sweep_points)
        np.random.shuffle(prev_sweeps_points)

        cur_sweep_points = self._sample_points(cur_sweep_points,
                                               self.cur_voxel_generator,
                                               points_numpy.shape[1])

        if self.prev_voxel_generator is not None:
            prev_sweeps_points = self._sample_points(prev_sweeps_points,
                                                     self.prev_voxel_generator,
                                                     points_numpy.shape[1])

            points_numpy = np.concatenate(
                [cur_sweep_points, prev_sweeps_points], 0)
        else:
            points_numpy = cur_sweep_points

        if self.cur_voxel_generator._max_num_points == 1:
            points_numpy = points_numpy.squeeze(1)
        results['points'] = points.new_point(points_numpy[..., :original_dim])

        # Restore the corresponding seg and mask fields
        for key, dim_index in map_fields2dim:
            results[key] = points_numpy[..., dim_index]

        return results

    def __repr__(self):
        """str: Return a string that describes the module."""

        def _auto_indent(repr_str, indent):
            repr_str = repr_str.split('\n')
            repr_str = [' ' * indent + t + '\n' for t in repr_str]
            repr_str = ''.join(repr_str)[:-1]
            return repr_str

        repr_str = self.__class__.__name__
        indent = 4
        repr_str += '(\n'
        repr_str += ' ' * indent + f'num_cur_sweep={self.cur_voxel_num},\n'
        repr_str += ' ' * indent + f'num_prev_sweep={self.prev_voxel_num},\n'
        repr_str += ' ' * indent + f'time_dim={self.time_dim},\n'
        repr_str += ' ' * indent + 'cur_voxel_generator=\n'
        repr_str += f'{_auto_indent(repr(self.cur_voxel_generator), 8)},\n'
        repr_str += ' ' * indent + 'prev_voxel_generator=\n'
        repr_str += f'{_auto_indent(repr(self.prev_voxel_generator), 8)})'
        return repr_str


@PIPELINES.register_module()
class AffineResize(object):
    """Get the affine transform matrices to the target size.

    Different from :class:`RandomAffine` in MMDetection, this class can
    calculate the affine transform matrices while resizing the input image
    to a fixed size. The affine transform matrices include: 1) matrix
    transforming original image to the network input image size. 2) matrix
    transforming original image to the network output feature map size.

    Args:
        img_scale (tuple): Image scales for resizing.
        down_ratio (int | tuple[int]): The downsampling ratio(s) of the
            feature map(s); each should be >= 1.
        bbox_clip_border (bool, optional): Whether clip the objects
            outside the border of the image. Defaults to True.
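
    Example:
        A minimal config sketch with illustrative values.

        >>> transform = dict(
        ...     type='AffineResize', img_scale=(1280, 384), down_ratio=4)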
    """

    def __init__(self, img_scale, down_ratio, bbox_clip_border=True):

        self.img_scale = img_scale
        self.down_ratio = down_ratio
        self.bbox_clip_border = bbox_clip_border

    def __call__(self, results):
        """Call function to do affine transform to input image and labels.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after affine resize, 'affine_aug', 'trans_mat'
                keys are added in the result dict.
        """
        # The results have gone through RandomShiftScale before AffineResize
        if 'center' not in results:
            img = results['img']
            height, width = img.shape[:2]
            center = np.array([width / 2, height / 2], dtype=np.float32)
            size = np.array([width, height], dtype=np.float32)
            results['affine_aug'] = False
        else:
            # The results did not go through RandomShiftScale before
            # AffineResize
            img = results['img']
            center = results['center']
            size = results['size']

        trans_affine = self._get_transform_matrix(center, size, self.img_scale)

        img = cv2.warpAffine(img, trans_affine[:2, :], self.img_scale)

        if isinstance(self.down_ratio, tuple):
            trans_mat = [
                self._get_transform_matrix(
                    center, size,
                    (self.img_scale[0] // ratio, self.img_scale[1] // ratio))
                for ratio in self.down_ratio
            ]  # (3, 3)
        else:
            trans_mat = self._get_transform_matrix(
                center, size, (self.img_scale[0] // self.down_ratio,
                               self.img_scale[1] // self.down_ratio))
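        # `trans_mat` maps original-image pixels to output feature-map
        # coordinates; a tuple `down_ratio` yields one matrix per level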

        results['img'] = img
        results['img_shape'] = img.shape
        results['pad_shape'] = img.shape
        results['trans_mat'] = trans_mat

        self._affine_bboxes(results, trans_affine)

        if 'centers2d' in results:
            centers2d = self._affine_transform(results['centers2d'],
                                               trans_affine)
            valid_index = (centers2d[:, 0] > 0) & \
                          (centers2d[:, 0] < self.img_scale[0]) & \
                          (centers2d[:, 1] > 0) & \
                          (centers2d[:, 1] < self.img_scale[1])
            results['centers2d'] = centers2d[valid_index]

            for key in results.get('bbox_fields', []):
                if key in ['gt_bboxes']:
                    results[key] = results[key][valid_index]
                    if 'gt_labels' in results:
                        results['gt_labels'] = results['gt_labels'][
                            valid_index]
                    if 'gt_masks' in results:
                        raise NotImplementedError(
                            'AffineResize only supports bbox.')

            for key in results.get('bbox3d_fields', []):
                if key in ['gt_bboxes_3d']:
                    results[key].tensor = results[key].tensor[valid_index]
                    if 'gt_labels_3d' in results:
                        results['gt_labels_3d'] = results['gt_labels_3d'][
                            valid_index]

            results['depths'] = results['depths'][valid_index]

        return results

    def _affine_bboxes(self, results, matrix):
        """Affine transform bboxes to input image.

        Args:
            results (dict): Result dict from loading pipeline.
            matrix (np.ndarray): Matrix transforming original
                image to the network input image size.
                shape: (3, 3)
        """

        for key in results.get('bbox_fields', []):
            bboxes = results[key]
            bboxes[:, :2] = self._affine_transform(bboxes[:, :2], matrix)
            bboxes[:, 2:] = self._affine_transform(bboxes[:, 2:], matrix)
            if self.bbox_clip_border:
                bboxes[:, [0, 2]] = bboxes[:, [0, 2]].clip(
                    0, self.img_scale[0] - 1)
                bboxes[:, [1, 3]] = bboxes[:, [1, 3]].clip(
                    0, self.img_scale[1] - 1)
            results[key] = bboxes

    def _affine_transform(self, points, matrix):
        """Affine transform bbox points to input image.

        Args:
            points (np.ndarray): Points to be transformed.
                shape: (N, 2)
            matrix (np.ndarray): Affine transform matrix.
                shape: (3, 3)

        Returns:
            np.ndarray: Transformed points.
        """
        num_points = points.shape[0]
        hom_points_2d = np.concatenate((points, np.ones((num_points, 1))),
                                       axis=1)
        hom_points_2d = hom_points_2d.T
        affined_points = np.matmul(matrix, hom_points_2d).T
        return affined_points[:, :2]

    def _get_transform_matrix(self, center, scale, output_scale):
        """Get affine transform matrix.

        Args:
            center (tuple): Center of current image.
            scale (tuple): Scale of current image.
            output_scale (tuple[float]): The transform target image scales.

        Returns:
            np.ndarray: Affine transform matrix.
        """
        # TODO: further add rot and shift here.
        src_w = scale[0]
        dst_w = output_scale[0]
        dst_h = output_scale[1]

        src_dir = np.array([0, src_w * -0.5])
        dst_dir = np.array([0, dst_w * -0.5])
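        # the second source/target point sits half the width above the
        # center, which fixes the scale of the mapping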

        src = np.zeros((3, 2), dtype=np.float32)
        dst = np.zeros((3, 2), dtype=np.float32)
        src[0, :] = center
        src[1, :] = center + src_dir
        dst[0, :] = np.array([dst_w * 0.5, dst_h * 0.5])
        dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir

        src[2, :] = self._get_ref_point(src[0, :], src[1, :])
        dst[2, :] = self._get_ref_point(dst[0, :], dst[1, :])

        get_matrix = cv2.getAffineTransform(src, dst)

        matrix = np.concatenate((get_matrix, [[0., 0., 1.]]))

        return matrix.astype(np.float32)

    def _get_ref_point(self, ref_point1, ref_point2):
        """Get reference point to calculate affine transform matrix.

        While using opencv to calculate the affine matrix, we need at least
        three corresponding points separately on original image and target
        image. Here we use two points to get the third reference point.
        """
        d = ref_point1 - ref_point2
        ref_point3 = ref_point2 + np.array([-d[1], d[0]])
        return ref_point3

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(img_scale={self.img_scale}, '
        repr_str += f'down_ratio={self.down_ratio}) '
        return repr_str


@PIPELINES.register_module()
class RandomShiftScale(object):
    """Random shift scale.

    Different from the normal shift and scale function, it doesn't
    directly shift or scale the image. It records the shift and scale
    infos into the loading pipeline. It's designed to be used with
    AffineResize together.

    Args:
        shift_scale (tuple[float]): Shift and scale range.
        aug_prob (float): The shifting and scaling probability.
    """

    def __init__(self, shift_scale, aug_prob):

        self.shift_scale = shift_scale
        self.aug_prob = aug_prob

    def __call__(self, results):
        """Call function to record random shift and scale infos.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after random shift and scale, 'center', 'size'
                and 'affine_aug' keys are added in the result dict.
        """
        img = results['img']

        height, width = img.shape[:2]

        center = np.array([width / 2, height / 2], dtype=np.float32)
        size = np.array([width, height], dtype=np.float32)

        if random.random() < self.aug_prob:
            shift, scale = self.shift_scale[0], self.shift_scale[1]
            shift_ranges = np.arange(-shift, shift + 0.1, 0.1)
            center[0] += size[0] * random.choice(shift_ranges)
            center[1] += size[1] * random.choice(shift_ranges)
            scale_ranges = np.arange(1 - scale, 1 + scale + 0.1, 0.1)
            size *= random.choice(scale_ranges)
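            # e.g. shift_scale=(0.2, 0.4) draws the shift factor from
            # {-0.2, ..., 0.2} and the scale factor from {0.6, ..., 1.4},
            # both in steps of 0.1 (illustrative values)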
            results['affine_aug'] = True
        else:
            results['affine_aug'] = False

        results['center'] = center
        results['size'] = size

        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(shift_scale={self.shift_scale}, '
        repr_str += f'aug_prob={self.aug_prob}) '
        return repr_str