# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import abstractmethod
from typing import Iterator, Optional, Sequence, Tuple, Union

import numpy as np
import torch
from mmcv.ops import box_iou_rotated, points_in_boxes_all, points_in_boxes_part
from torch import Tensor

from mmdet3d.structures.points import BasePoints
from .utils import limit_period


class BaseInstance3DBoxes:
    """Base class for 3D Boxes.

    Note:
        The box is bottom centered, i.e. the relative position of the origin in
        the box is (0.5, 0.5, 0).

    Args:
        tensor (Tensor or np.ndarray or Sequence[Sequence[float]]): The boxes
            data with shape (N, box_dim).
        box_dim (int): Number of dimensions of a box. Each row is
            (x, y, z, x_size, y_size, z_size, yaw). Defaults to 7.
        with_yaw (bool): Whether the box is with yaw rotation. If False, the
            value of yaw will be set to 0 as minmax boxes. Defaults to True.
        origin (Tuple[float]): Relative position of the box origin.
            Defaults to (0.5, 0.5, 0). Boxes passed in with a different origin
            will be converted to the (0.5, 0.5, 0) mode.

    Attributes:
        tensor (Tensor): Float matrix with shape (N, box_dim).
        box_dim (int): Integer indicating the dimension of a box. Each row is
            (x, y, z, x_size, y_size, z_size, yaw, ...).
        with_yaw (bool): If False, the value of yaw will be set to 0 as minmax
            boxes.
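
    Example:
        An illustrative construction sketch; it assumes a concrete subclass
        such as ``LiDARInstance3DBoxes``, which is not imported in this
        module:

        >>> import torch
        >>> from mmdet3d.structures import LiDARInstance3DBoxes
        >>> # boxes given with a gravity-center origin are shifted to the
        >>> # default bottom-center origin (0.5, 0.5, 0) on construction
        >>> boxes = LiDARInstance3DBoxes(
        ...     torch.rand(2, 7), box_dim=7, origin=(0.5, 0.5, 0.5))
        >>> boxes.tensor.shape  # -> torch.Size([2, 7])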
    """

    def __init__(
        self,
        tensor: Union[Tensor, np.ndarray, Sequence[Sequence[float]]],
        box_dim: int = 7,
        with_yaw: bool = True,
        origin: Tuple[float, float, float] = (0.5, 0.5, 0)
    ) -> None:
        if isinstance(tensor, Tensor):
            device = tensor.device
        else:
            device = torch.device('cpu')
        tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
        if tensor.numel() == 0:
            # Use reshape, so we don't end up creating a new tensor that does
            # not depend on the inputs (and consequently confuses jit)
            tensor = tensor.reshape((-1, box_dim))
        assert tensor.dim() == 2 and tensor.size(-1) == box_dim, \
            ('The box tensor must have 2 dimensions and the length of the '
             f'last dimension must be {box_dim}, but got boxes with shape '
             f'{tensor.shape}.')

        if tensor.shape[-1] == 6:
            # If the dimension of boxes is 6, we expand box_dim by padding 0 as
            # a fake yaw and set with_yaw to False
            assert box_dim == 6
            fake_rot = tensor.new_zeros(tensor.shape[0], 1)
            tensor = torch.cat((tensor, fake_rot), dim=-1)
            self.box_dim = box_dim + 1
            self.with_yaw = False
        else:
            self.box_dim = box_dim
            self.with_yaw = with_yaw
        self.tensor = tensor.clone()

        if origin != (0.5, 0.5, 0):
            dst = self.tensor.new_tensor((0.5, 0.5, 0))
            src = self.tensor.new_tensor(origin)
            self.tensor[:, :3] += self.tensor[:, 3:6] * (dst - src)

    @property
    def volume(self) -> Tensor:
        """Tensor: A vector with volume of each box in shape (N, )."""
        return self.tensor[:, 3] * self.tensor[:, 4] * self.tensor[:, 5]

    @property
    def dims(self) -> Tensor:
        """Tensor: Size dimensions of each box in shape (N, 3)."""
        return self.tensor[:, 3:6]

    @property
    def yaw(self) -> Tensor:
        """Tensor: A vector with yaw of each box in shape (N, )."""
        return self.tensor[:, 6]

    @property
    def height(self) -> Tensor:
        """Tensor: A vector with height of each box in shape (N, )."""
        return self.tensor[:, 5]

    @property
    def top_height(self) -> Tensor:
        """Tensor: A vector with top height of each box in shape (N, )."""
        return self.bottom_height + self.height

    @property
    def bottom_height(self) -> Tensor:
        """Tensor: A vector with bottom height of each box in shape (N, )."""
        return self.tensor[:, 2]

    @property
    def center(self) -> Tensor:
        """Calculate the center of all the boxes.

        Note:
            In MMDetection3D's convention, the bottom center is usually taken
            as the default center.

            The relative positions of the centers in different kinds of boxes
            are different, e.g., the relative center of a box is
            (0.5, 1.0, 0.5) in camera coordinates and (0.5, 0.5, 0) in LiDAR
            coordinates. It is recommended to use ``bottom_center`` or
            ``gravity_center`` for clearer usage.

        Returns:
            Tensor: A tensor with center of each box in shape (N, 3).
        """
        return self.bottom_center

    @property
    def bottom_center(self) -> Tensor:
        """Tensor: A tensor with center of each box in shape (N, 3)."""
        return self.tensor[:, :3]

    @property
    def gravity_center(self) -> Tensor:
        """Tensor: A tensor with gravity center of each box in shape (N, 3)."""
        bottom_center = self.bottom_center
        gravity_center = torch.zeros_like(bottom_center)
        gravity_center[:, :2] = bottom_center[:, :2]
        gravity_center[:, 2] = bottom_center[:, 2] + self.tensor[:, 5] * 0.5
        return gravity_center

    @property
    def corners(self) -> Tensor:
        """Tensor: A tensor with 8 corners of each box in shape (N, 8, 3)."""
        pass

    @property
    def bev(self) -> Tensor:
        """Tensor: 2D BEV box of each box with rotation in XYWHR format, in
        shape (N, 5)."""
        return self.tensor[:, [0, 1, 3, 4, 6]]

    @property
    def nearest_bev(self) -> Tensor:
        """Tensor: A tensor of 2D BEV box of each box without rotation."""
        # Obtain BEV boxes with rotation in XYWHR format
        bev_rotated_boxes = self.bev
        # convert the rotation to a valid range
        rotations = bev_rotated_boxes[:, -1]
        normed_rotations = torch.abs(limit_period(rotations, 0.5, np.pi))

        # when a box is rotated closer to pi / 2, exchange its width and length
        conditions = (normed_rotations > np.pi / 4)[..., None]
        bboxes_xywh = torch.where(conditions, bev_rotated_boxes[:,
                                                                [0, 1, 3, 2]],
                                  bev_rotated_boxes[:, :4])

        centers = bboxes_xywh[:, :2]
        dims = bboxes_xywh[:, 2:]
        bev_boxes = torch.cat([centers - dims / 2, centers + dims / 2], dim=-1)
        return bev_boxes

    def in_range_bev(
            self, box_range: Union[Tensor, np.ndarray,
                                   Sequence[float]]) -> Tensor:
        """Check whether the boxes are in the given range.

        Args:
            box_range (Tensor or np.ndarray or Sequence[float]): The range of
                box in order of (x_min, y_min, x_max, y_max).

        Note:
            The original implementation of SECOND checks whether boxes are in
            the range by checking whether the points are in a convex polygon.
            Here we reduce the computational burden for simpler cases.

        Returns:
            Tensor: A binary vector indicating whether each box is inside the
            reference range.
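
        Example:
            An illustrative sketch; it assumes a concrete subclass such as
            ``LiDARInstance3DBoxes``, which is not imported in this module:

            >>> from mmdet3d.structures import LiDARInstance3DBoxes
            >>> boxes = LiDARInstance3DBoxes(
            ...     torch.tensor([[1., 2., 0., 1., 1., 1., 0.]]))
            >>> boxes.in_range_bev([0., 0., 10., 10.])  # -> tensor([True])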
        """
        in_range_flags = ((self.bev[:, 0] > box_range[0])
                          & (self.bev[:, 1] > box_range[1])
                          & (self.bev[:, 0] < box_range[2])
                          & (self.bev[:, 1] < box_range[3]))
        return in_range_flags

    @abstractmethod
    def rotate(
        self,
        angle: Union[Tensor, np.ndarray, float],
        points: Optional[Union[Tensor, np.ndarray, BasePoints]] = None
    ) -> Union[Tuple[Tensor, Tensor], Tuple[np.ndarray, np.ndarray], Tuple[
            BasePoints, Tensor], None]:
        """Rotate boxes with points (optional) with the given angle or rotation
        matrix.

        Args:
            angle (Tensor or np.ndarray or float): Rotation angle or rotation
                matrix.
            points (Tensor or np.ndarray or :obj:`BasePoints`, optional):
                Points to rotate. Defaults to None.

        Returns:
            tuple or None: When ``points`` is None, the function returns None,
            otherwise it returns the rotated points and the rotation matrix
            ``rot_mat_T``.
        """
        pass

    @abstractmethod
    def flip(
        self,
        bev_direction: str = 'horizontal',
        points: Optional[Union[Tensor, np.ndarray, BasePoints]] = None
    ) -> Union[Tensor, np.ndarray, BasePoints, None]:
        """Flip the boxes in BEV along the given BEV direction.

        Args:
            bev_direction (str): Direction by which to flip. Can be chosen from
                'horizontal' and 'vertical'. Defaults to 'horizontal'.
            points (Tensor or np.ndarray or :obj:`BasePoints`, optional):
                Points to flip. Defaults to None.

        Returns:
            Tensor or np.ndarray or :obj:`BasePoints` or None: When ``points``
            is None, the function returns None, otherwise it returns the
            flipped points.
        """
        pass

    def translate(self, trans_vector: Union[Tensor, np.ndarray]) -> None:
        """Translate boxes with the given translation vector.

        Args:
            trans_vector (Tensor or np.ndarray): Translation vector of size
                1x3.
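
        Example:
            An illustrative sketch; ``boxes`` is assumed to be an instance of
            any concrete subclass of this class:

            >>> boxes.translate([1.0, 0.0, 0.0])  # shift every box center by
            >>> # +1 along the x-axis; the modification is in-place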
        """
        if not isinstance(trans_vector, Tensor):
            trans_vector = self.tensor.new_tensor(trans_vector)
        self.tensor[:, :3] += trans_vector

    def in_range_3d(
            self, box_range: Union[Tensor, np.ndarray,
                                   Sequence[float]]) -> Tensor:
        """Check whether the boxes are in the given range.

        Args:
            box_range (Tensor or np.ndarray or Sequence[float]): The range of
                box (x_min, y_min, z_min, x_max, y_max, z_max).

        Note:
            In the original implementation of SECOND, checking whether a box is
            in the range is done by checking whether the points are in a convex
            polygon. Here we reduce the burden for simpler cases.

        Returns:
            Tensor: A binary vector indicating whether each box is inside the
            reference range.
        """
        in_range_flags = ((self.tensor[:, 0] > box_range[0])
                          & (self.tensor[:, 1] > box_range[1])
                          & (self.tensor[:, 2] > box_range[2])
                          & (self.tensor[:, 0] < box_range[3])
                          & (self.tensor[:, 1] < box_range[4])
                          & (self.tensor[:, 2] < box_range[5]))
        return in_range_flags

    @abstractmethod
    def convert_to(self,
                   dst: int,
                   rt_mat: Optional[Union[Tensor, np.ndarray]] = None,
                   correct_yaw: bool = False) -> 'BaseInstance3DBoxes':
        """Convert self to ``dst`` mode.

        Args:
            dst (int): The target Box mode.
            rt_mat (Tensor or np.ndarray, optional): The rotation and
                translation matrix between different coordinates.
                Defaults to None. The conversion from ``src`` coordinates to
                ``dst`` coordinates usually accompanies a change of sensors,
                e.g., from camera to LiDAR. This requires a transformation
                matrix.
            correct_yaw (bool): Whether to convert the yaw angle to the target
                coordinate. Defaults to False.

        Returns:
            :obj:`BaseInstance3DBoxes`: The converted box of the same type in
            the ``dst`` mode.
        """
        pass

    def scale(self, scale_factor: float) -> None:
        """Scale the box with horizontal and vertical scaling factors.

        Args:
            scale_factor (float): Scale factor to scale the boxes.
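
        Example:
            An illustrative sketch; ``boxes`` is assumed to be an instance of
            any concrete subclass of this class:

            >>> boxes.scale(1.1)  # enlarge centers, sizes (and velocities,
            >>> # if present) by 10%; the modification is in-place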
        """
        self.tensor[:, :6] *= scale_factor
        self.tensor[:, 7:] *= scale_factor  # velocity

    def limit_yaw(self, offset: float = 0.5, period: float = np.pi) -> None:
        """Limit the yaw to a given period and offset.

        Args:
            offset (float): The offset of the yaw. Defaults to 0.5.
            period (float): The expected period. Defaults to np.pi.
        """
        self.tensor[:, 6] = limit_period(self.tensor[:, 6], offset, period)

    def nonempty(self, threshold: float = 0.0) -> Tensor:
        """Find boxes that are non-empty.

        A box is considered empty if any of its sides is not larger than the
        threshold.

        Args:
            threshold (float): The threshold of minimal sizes. Defaults to 0.0.

        Returns:
            Tensor: A binary vector which represents whether each box is empty
            (False) or non-empty (True).
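
        Example:
            An illustrative sketch; it assumes a concrete subclass such as
            ``LiDARInstance3DBoxes``, which is not imported in this module:

            >>> from mmdet3d.structures import LiDARInstance3DBoxes
            >>> boxes = LiDARInstance3DBoxes(
            ...     torch.tensor([[0., 0., 0., 1., 1., 1., 0.],
            ...                   [0., 0., 0., 0., 1., 1., 0.]]))
            >>> boxes.nonempty()  # -> tensor([True, False])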
        """
        box = self.tensor
        size_x = box[..., 3]
        size_y = box[..., 4]
        size_z = box[..., 5]
        keep = ((size_x > threshold)
                & (size_y > threshold) & (size_z > threshold))
        return keep

    def __getitem__(
            self, item: Union[int, slice, np.ndarray,
                              Tensor]) -> 'BaseInstance3DBoxes':
        """
        Args:
            item (int or slice or np.ndarray or Tensor): Index of boxes.

        Note:
            The following usages are allowed:

            1. `new_boxes = boxes[3]`: Return a `Boxes` that contains only one
               box.
            2. `new_boxes = boxes[2:10]`: Return a slice of boxes.
            3. `new_boxes = boxes[vector]`: Where vector is a
               torch.BoolTensor with `length = len(boxes)`. Nonzero elements in
               the vector will be selected.

            Note that the returned Boxes might share storage with this Boxes,
            subject to PyTorch's indexing semantics.

        Returns:
            :obj:`BaseInstance3DBoxes`: A new object of
            :class:`BaseInstance3DBoxes` after indexing.
        """
        original_type = type(self)
        if isinstance(item, int):
            return original_type(
                self.tensor[item].view(1, -1),
                box_dim=self.box_dim,
                with_yaw=self.with_yaw)
        b = self.tensor[item]
        assert b.dim() == 2, \
            f'Indexing on Boxes with {item} failed to return a matrix!'
        return original_type(b, box_dim=self.box_dim, with_yaw=self.with_yaw)

    def __len__(self) -> int:
        """int: Number of boxes in the current object."""
        return self.tensor.shape[0]

    def __repr__(self) -> str:
        """str: Return a string that describes the object."""
        return self.__class__.__name__ + '(\n    ' + str(self.tensor) + ')'

    @classmethod
    def cat(cls, boxes_list: Sequence['BaseInstance3DBoxes']
            ) -> 'BaseInstance3DBoxes':
        """Concatenate a list of Boxes into a single Boxes.

        Args:
            boxes_list (Sequence[:obj:`BaseInstance3DBoxes`]): List of boxes.

        Returns:
            :obj:`BaseInstance3DBoxes`: The concatenated boxes.
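
        Example:
            An illustrative sketch; it assumes a concrete subclass such as
            ``LiDARInstance3DBoxes``, which is not imported in this module:

            >>> from mmdet3d.structures import LiDARInstance3DBoxes
            >>> boxes_a = LiDARInstance3DBoxes(torch.rand(2, 7))
            >>> boxes_b = LiDARInstance3DBoxes(torch.rand(3, 7))
            >>> merged = LiDARInstance3DBoxes.cat([boxes_a, boxes_b])
            >>> len(merged)  # -> 5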
        """
        assert isinstance(boxes_list, (list, tuple))
        if len(boxes_list) == 0:
            return cls(torch.empty(0))
        assert all(isinstance(box, cls) for box in boxes_list)

        # use torch.cat (v.s. layers.cat)
        # so the returned boxes never share storage with input
        cat_boxes = cls(
            torch.cat([b.tensor for b in boxes_list], dim=0),
            box_dim=boxes_list[0].box_dim,
            with_yaw=boxes_list[0].with_yaw)
        return cat_boxes

    def to(self, device: Union[str, torch.device], *args,
           **kwargs) -> 'BaseInstance3DBoxes':
        """Convert current boxes to a specific device.

        Args:
            device (str or :obj:`torch.device`): The name of the device.

        Returns:
            :obj:`BaseInstance3DBoxes`: A new boxes object on the specific
            device.
        """
        original_type = type(self)
        return original_type(
            self.tensor.to(device, *args, **kwargs),
            box_dim=self.box_dim,
            with_yaw=self.with_yaw)

    def clone(self) -> 'BaseInstance3DBoxes':
        """Clone the boxes.

        Returns:
            :obj:`BaseInstance3DBoxes`: Box object with the same properties as
            self.
        """
        original_type = type(self)
        return original_type(
            self.tensor.clone(), box_dim=self.box_dim, with_yaw=self.with_yaw)

    @property
    def device(self) -> torch.device:
        """torch.device: The device the boxes are on."""
        return self.tensor.device

    def __iter__(self) -> Iterator[Tensor]:
        """Yield a box as a Tensor at a time.

        Returns:
            Iterator[Tensor]: A box of shape (box_dim, ).
        """
        yield from self.tensor

    @classmethod
    def height_overlaps(cls, boxes1: 'BaseInstance3DBoxes',
                        boxes2: 'BaseInstance3DBoxes') -> Tensor:
        """Calculate height overlaps of two boxes.

        Note:
            This function calculates the height overlaps between ``boxes1`` and
            ``boxes2``. ``boxes1`` and ``boxes2`` should be in the same type.

        Args:
            boxes1 (:obj:`BaseInstance3DBoxes`): Boxes 1 contain N boxes.
            boxes2 (:obj:`BaseInstance3DBoxes`): Boxes 2 contain M boxes.

        Returns:
            Tensor: Calculated height overlap of the boxes.
        """
        assert isinstance(boxes1, BaseInstance3DBoxes)
        assert isinstance(boxes2, BaseInstance3DBoxes)
        assert type(boxes1) == type(boxes2), \
            '"boxes1" and "boxes2" should be in the same type, ' \
            f'but got {type(boxes1)} and {type(boxes2)}.'

        boxes1_top_height = boxes1.top_height.view(-1, 1)
        boxes1_bottom_height = boxes1.bottom_height.view(-1, 1)
        boxes2_top_height = boxes2.top_height.view(1, -1)
        boxes2_bottom_height = boxes2.bottom_height.view(1, -1)

        highest_of_bottom = torch.max(boxes1_bottom_height,
                                      boxes2_bottom_height)
        lowest_of_top = torch.min(boxes1_top_height, boxes2_top_height)
        overlaps_h = torch.clamp(lowest_of_top - highest_of_bottom, min=0)
        return overlaps_h

    @classmethod
    def overlaps(cls,
                 boxes1: 'BaseInstance3DBoxes',
                 boxes2: 'BaseInstance3DBoxes',
                 mode: str = 'iou') -> Tensor:
        """Calculate 3D overlaps of two boxes.

        Note:
            This function calculates the overlaps between ``boxes1`` and
            ``boxes2``. ``boxes1`` and ``boxes2`` should be in the same type.

        Args:
            boxes1 (:obj:`BaseInstance3DBoxes`): Boxes 1 contain N boxes.
            boxes2 (:obj:`BaseInstance3DBoxes`): Boxes 2 contain M boxes.
            mode (str): Mode of IoU calculation. Can be 'iou' (intersection
                over union) or 'iof' (intersection over foreground).
                Defaults to 'iou'.

        Returns:
            Tensor: Calculated 3D overlap of the boxes.
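
        Example:
            An illustrative sketch; it assumes a concrete subclass such as
            ``LiDARInstance3DBoxes``, which is not imported in this module:

            >>> from mmdet3d.structures import LiDARInstance3DBoxes
            >>> boxes_a = LiDARInstance3DBoxes(torch.rand(4, 7))
            >>> boxes_b = LiDARInstance3DBoxes(torch.rand(6, 7))
            >>> ious = LiDARInstance3DBoxes.overlaps(boxes_a, boxes_b)
            >>> ious.shape  # pairwise 3D IoU -> torch.Size([4, 6])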
        """
        assert isinstance(boxes1, BaseInstance3DBoxes)
        assert isinstance(boxes2, BaseInstance3DBoxes)
        assert type(boxes1) == type(boxes2), \
            '"boxes1" and "boxes2" should be in the same type, ' \
            f'but got {type(boxes1)} and {type(boxes2)}.'

        assert mode in ['iou', 'iof']

        rows = len(boxes1)
        cols = len(boxes2)
        if rows * cols == 0:
            return boxes1.tensor.new(rows, cols)

        # height overlap
        overlaps_h = cls.height_overlaps(boxes1, boxes2)

        # Restrict the min values of W and H to avoid memory overflow in
        # ``box_iou_rotated``.
        boxes1_bev, boxes2_bev = boxes1.bev, boxes2.bev
        boxes1_bev[:, 2:4] = boxes1_bev[:, 2:4].clamp(min=1e-4)
        boxes2_bev[:, 2:4] = boxes2_bev[:, 2:4].clamp(min=1e-4)

        # bev overlap
        iou2d = box_iou_rotated(boxes1_bev, boxes2_bev)
        areas1 = (boxes1_bev[:, 2] * boxes1_bev[:, 3]).unsqueeze(1).expand(
            rows, cols)
        areas2 = (boxes2_bev[:, 2] * boxes2_bev[:, 3]).unsqueeze(0).expand(
            rows, cols)
        overlaps_bev = iou2d * (areas1 + areas2) / (1 + iou2d)

        # 3d overlaps
        overlaps_3d = overlaps_bev.to(boxes1.device) * overlaps_h

        volume1 = boxes1.volume.view(-1, 1)
        volume2 = boxes2.volume.view(1, -1)

        if mode == 'iou':
            # the clamp func is used to avoid division by 0
            iou3d = overlaps_3d / torch.clamp(
                volume1 + volume2 - overlaps_3d, min=1e-8)
        else:
            iou3d = overlaps_3d / torch.clamp(volume1, min=1e-8)

        return iou3d

    def new_box(
        self, data: Union[Tensor, np.ndarray, Sequence[Sequence[float]]]
    ) -> 'BaseInstance3DBoxes':
        """Create a new box object with data.

        The new box and its tensor have the similar properties as self and
        self.tensor, respectively.

        Args:
            data (Tensor or np.ndarray or Sequence[Sequence[float]]): Data to
                be copied.

        Returns:
            :obj:`BaseInstance3DBoxes`: A new bbox object with ``data``, the
            object's other properties are similar to ``self``.
        """
        new_tensor = self.tensor.new_tensor(data) \
            if not isinstance(data, Tensor) else data.to(self.device)
        original_type = type(self)
        return original_type(
            new_tensor, box_dim=self.box_dim, with_yaw=self.with_yaw)

    def points_in_boxes_part(
            self,
            points: Tensor,
            boxes_override: Optional[Tensor] = None) -> Tensor:
        """Find the box in which each point is.

        Args:
            points (Tensor): Points in shape (1, M, 3) or (M, 3), 3 dimensions
                are (x, y, z) in LiDAR or depth coordinate.
            boxes_override (Tensor, optional): Boxes to override `self.tensor`.
                Defaults to None.

        Note:
            If a point is enclosed by multiple boxes, the index of the first
            box will be returned.

        Returns:
            Tensor: The index of the first box that each point is in with shape
            (M, ). Default value is -1 (if the point is not enclosed by any
            box).
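
        Example:
            An illustrative sketch; it assumes a concrete subclass such as
            ``LiDARInstance3DBoxes`` (not imported here) and CUDA tensors,
            since the underlying ``mmcv`` op is implemented for the GPU:

            >>> from mmdet3d.structures import LiDARInstance3DBoxes
            >>> boxes = LiDARInstance3DBoxes(torch.rand(5, 7))
            >>> points = torch.rand(100, 3, device='cuda')
            >>> idx = boxes.points_in_boxes_part(points)  # shape (100, )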
        """
        if boxes_override is not None:
            boxes = boxes_override
        else:
            boxes = self.tensor

        points_clone = points.clone()[..., :3]
        if points_clone.dim() == 2:
            points_clone = points_clone.unsqueeze(0)
        else:
            assert points_clone.dim() == 3 and points_clone.shape[0] == 1

        boxes = boxes.to(points_clone.device).unsqueeze(0)
        box_idx = points_in_boxes_part(points_clone, boxes)

        return box_idx.squeeze(0)

    def points_in_boxes_all(self,
                            points: Tensor,
                            boxes_override: Optional[Tensor] = None) -> Tensor:
        """Find all boxes in which each point is.

        Args:
            points (Tensor): Points in shape (1, M, 3) or (M, 3), 3 dimensions
                are (x, y, z) in LiDAR or depth coordinate.
            boxes_override (Tensor, optional): Boxes to override `self.tensor`.
                Defaults to None.

        Returns:
            Tensor: A tensor indicating whether a point is in a box with shape
            (M, T). T is the number of boxes. Denote this tensor as A: if the
            m-th point is in the t-th box, then `A[m, t] == 1`, otherwise
            `A[m, t] == 0`.
        """
        if boxes_override is not None:
            boxes = boxes_override
        else:
            boxes = self.tensor

        points_clone = points.clone()[..., :3]
        if points_clone.dim() == 2:
            points_clone = points_clone.unsqueeze(0)
        else:
            assert points_clone.dim() == 3 and points_clone.shape[0] == 1

        boxes = boxes.to(points_clone.device).unsqueeze(0)
        box_idxs_of_pts = points_in_boxes_all(points_clone, boxes)

        return box_idxs_of_pts.squeeze(0)

    def points_in_boxes(self,
                        points: Tensor,
                        boxes_override: Optional[Tensor] = None) -> Tensor:
        warnings.warn('DeprecationWarning: points_in_boxes is a deprecated '
                      'method, please consider using points_in_boxes_part.')
        return self.points_in_boxes_part(points, boxes_override)

    def points_in_boxes_batch(
            self,
            points: Tensor,
            boxes_override: Optional[Tensor] = None) -> Tensor:
        warnings.warn('DeprecationWarning: points_in_boxes_batch is a '
                      'deprecated method, please consider using '
                      'points_in_boxes_all.')
        return self.points_in_boxes_all(points, boxes_override)