import numpy as np
import torch


def limit_period(val, offset=0.5, period=np.pi):
    """Limit the value into a period for periodic function.

    Args:
        val (torch.Tensor): The value to be converted.
        offset (float, optional): Offset to set the value range.
            Defaults to 0.5.
        period (float, optional): Period of the value. Defaults to np.pi.

    Returns:
        torch.Tensor: Value in the range of
            [-offset * period, (1-offset) * period].
    """
    return val - torch.floor(val / period + offset) * period
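
# Illustrative usage sketch (hypothetical values): with the default
# offset=0.5 and period=np.pi, values are wrapped into [-pi / 2, pi / 2).
#   >>> val = torch.tensor([0.25 * np.pi, 0.75 * np.pi])
#   >>> limit_period(val)
#   tensor([ 0.7854, -0.7854])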


def rotation_3d_in_axis(points, angles, axis=0):
    """Rotate points by angles according to axis.

    Args:
        points (torch.Tensor): Points of shape (N, M, 3).
        angles (torch.Tensor): Vector of angles in shape (N,).
        axis (int, optional): The axis to be rotated. Defaults to 0.

    Raises:
        ValueError: If the axis is not in the range [0, 1, 2].

    Returns:
        torch.Tensor: Rotated points in shape (N, M, 3).
    """
    rot_sin = torch.sin(angles)
    rot_cos = torch.cos(angles)
    ones = torch.ones_like(rot_cos)
    zeros = torch.zeros_like(rot_cos)
    if axis == 1:
        rot_mat_T = torch.stack([
            torch.stack([rot_cos, zeros, -rot_sin]),
            torch.stack([zeros, ones, zeros]),
            torch.stack([rot_sin, zeros, rot_cos])
        ])
    elif axis == 2 or axis == -1:
        rot_mat_T = torch.stack([
            torch.stack([rot_cos, -rot_sin, zeros]),
            torch.stack([rot_sin, rot_cos, zeros]),
            torch.stack([zeros, zeros, ones])
        ])
    elif axis == 0:
        rot_mat_T = torch.stack([
            torch.stack([zeros, rot_cos, -rot_sin]),
            torch.stack([zeros, rot_sin, rot_cos]),
            torch.stack([ones, zeros, zeros])
        ])
    else:
        raise ValueError(f'axis should be in range [0, 1, 2], got {axis}')

    return torch.einsum('aij,jka->aik', (points, rot_mat_T))
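
# Illustrative usage sketch (hypothetical values): rotating a single point
# about the z-axis (axis=2). With the sign convention of the matrices above,
# the point (1, 0, 0) rotated by pi / 2 maps to approximately (0, -1, 0).
#   >>> points = torch.tensor([[[1.0, 0.0, 0.0]]])  # shape (N=1, M=1, 3)
#   >>> angles = torch.tensor([np.pi / 2])          # shape (N=1,)
#   >>> rotation_3d_in_axis(points, angles, axis=2)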


def xywhr2xyxyr(boxes_xywhr):
    """Convert a rotated boxes in XYWHR format to XYXYR format.

    Args:
        boxes_xywhr (torch.Tensor): Rotated boxes in XYWHR format.

    Returns:
        torch.Tensor: Converted boxes in XYXYR format.
    """
    boxes = torch.zeros_like(boxes_xywhr)
    half_w = boxes_xywhr[:, 2] / 2
    half_h = boxes_xywhr[:, 3] / 2

    boxes[:, 0] = boxes_xywhr[:, 0] - half_w
    boxes[:, 1] = boxes_xywhr[:, 1] - half_h
    boxes[:, 2] = boxes_xywhr[:, 0] + half_w
    boxes[:, 3] = boxes_xywhr[:, 1] + half_h
    boxes[:, 4] = boxes_xywhr[:, 4]
    return boxes
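
# Illustrative usage sketch (hypothetical values): the (x, y, w, h) part is
# converted to corner coordinates (x1, y1, x2, y2); the rotation r is copied.
#   >>> boxes = torch.tensor([[1.0, 2.0, 4.0, 2.0, 0.3]])
#   >>> xywhr2xyxyr(boxes)
#   tensor([[-1.0000,  1.0000,  3.0000,  3.0000,  0.3000]])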


def get_box_type(box_type):
    """Get the type and mode of box structure.

    Args:
        box_type (str): Indicate the type of box structure.
            The valid values are "LiDAR", "Camera", or "Depth".

    Returns:
        tuple: box type and box mode.
    """
    from .box_3d_mode import (Box3DMode, CameraInstance3DBoxes,
                              DepthInstance3DBoxes, LiDARInstance3DBoxes)
    box_type_lower = box_type.lower()
    if box_type_lower == 'lidar':
        box_type_3d = LiDARInstance3DBoxes
        box_mode_3d = Box3DMode.LIDAR
    elif box_type_lower == 'camera':
        box_type_3d = CameraInstance3DBoxes
        box_mode_3d = Box3DMode.CAM
    elif box_type_lower == 'depth':
        box_type_3d = DepthInstance3DBoxes
        box_mode_3d = Box3DMode.DEPTH
    else:
        raise ValueError('Only "box_type" of "camera", "lidar", "depth"'
                         f' are supported, got {box_type}')

    return box_type_3d, box_mode_3d
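
# Illustrative usage sketch: the lookup is case-insensitive, so 'LiDAR' and
# 'lidar' both resolve to the LiDAR box class and mode.
#   >>> box_type_3d, box_mode_3d = get_box_type('LiDAR')
#   >>> # box_type_3d is LiDARInstance3DBoxes, box_mode_3d is Box3DMode.LIDAR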


def points_cam2img(points_3d, proj_mat):
    """Project points from camera coordicates to image coordinates.

    Args:
        points_3d (torch.Tensor): Points in shape (N, 3).
        proj_mat (torch.Tensor): Transformation matrix between coordinates.

    Returns:
        torch.Tensor: Points in image coordinates with shape [N, 2].
    """
    points_num = list(points_3d.shape)[:-1]
    points_shape = np.concatenate([points_num, [1]], axis=0).tolist()
    # Previous implementation used new_zeros; new_ones yields better results.
    points_4 = torch.cat(
        [points_3d, points_3d.new_ones(*points_shape)], dim=-1)
    # point_2d = points_4 @ tf.transpose(proj_mat, [1, 0])
    point_2d = torch.matmul(points_4, proj_mat.t())
    point_2d_res = point_2d[..., :2] / point_2d[..., 2:3]
    return point_2d_res
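
# Illustrative usage sketch (hypothetical values): projecting one point with a
# 3x4 projection matrix whose intrinsics are the identity, so the result is
# simply (x / z, y / z).
#   >>> proj_mat = torch.tensor([[1., 0., 0., 0.],
#   ...                          [0., 1., 0., 0.],
#   ...                          [0., 0., 1., 0.]])
#   >>> points_3d = torch.tensor([[2.0, 4.0, 2.0]])
#   >>> points_cam2img(points_3d, proj_mat)
#   tensor([[1., 2.]])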