Unverified commit 32a4328b authored by Wenwei Zhang, committed by GitHub

Bump version to V1.0.0rc0

parents 86cc487c a8817998
# Copyright (c) OpenMMLab. All rights reserved.
from .dgcnn_fa_module import DGCNNFAModule
from .dgcnn_fp_module import DGCNNFPModule
from .dgcnn_gf_module import DGCNNGFModule

__all__ = ['DGCNNFAModule', 'DGCNNFPModule', 'DGCNNGFModule']
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, force_fp32
from torch import nn as nn


class DGCNNFAModule(BaseModule):
    """Point feature aggregation module used in DGCNN.

    Aggregate all the features of points.

    Args:
        mlp_channels (list[int]): List of mlp channels.
        norm_cfg (dict, optional): Type of normalization method.
            Defaults to dict(type='BN1d').
        act_cfg (dict, optional): Type of activation method.
            Defaults to dict(type='ReLU').
        init_cfg (dict, optional): Initialization config. Defaults to None.
    """

    def __init__(self,
                 mlp_channels,
                 norm_cfg=dict(type='BN1d'),
                 act_cfg=dict(type='ReLU'),
                 init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        self.fp16_enabled = False
        self.mlps = nn.Sequential()
        for i in range(len(mlp_channels) - 1):
            self.mlps.add_module(
                f'layer{i}',
                ConvModule(
                    mlp_channels[i],
                    mlp_channels[i + 1],
                    kernel_size=(1, ),
                    stride=(1, ),
                    conv_cfg=dict(type='Conv1d'),
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))

    @force_fp32()
    def forward(self, points):
        """forward.

        Args:
            points (List[Tensor]): Tensors of the features to be aggregated.

        Returns:
            Tensor: (B, N, M) M = mlp[-1]. Tensor of the output points.
        """
        if len(points) > 1:
            new_points = torch.cat(points[1:], dim=-1)
            new_points = new_points.transpose(1, 2).contiguous()  # (B, C, N)
            new_points_copy = new_points

            new_points = self.mlps(new_points)

            new_fa_points = new_points.max(dim=-1, keepdim=True)[0]
            new_fa_points = new_fa_points.repeat(1, 1, new_points.shape[-1])

            new_points = torch.cat([new_fa_points, new_points_copy], dim=1)
            new_points = new_points.transpose(1, 2).contiguous()
        else:
            new_points = points

        return new_points
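As a quick reference, a minimal usage sketch of DGCNNFAModule. Shapes are illustrative and the top-level import path is assumed (the class can also be imported from the dgcnn_modules package shown above); the module only uses Conv1d, so it runs on CPU.

import torch

from mmdet3d.ops import DGCNNFAModule  # assumed export path

# Toy shapes: batch of 2, 1024 points, two 64-dim graph-feature outputs
# plus the raw input as points[0].
B, N = 2, 1024
raw = torch.rand(B, N, 6)
gf1 = torch.rand(B, N, 64)
gf2 = torch.rand(B, N, 64)

# points[1:] are concatenated along the channel dim (64 + 64 = 128),
# so the first mlp channel must be 128.
fa_module = DGCNNFAModule(mlp_channels=[128, 1024])
out = fa_module([raw, gf1, gf2])
print(out.shape)  # torch.Size([2, 1024, 1152]) = 1024 (pooled) + 128 (skip)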
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, force_fp32
from torch import nn as nn


class DGCNNFPModule(BaseModule):
    """Point feature propagation module used in DGCNN.

    Propagate the features from one set to another.

    Args:
        mlp_channels (list[int]): List of mlp channels.
        norm_cfg (dict, optional): Type of normalization method.
            Defaults to dict(type='BN1d').
        act_cfg (dict, optional): Type of activation method.
            Defaults to dict(type='ReLU').
        init_cfg (dict, optional): Initialization config. Defaults to None.
    """

    def __init__(self,
                 mlp_channels,
                 norm_cfg=dict(type='BN1d'),
                 act_cfg=dict(type='ReLU'),
                 init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        self.fp16_enabled = False
        self.mlps = nn.Sequential()
        for i in range(len(mlp_channels) - 1):
            self.mlps.add_module(
                f'layer{i}',
                ConvModule(
                    mlp_channels[i],
                    mlp_channels[i + 1],
                    kernel_size=(1, ),
                    stride=(1, ),
                    conv_cfg=dict(type='Conv1d'),
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))

    @force_fp32()
    def forward(self, points):
        """forward.

        Args:
            points (Tensor): (B, N, C) Tensor of the input points.

        Returns:
            Tensor: (B, N, M) M = mlp[-1]. Tensor of the new points.
        """
        if points is not None:
            new_points = points.transpose(1, 2).contiguous()  # (B, C, N)
            new_points = self.mlps(new_points)
            new_points = new_points.transpose(1, 2).contiguous()
        else:
            new_points = points

        return new_points
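A similar sketch for DGCNNFPModule, again with illustrative shapes and an assumed import path.

import torch

from mmdet3d.ops import DGCNNFPModule  # assumed export path

B, N = 2, 1024
points = torch.rand(B, N, 1216)  # e.g. aggregated + per-point features

fp_module = DGCNNFPModule(mlp_channels=[1216, 512])
new_points = fp_module(points)
print(new_points.shape)  # torch.Size([2, 1024, 512])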
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import ConvModule
from torch import nn as nn
from torch.nn import functional as F

from ..group_points import GroupAll, QueryAndGroup, grouping_operation


class BaseDGCNNGFModule(nn.Module):
    """Base module for point graph feature module used in DGCNN.

    Args:
        radii (list[float]): List of radii in each knn or ball query.
        sample_nums (list[int]): Number of samples in each knn or ball query.
        mlp_channels (list[list[int]]): Specify the mlp channels of the dgcnn
            before the global pooling for each graph feature module.
        knn_modes (list[str], optional): Type of KNN method, valid modes are
            ['F-KNN', 'D-KNN']. Defaults to ['F-KNN'].
        dilated_group (bool, optional): Whether to use dilated ball query.
            Defaults to False.
        use_xyz (bool, optional): Whether to use xyz as point features.
            Defaults to True.
        pool_mode (str, optional): Type of pooling method. Defaults to 'max'.
        normalize_xyz (bool, optional): If ball query, whether to normalize
            local XYZ with radius. Defaults to False.
        grouper_return_grouped_xyz (bool, optional): Whether to return grouped
            xyz in `QueryAndGroup`. Defaults to False.
        grouper_return_grouped_idx (bool, optional): Whether to return grouped
            idx in `QueryAndGroup`. Defaults to False.
    """

    def __init__(self,
                 radii,
                 sample_nums,
                 mlp_channels,
                 knn_modes=['F-KNN'],
                 dilated_group=False,
                 use_xyz=True,
                 pool_mode='max',
                 normalize_xyz=False,
                 grouper_return_grouped_xyz=False,
                 grouper_return_grouped_idx=False):
        super(BaseDGCNNGFModule, self).__init__()

        assert len(sample_nums) == len(
            mlp_channels
        ), 'Num_samples and mlp_channels should have the same length.'
        assert pool_mode in ['max', 'avg'
                             ], "Pool_mode should be one of ['max', 'avg']."
        assert isinstance(knn_modes, list) or isinstance(
            knn_modes, tuple), 'The type of knn_modes should be list or tuple.'

        if isinstance(mlp_channels, tuple):
            mlp_channels = list(map(list, mlp_channels))
        self.mlp_channels = mlp_channels

        self.pool_mode = pool_mode
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        self.knn_modes = knn_modes

        for i in range(len(sample_nums)):
            sample_num = sample_nums[i]
            if sample_num is not None:
                if self.knn_modes[i] == 'D-KNN':
                    grouper = QueryAndGroup(
                        radii[i],
                        sample_num,
                        use_xyz=use_xyz,
                        normalize_xyz=normalize_xyz,
                        return_grouped_xyz=grouper_return_grouped_xyz,
                        return_grouped_idx=True)
                else:
                    grouper = QueryAndGroup(
                        radii[i],
                        sample_num,
                        use_xyz=use_xyz,
                        normalize_xyz=normalize_xyz,
                        return_grouped_xyz=grouper_return_grouped_xyz,
                        return_grouped_idx=grouper_return_grouped_idx)
            else:
                grouper = GroupAll(use_xyz)
            self.groupers.append(grouper)

    def _pool_features(self, features):
        """Perform feature aggregation using pooling operation.

        Args:
            features (torch.Tensor): (B, C, N, K)
                Features of locally grouped points before pooling.

        Returns:
            torch.Tensor: (B, C, N)
                Pooled features aggregating local information.
        """
        if self.pool_mode == 'max':
            # (B, C, N, 1)
            new_features = F.max_pool2d(
                features, kernel_size=[1, features.size(3)])
        elif self.pool_mode == 'avg':
            # (B, C, N, 1)
            new_features = F.avg_pool2d(
                features, kernel_size=[1, features.size(3)])
        else:
            raise NotImplementedError

        return new_features.squeeze(-1).contiguous()
    def forward(self, points):
        """forward.

        Args:
            points (Tensor): (B, N, C) input points.

        Returns:
            Tensor: (B, N, mlp[-1]) new points generated by the last graph
                feature module.
        """
        new_points_list = [points]

        for i in range(len(self.groupers)):
            new_points = new_points_list[i]
            new_points_trans = new_points.transpose(
                1, 2).contiguous()  # (B, C, N)

            if self.knn_modes[i] == 'D-KNN':
                # (B, N, C) -> (B, N, K)
                idx = self.groupers[i](new_points[..., -3:].contiguous(),
                                       new_points[..., -3:].contiguous())[-1]

                grouped_results = grouping_operation(
                    new_points_trans, idx)  # (B, C, N) -> (B, C, N, K)
                grouped_results -= new_points_trans.unsqueeze(-1)
            else:
                grouped_results = self.groupers[i](
                    new_points, new_points)  # (B, N, C) -> (B, C, N, K)

            new_points = new_points_trans.unsqueeze(-1).repeat(
                1, 1, 1, grouped_results.shape[-1])
            new_points = torch.cat([grouped_results, new_points], dim=1)

            # (B, mlp[-1], N, K)
            new_points = self.mlps[i](new_points)

            # (B, mlp[-1], N)
            new_points = self._pool_features(new_points)
            new_points = new_points.transpose(1, 2).contiguous()
            new_points_list.append(new_points)

        return new_points
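The pooling in `_pool_features` collapses the neighbour dimension K. A small standalone check, separate from the module, that max pooling with kernel [1, K] matches a plain maximum over the last dimension:

import torch
import torch.nn.functional as F

B, C, N, K = 2, 16, 128, 20
features = torch.rand(B, C, N, K)

# Kernel [1, K] pools only across the neighbour axis, leaving (B, C, N, 1).
pooled = F.max_pool2d(features, kernel_size=[1, K]).squeeze(-1)
assert torch.allclose(pooled, features.max(dim=-1)[0])
print(pooled.shape)  # torch.Size([2, 16, 128])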
class DGCNNGFModule(BaseDGCNNGFModule):
    """Point graph feature module used in DGCNN.

    Args:
        mlp_channels (list[int]): Specify the mlp channels of the dgcnn
            before the global pooling for each graph feature module.
        num_sample (int, optional): Number of samples in each knn or ball
            query. Defaults to None.
        knn_mode (str, optional): Type of KNN method, valid modes are
            ['F-KNN', 'D-KNN']. Defaults to 'F-KNN'.
        radius (float, optional): Radius to group with.
            Defaults to None.
        dilated_group (bool, optional): Whether to use dilated ball query.
            Defaults to False.
        norm_cfg (dict, optional): Type of normalization method.
            Defaults to dict(type='BN2d').
        act_cfg (dict, optional): Type of activation method.
            Defaults to dict(type='ReLU').
        use_xyz (bool, optional): Whether to use xyz as point features.
            Defaults to True.
        pool_mode (str, optional): Type of pooling method.
            Defaults to 'max'.
        normalize_xyz (bool, optional): If ball query, whether to normalize
            local XYZ with radius. Defaults to False.
        bias (bool | str, optional): If specified as `auto`, it will be
            decided by `norm_cfg`. Bias will be set as True if `norm_cfg`
            is None, otherwise False. Defaults to 'auto'.
    """

    def __init__(self,
                 mlp_channels,
                 num_sample=None,
                 knn_mode='F-KNN',
                 radius=None,
                 dilated_group=False,
                 norm_cfg=dict(type='BN2d'),
                 act_cfg=dict(type='ReLU'),
                 use_xyz=True,
                 pool_mode='max',
                 normalize_xyz=False,
                 bias='auto'):
        super(DGCNNGFModule, self).__init__(
            mlp_channels=[mlp_channels],
            sample_nums=[num_sample],
            knn_modes=[knn_mode],
            radii=[radius],
            use_xyz=use_xyz,
            pool_mode=pool_mode,
            normalize_xyz=normalize_xyz,
            dilated_group=dilated_group)

        for i in range(len(self.mlp_channels)):
            mlp_channel = self.mlp_channels[i]

            mlp = nn.Sequential()
            # use a separate index for the inner loop to avoid shadowing `i`
            for j in range(len(mlp_channel) - 1):
                mlp.add_module(
                    f'layer{j}',
                    ConvModule(
                        mlp_channel[j],
                        mlp_channel[j + 1],
                        kernel_size=(1, 1),
                        stride=(1, 1),
                        conv_cfg=dict(type='Conv2d'),
                        norm_cfg=norm_cfg,
                        act_cfg=act_cfg,
                        bias=bias))
            self.mlps.append(mlp)
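A hedged usage sketch of DGCNNGFModule with 'D-KNN' grouping. The kNN and grouping ops need a CUDA-enabled build of mmdet3d, the import path is assumed, and the shapes are chosen for illustration only.

import torch

from mmdet3d.ops import DGCNNGFModule  # assumed export path; CUDA build required

# 6-channel points whose last three channels are the coordinates used by
# the distance-based kNN ('D-KNN').
B, N = 2, 1024
points = torch.rand(B, N, 6).cuda()

# Edge features are [grouped - center, center], i.e. 2 x 6 = 12 input channels.
gf_module = DGCNNGFModule(
    mlp_channels=[12, 64, 64],
    num_sample=20,
    knn_mode='D-KNN',
    radius=None).cuda()

new_points = gf_module(points)
print(new_points.shape)  # expected: torch.Size([2, 1024, 64])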
# Copyright (c) OpenMMLab. All rights reserved.
from .furthest_point_sample import (furthest_point_sample,
                                    furthest_point_sample_with_dist)
from .points_sampler import Points_Sampler
...
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.autograd import Function
...
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List

import torch
from mmcv.runner import force_fp32
from torch import nn as nn

from .furthest_point_sample import (furthest_point_sample,
                                    furthest_point_sample_with_dist)
...
@@ -36,13 +38,13 @@ class Points_Sampler(nn.Module):
    Args:
        num_point (list[int]): Number of sample points.
        fps_mod_list (list[str], optional): Type of FPS method, valid modes are
            ['F-FPS', 'D-FPS', 'FS']. Default: ['D-FPS'].
            F-FPS: using feature distances for FPS.
            D-FPS: using Euclidean distances of points for FPS.
            FS: using F-FPS and D-FPS simultaneously.
        fps_sample_range_list (list[int], optional):
            Range of points to apply FPS. Default: [-1].
    """

    def __init__(self,
...
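For orientation, a hedged sketch of how Points_Sampler is typically called. The forward signature (points_xyz, features) reflects my reading of the module and may differ slightly; D-FPS needs a CUDA build of the furthest_point_sample op.

import torch

from mmdet3d.ops import Points_Sampler  # assumed export path; CUDA build required

B, N = 2, 4096
points_xyz = torch.rand(B, N, 3).cuda()
features = torch.rand(B, 16, N).cuda()  # ignored by plain D-FPS

# Plain distance-based FPS over all points, keeping 1024 of them.
sampler = Points_Sampler(num_point=[1024], fps_mod_list=['D-FPS'],
                         fps_sample_range_list=[-1])
indices = sampler(points_xyz, features)
print(indices.shape)  # expected: torch.Size([2, 1024])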
# Copyright (c) OpenMMLab. All rights reserved.
import torch
@@ -7,7 +8,7 @@ def calc_square_dist(point_feat_a, point_feat_b, norm=True):
    Args:
        point_feat_a (Tensor): (B, N, C) Feature vector of each point.
        point_feat_b (Tensor): (B, M, C) Feature vector of each point.
        norm (Bool, optional): Whether to normalize the distance.
            Default: True.

    Returns:
...
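For context, the squared pairwise distance used by F-FPS can be computed with the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b. A self-contained sketch, not the library's exact implementation:

import torch


def pairwise_square_dist(feat_a, feat_b):
    # feat_a: (B, N, C), feat_b: (B, M, C) -> (B, N, M) squared distances,
    # using ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a.b
    a_sq = (feat_a ** 2).sum(dim=-1, keepdim=True)   # (B, N, 1)
    b_sq = (feat_b ** 2).sum(dim=-1).unsqueeze(1)    # (B, 1, M)
    cross = feat_a @ feat_b.transpose(1, 2)          # (B, N, M)
    return a_sq + b_sq - 2 * cross


dist = pairwise_square_dist(torch.rand(2, 8, 4), torch.rand(2, 5, 4))
print(dist.shape)  # torch.Size([2, 8, 5])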
# Copyright (c) OpenMMLab. All rights reserved.
from .gather_points import gather_points

__all__ = ['gather_points']
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.autograd import Function
...
# Copyright (c) OpenMMLab. All rights reserved.
from .group_points import GroupAll, QueryAndGroup, grouping_operation

__all__ = ['QueryAndGroup', 'GroupAll', 'grouping_operation']
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple

import torch
@@ -16,22 +18,22 @@ class QueryAndGroup(nn.Module):
    Groups with a ball query of radius

    Args:
        max_radius (float): The maximum radius of the balls.
            If None is given, we will use kNN sampling instead of ball query.
        sample_num (int): Maximum number of features to gather in the ball.
        min_radius (float, optional): The minimum radius of the balls.
            Default: 0.
        use_xyz (bool, optional): Whether to use xyz.
            Default: True.
        return_grouped_xyz (bool, optional): Whether to return grouped xyz.
            Default: False.
        normalize_xyz (bool, optional): Whether to normalize xyz.
            Default: False.
        uniform_sample (bool, optional): Whether to sample uniformly.
            Default: False
        return_unique_cnt (bool, optional): Whether to return the count of
            unique samples. Default: False.
        return_grouped_idx (bool, optional): Whether to return grouped idx.
            Default: False.
    """
...
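A hedged usage sketch of QueryAndGroup. The call signature and output layout follow the conventions documented above and my reading of the op; a CUDA build is required.

import torch

from mmdet3d.ops import QueryAndGroup  # assumed export path; CUDA build required

B, N, npoint, C = 2, 4096, 1024, 16
points_xyz = torch.rand(B, N, 3).cuda()
center_xyz = points_xyz[:, :npoint, :].contiguous()
features = torch.rand(B, C, N).cuda()

# Ball query of radius 0.4, gathering at most 32 neighbours per center;
# with use_xyz=True the grouped xyz is concatenated to the features.
grouper = QueryAndGroup(0.4, 32, use_xyz=True)
grouped = grouper(points_xyz, center_xyz, features)
print(grouped.shape)  # expected: torch.Size([2, 19, 1024, 32]) = (B, 3 + C, npoint, K)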
# Copyright (c) OpenMMLab. All rights reserved.
from .three_interpolate import three_interpolate
from .three_nn import three_nn
...
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple

import torch
from torch.autograd import Function

from . import interpolate_ext
...
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple

import torch
from torch.autograd import Function

from . import interpolate_ext
...
# Copyright (c) OpenMMLab. All rights reserved.
from .iou3d_utils import boxes_iou_bev, nms_gpu, nms_normal_gpu

__all__ = ['boxes_iou_bev', 'nms_gpu', 'nms_normal_gpu']
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from . import iou3d_cuda


def boxes_iou_bev(boxes_a, boxes_b):
    """Calculate boxes IoU in the Bird's Eye View.

    Args:
        boxes_a (torch.Tensor): Input boxes a with shape (M, 5).
@@ -22,24 +23,29 @@ def boxes_iou_bev(boxes_a, boxes_b):
    return ans_iou


def nms_gpu(boxes, scores, thresh, pre_max_size=None, post_max_size=None):
    """NMS function GPU implementation (for BEV boxes). The overlap of two
    boxes for IoU calculation is defined as the exact overlapping area of the
    two boxes. In this function, one can also set `pre_max_size` and
    `post_max_size`.

    Args:
        boxes (torch.Tensor): Input boxes with the shape of [N, 5]
            ([x1, y1, x2, y2, ry]).
        scores (torch.Tensor): Scores of boxes with the shape of [N].
        thresh (int): Threshold.
        pre_max_size (int, optional): Max size of boxes before NMS.
            Default: None.
        post_max_size (int, optional): Max size of boxes after NMS.
            Default: None.

    Returns:
        torch.Tensor: Indexes after NMS.
    """
    order = scores.sort(0, descending=True)[1]
    if pre_max_size is not None:
        order = order[:pre_max_size]
    boxes = boxes[order].contiguous()
    keep = torch.zeros(boxes.size(0), dtype=torch.long)
@@ -51,12 +57,14 @@ def nms_gpu(boxes, scores, thresh, pre_maxsize=None, post_max_size=None):


def nms_normal_gpu(boxes, scores, thresh):
    """Normal NMS function GPU implementation (for BEV boxes). The overlap of
    two boxes for IoU calculation is defined as the exact overlapping area of
    the two boxes WITH their yaw angle set to 0.

    Args:
        boxes (torch.Tensor): Input boxes with shape (N, 5).
        scores (torch.Tensor): Scores of predicted boxes with shape (N).
        thresh (torch.Tensor): Threshold of NMS.

    Returns:
        torch.Tensor: Remaining indices with scores in descending order.
...
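A hedged usage sketch of the BEV IoU and NMS helpers above. A CUDA build is required, the import path is assumed, and the box values are made up for illustration.

import torch

from mmdet3d.ops.iou3d import boxes_iou_bev, nms_gpu  # assumed import path

# Hypothetical BEV boxes in the [x1, y1, x2, y2, ry] format documented above.
boxes = torch.tensor([[0.0, 0.0, 2.0, 2.0, 0.0],
                      [0.5, 0.5, 2.5, 2.5, 0.0],
                      [10.0, 10.0, 12.0, 12.0, 0.0]]).cuda()
scores = torch.tensor([0.9, 0.8, 0.7]).cuda()

iou = boxes_iou_bev(boxes, boxes)  # (3, 3) pairwise BEV IoU
keep = nms_gpu(boxes, scores, thresh=0.3, pre_max_size=1000, post_max_size=100)
print(iou.shape, keep)  # the two overlapping boxes collapse to the higher-scoring one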
@@ -61,9 +61,9 @@ __device__ inline int check_in_box2d(const float *box, const Point &p) {
  angle_sin =
      sin(-box[4]); // rotate the point in the opposite direction of box
  float rot_x =
      (p.x - center_x) * angle_cos - (p.y - center_y) * angle_sin + center_x;
  float rot_y =
      (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos + center_y;
#ifdef DEBUG
  printf("box: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", box[0], box[1], box[2],
         box[3], box[4]);
@@ -112,9 +112,9 @@ __device__ inline void rotate_around_center(const Point &center,
                                            const float angle_cos,
                                            const float angle_sin, Point &p) {
  float new_x =
      (p.x - center.x) * angle_cos - (p.y - center.y) * angle_sin + center.x;
  float new_y =
      (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y;
  p.set(new_x, new_y);
}
...
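The updated kernel rotates each point around the box center with the standard counter-clockwise 2D rotation, passing angle = -ry to undo the box yaw. A small Python check of that formula, independent of the CUDA code:

import math


def rotate_around_center(px, py, cx, cy, angle):
    # Counter-clockwise rotation of (px, py) around (cx, cy) by `angle`,
    # matching the formula in the kernel above (which passes angle = -ry).
    cos_a, sin_a = math.cos(angle), math.sin(angle)
    new_x = (px - cx) * cos_a - (py - cy) * sin_a + cx
    new_y = (px - cx) * sin_a + (py - cy) * cos_a + cy
    return new_x, new_y


# Rotating (1, 0) by +90 degrees around the origin gives (approximately) (0, 1).
print(rotate_around_center(1.0, 0.0, 0.0, 0.0, math.pi / 2))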
# Copyright (c) OpenMMLab. All rights reserved.
from .knn import knn

__all__ = ['knn']
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.autograd import Function
@@ -27,11 +28,11 @@ class KNN(Function):
            center_xyz (Tensor): (B, npoint, 3) if transposed == False,
                else (B, 3, npoint). centers of the knn query.
            transposed (bool): whether the input tensors are transposed.
                defaults to False. Should not explicitly use this keyword
                when calling knn (=KNN.apply), just add the fourth param.

        Returns:
            Tensor: (B, k, npoint) tensor with the indices of
                the features that form k-nearest neighbours.
        """
        assert k > 0
...
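Finally, a hedged sketch of the knn op documented above. A CUDA build is required and the import path is assumed.

import torch

from mmdet3d.ops import knn  # assumed export path; CUDA build required

B, N, npoint = 2, 4096, 1024
xyz = torch.rand(B, N, 3).cuda()
center_xyz = xyz[:, :npoint, :].contiguous()

# 16 nearest neighbours of every center, as indices into the N source points.
idx = knn(16, xyz, center_xyz)
print(idx.shape)  # expected: torch.Size([2, 16, 1024]) = (B, k, npoint)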