Unverified Commit 299d666a authored by encore-zhou's avatar encore-zhou Committed by GitHub
Browse files

Feature_3dssd_FPS_With_Dist_OP (#66)

* add op fps with distance

* add op fps with distance

* modify F-FPS unittest

* modify sa module

* modify sa module

* SA Module support D-FPS and F-FPS

* modify sa module

* update points sa module

* modify point_sa_module

* modify point sa module

* reconstruct FPS

* reconstruct FPS

* modify docstring

* modify docstring
parent 010cd4ba
......@@ -2,7 +2,8 @@ from mmcv.ops import (RoIAlign, SigmoidFocalLoss, nms, roi_align,
sigmoid_focal_loss)
from .ball_query import ball_query
from .furthest_point_sample import furthest_point_sample
from .furthest_point_sample import (Points_Sampler, furthest_point_sample,
furthest_point_sample_with_dist)
from .gather_points import gather_points
from .group_points import (GroupAll, QueryAndGroup, group_points,
grouping_operation)
......@@ -24,8 +25,9 @@ __all__ = [
'SigmoidFocalLoss', 'SparseBasicBlock', 'SparseBottleneck',
'RoIAwarePool3d', 'points_in_boxes_gpu', 'points_in_boxes_cpu',
'make_sparse_convmodule', 'ball_query', 'furthest_point_sample',
'three_interpolate', 'three_nn', 'gather_points', 'grouping_operation',
'group_points', 'GroupAll', 'QueryAndGroup', 'PointSAModule',
'PointSAModuleMSG', 'PointFPModule', 'points_in_boxes_batch',
'get_compiler_version', 'get_compiling_cuda_version'
'furthest_point_sample_with_dist', 'three_interpolate', 'three_nn',
'gather_points', 'grouping_operation', 'group_points', 'GroupAll',
'QueryAndGroup', 'PointSAModule', 'PointSAModuleMSG', 'PointFPModule',
'points_in_boxes_batch', 'get_compiler_version',
'get_compiling_cuda_version', 'Points_Sampler'
]
from .furthest_point_sample import furthest_point_sample
from .furthest_point_sample import (furthest_point_sample,
furthest_point_sample_with_dist)
from .points_sampler import Points_Sampler
__all__ = ['furthest_point_sample']
__all__ = [
'furthest_point_sample', 'furthest_point_sample_with_dist',
'Points_Sampler'
]
......@@ -39,4 +39,40 @@ class FurthestPointSampling(Function):
return None, None
class FurthestPointSamplingWithDist(Function):
    """Furthest point sampling driven by a precomputed distance matrix.

    Iteratively picks the point whose minimum distance to the already
    selected set is largest, so the chosen subset covers the input well.
    """

    @staticmethod
    def forward(ctx, points_dist: torch.Tensor,
                num_points: int) -> torch.Tensor:
        """Run the sampling op.

        Args:
            points_dist (Tensor): (B, N, N) distance between each point pair.
            num_points (int): Number of points in the sampled set.

        Returns:
            Tensor: (B, num_points) int32 indices of the sampled points.
        """
        assert points_dist.is_contiguous()

        batch, n_total, _ = points_dist.size()
        idx = points_dist.new_zeros([batch, num_points], dtype=torch.int32)
        # Per-point running minimum distance to the selected set; seeded with
        # a large value so the first comparison always succeeds.
        min_dist = points_dist.new_zeros([batch, n_total]).fill_(1e10)

        furthest_point_sample_ext.furthest_point_sampling_with_dist_wrapper(
            batch, n_total, num_points, points_dist, min_dist, idx)
        ctx.mark_non_differentiable(idx)
        return idx

    @staticmethod
    def backward(xyz, a=None):
        # Index selection carries no gradient.
        return None, None
# Functional aliases so callers can invoke the autograd Functions directly.
furthest_point_sample = FurthestPointSampling.apply
furthest_point_sample_with_dist = FurthestPointSamplingWithDist.apply
import torch
from torch import nn as nn
from typing import List
from .furthest_point_sample import (furthest_point_sample,
furthest_point_sample_with_dist)
from .utils import calc_square_dist
def get_sampler_type(sampler_type):
    """Map a sampler-type name to its points-sampler class.

    Args:
        sampler_type (str): The type of points sampler,
            one of "D-FPS", "F-FPS" or "FS".

    Returns:
        class: The points sampler class matching ``sampler_type``.

    Raises:
        ValueError: If ``sampler_type`` is not a supported name.
    """
    if sampler_type == 'D-FPS':
        return DFPS_Sampler
    if sampler_type == 'F-FPS':
        return FFPS_Sampler
    if sampler_type == 'FS':
        return FS_Sampler
    raise ValueError('Only "sampler_type" of "D-FPS", "F-FPS", or "FS"'
                     f' are supported, got {sampler_type}')
class Points_Sampler(nn.Module):
    """Sample point indices with one or more FPS strategies.

    Args:
        num_point (list[int]): Number of sample points per strategy.
        fps_mod_list (list[str]): Type of FPS method, valid mods are
            ['F-FPS', 'D-FPS', 'FS']. Default: ['D-FPS'].
            F-FPS: using feature distances for FPS.
            D-FPS: using Euclidean distances of points for FPS.
            FS: using F-FPS and D-FPS simultaneously.
        fps_sample_range_list (list[int]): Range of points each strategy may
            sample from; -1 means "to the end". Default: [-1].
    """

    def __init__(self,
                 num_point: List[int],
                 fps_mod_list: List[str] = ['D-FPS'],
                 fps_sample_range_list: List[int] = [-1]):
        super(Points_Sampler, self).__init__()
        # One entry per strategy: num_point, fps_mod_list and
        # fps_sample_range_list must line up element-wise.
        assert len(num_point) == len(fps_mod_list) == len(
            fps_sample_range_list)
        self.num_point = num_point
        self.fps_sample_range_list = fps_sample_range_list
        self.samplers = nn.ModuleList(
            get_sampler_type(fps_mod)() for fps_mod in fps_mod_list)

    def forward(self, points_xyz, features):
        """Sample indices from the input point cloud.

        Args:
            points_xyz (Tensor): (B, N, 3) xyz coordinates of the points.
            features (Tensor): (B, C, N) descriptors of the points.

        Returns:
            Tensor: (B, M) indices of the sampled points, concatenated
                over all strategies.
        """
        indices = []
        start = 0
        for end, sampler, npoint in zip(self.fps_sample_range_list,
                                        self.samplers, self.num_point):
            assert end < points_xyz.shape[1]
            if end == -1:
                # Take everything from `start` onwards.
                sub_xyz = points_xyz[:, start:]
                sub_features = features[:, :, start:]
            else:
                sub_xyz = points_xyz[:, start:end]
                sub_features = features[:, :, start:end]
            fps_idx = sampler(sub_xyz.contiguous(), sub_features, npoint)
            # Shift local indices back into the full-cloud index space.
            indices.append(fps_idx + start)
            start += end
        return torch.cat(indices, dim=1)
class DFPS_Sampler(nn.Module):
    """D-FPS sampler.

    Runs furthest point sampling on Euclidean distances of the coordinates.
    """

    def __init__(self):
        super(DFPS_Sampler, self).__init__()

    def forward(self, points, features, npoint):
        """Select ``npoint`` indices with distance-based FPS."""
        return furthest_point_sample(points.contiguous(), npoint)
class FFPS_Sampler(nn.Module):
    """F-FPS sampler.

    Runs furthest point sampling on distances in feature space.
    """

    def __init__(self):
        super(FFPS_Sampler, self).__init__()

    def forward(self, points, features, npoint):
        """Select ``npoint`` indices with feature-distance FPS."""
        # Concatenate coordinates with (transposed) descriptors so the
        # pairwise distance reflects both geometry and features.
        fused = torch.cat([points, features.transpose(1, 2)], dim=2)
        dist_mat = calc_square_dist(fused, fused, norm=False)
        return furthest_point_sample_with_dist(dist_mat, npoint)
class FS_Sampler(nn.Module):
    """FS sampler.

    Applies F-FPS and D-FPS on the same input and concatenates the results,
    yielding ``2 * npoint`` indices.
    """

    def __init__(self):
        super(FS_Sampler, self).__init__()

    def forward(self, points, features, npoint):
        """Select indices with F-FPS and D-FPS simultaneously."""
        fused = torch.cat([points, features.transpose(1, 2)], dim=2)
        dist_mat = calc_square_dist(fused, fused, norm=False)
        idx_ffps = furthest_point_sample_with_dist(dist_mat, npoint)
        idx_dfps = furthest_point_sample(points, npoint)
        return torch.cat([idx_ffps, idx_dfps], dim=1)
......@@ -19,6 +19,16 @@ void furthest_point_sampling_kernel_launcher(int b, int n, int m,
const float *dataset, float *temp,
int *idxs, cudaStream_t stream);
int furthest_point_sampling_with_dist_wrapper(int b, int n, int m,
at::Tensor points_tensor,
at::Tensor temp_tensor,
at::Tensor idx_tensor);
void furthest_point_sampling_with_dist_kernel_launcher(int b, int n, int m,
const float *dataset,
float *temp, int *idxs,
cudaStream_t stream);
int furthest_point_sampling_wrapper(int b, int n, int m,
at::Tensor points_tensor,
at::Tensor temp_tensor,
......@@ -32,7 +42,24 @@ int furthest_point_sampling_wrapper(int b, int n, int m,
return 1;
}
// Launch furthest point sampling with a precomputed (B, N, N) distance
// matrix. temp_tensor is a (B, N) float workspace for running minima and
// idx_tensor receives the (B, M) sampled indices.
int furthest_point_sampling_with_dist_wrapper(int b, int n, int m,
                                              at::Tensor points_tensor,
                                              at::Tensor temp_tensor,
                                              at::Tensor idx_tensor) {
  // data_ptr<T>() replaces the deprecated Tensor::data<T>() accessor.
  const float *points = points_tensor.data_ptr<float>();
  float *temp = temp_tensor.data_ptr<float>();
  int *idx = idx_tensor.data_ptr<int>();

  // Run on the stream PyTorch is currently using so ordering is preserved.
  cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
  furthest_point_sampling_with_dist_kernel_launcher(b, n, m, points, temp, idx,
                                                    stream);
  return 1;
}
// Bind the CUDA sampling wrappers into the Python extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  // Classic furthest point sampling on xyz coordinates.
  m.def("furthest_point_sampling_wrapper", &furthest_point_sampling_wrapper,
        "furthest_point_sampling_wrapper");
  // Furthest point sampling driven by a precomputed distance matrix.
  m.def("furthest_point_sampling_with_dist_wrapper",
        &furthest_point_sampling_with_dist_wrapper,
        "furthest_point_sampling_with_dist_wrapper");
}
......@@ -207,3 +207,194 @@ void furthest_point_sampling_kernel_launcher(int b, int n, int m,
exit(-1);
}
}
// Modified from
// https://github.com/qiqihaer/3DSSD-pytorch/blob/master/lib/pointnet2/src/sampling_gpu.cu
// Modified from
// https://github.com/qiqihaer/3DSSD-pytorch/blob/master/lib/pointnet2/src/sampling_gpu.cu
//
// Furthest point sampling using a precomputed pairwise distance matrix.
//   dataset: (B, N, N) pairwise distances between points
//   temp:    (B, N) running minimum distance of each point to the chosen set
// output:
//   idxs:    (B, M) indices of the selected points
// One thread block handles one batch element; block_size is a compile-time
// power of two so the reduction below can be fully unrolled.
template <unsigned int block_size>
__global__ void furthest_point_sampling_with_dist_kernel(
    int b, int n, int m, const float *__restrict__ dataset,
    float *__restrict__ temp, int *__restrict__ idxs) {
  if (m <= 0)
    return;
  // Per-block scratch for the max-distance reduction: candidate distance and
  // the corresponding point index for each thread.
  __shared__ float dists[block_size];
  __shared__ int dists_i[block_size];

  // Advance all pointers to this batch element's slice.
  int batch_index = blockIdx.x;
  dataset += batch_index * n * n;
  temp += batch_index * n;
  idxs += batch_index * m;

  int tid = threadIdx.x;
  const int stride = block_size;

  // Point 0 is always taken as the seed of the sampled set.
  int old = 0;
  if (threadIdx.x == 0)
    idxs[0] = old;

  __syncthreads();
  for (int j = 1; j < m; j++) {
    int besti = 0;
    float best = -1;
    // Each thread scans a strided subset of the points, folding in the
    // distance to the most recently selected point `old` (read straight
    // from the precomputed matrix) and tracking its local furthest point.
    for (int k = tid; k < n; k += stride) {
      float d = dataset[old * n + k];
      float d2 = min(d, temp[k]);
      temp[k] = d2;
      besti = d2 > best ? k : besti;
      best = d2 > best ? d2 : best;
    }
    dists[tid] = best;
    dists_i[tid] = besti;
    __syncthreads();

    // Tree reduction over shared memory to find the block-wide furthest
    // point; `__update` (defined earlier in this file) merges a pair of
    // candidate (distance, index) slots — presumably keeping the larger
    // distance, as FPS requires.
    if (block_size >= 1024) {
      if (tid < 512) {
        __update(dists, dists_i, tid, tid + 512);
      }
      __syncthreads();
    }
    if (block_size >= 512) {
      if (tid < 256) {
        __update(dists, dists_i, tid, tid + 256);
      }
      __syncthreads();
    }
    if (block_size >= 256) {
      if (tid < 128) {
        __update(dists, dists_i, tid, tid + 128);
      }
      __syncthreads();
    }
    if (block_size >= 128) {
      if (tid < 64) {
        __update(dists, dists_i, tid, tid + 64);
      }
      __syncthreads();
    }
    if (block_size >= 64) {
      if (tid < 32) {
        __update(dists, dists_i, tid, tid + 32);
      }
      __syncthreads();
    }
    if (block_size >= 32) {
      if (tid < 16) {
        __update(dists, dists_i, tid, tid + 16);
      }
      __syncthreads();
    }
    if (block_size >= 16) {
      if (tid < 8) {
        __update(dists, dists_i, tid, tid + 8);
      }
      __syncthreads();
    }
    if (block_size >= 8) {
      if (tid < 4) {
        __update(dists, dists_i, tid, tid + 4);
      }
      __syncthreads();
    }
    if (block_size >= 4) {
      if (tid < 2) {
        __update(dists, dists_i, tid, tid + 2);
      }
      __syncthreads();
    }
    if (block_size >= 2) {
      if (tid < 1) {
        __update(dists, dists_i, tid, tid + 1);
      }
      __syncthreads();
    }

    // Slot 0 now holds the furthest point; record it and repeat.
    old = dists_i[0];
    if (tid == 0)
      idxs[j] = old;
  }
}
// Host-side launcher for furthest_point_sampling_with_dist_kernel.
//   dataset: (B, N, N) pairwise distance matrix
//   temp:    (B, N) workspace for running minimum distances
// output:
//   idx:     (B, M) sampled indices
// One CUDA block per batch element. The thread count comes from
// opt_n_threads(n) and the matching template instantiation is selected so
// the shared-memory reduction is compiled for that exact block size.
void furthest_point_sampling_with_dist_kernel_launcher(int b, int n, int m,
                                                       const float *dataset,
                                                       float *temp, int *idxs,
                                                       cudaStream_t stream) {
  cudaError_t err;
  unsigned int n_threads = opt_n_threads(n);

  switch (n_threads) {
    case 1024:
      furthest_point_sampling_with_dist_kernel<1024><<<b, n_threads, 0, stream>>>(
          b, n, m, dataset, temp, idxs);
      break;
    case 512:
      furthest_point_sampling_with_dist_kernel<512><<<b, n_threads, 0, stream>>>(
          b, n, m, dataset, temp, idxs);
      break;
    case 256:
      furthest_point_sampling_with_dist_kernel<256><<<b, n_threads, 0, stream>>>(
          b, n, m, dataset, temp, idxs);
      break;
    case 128:
      furthest_point_sampling_with_dist_kernel<128><<<b, n_threads, 0, stream>>>(
          b, n, m, dataset, temp, idxs);
      break;
    case 64:
      furthest_point_sampling_with_dist_kernel<64><<<b, n_threads, 0, stream>>>(
          b, n, m, dataset, temp, idxs);
      break;
    case 32:
      furthest_point_sampling_with_dist_kernel<32><<<b, n_threads, 0, stream>>>(
          b, n, m, dataset, temp, idxs);
      break;
    case 16:
      furthest_point_sampling_with_dist_kernel<16><<<b, n_threads, 0, stream>>>(
          b, n, m, dataset, temp, idxs);
      break;
    case 8:
      furthest_point_sampling_with_dist_kernel<8><<<b, n_threads, 0, stream>>>(
          b, n, m, dataset, temp, idxs);
      break;
    case 4:
      furthest_point_sampling_with_dist_kernel<4><<<b, n_threads, 0, stream>>>(
          b, n, m, dataset, temp, idxs);
      break;
    case 2:
      furthest_point_sampling_with_dist_kernel<2><<<b, n_threads, 0, stream>>>(
          b, n, m, dataset, temp, idxs);
      break;
    case 1:
      furthest_point_sampling_with_dist_kernel<1><<<b, n_threads, 0, stream>>>(
          b, n, m, dataset, temp, idxs);
      break;
    default:
      // NOTE(review): opt_n_threads appears to return a power of two <= 1024,
      // making this fallback unreachable — confirm against its definition.
      furthest_point_sampling_with_dist_kernel<512><<<b, n_threads, 0, stream>>>(
          b, n, m, dataset, temp, idxs);
  }

  err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
import torch
def calc_square_dist(point_feat_a, point_feat_b, norm=True):
    """Calculate the squared Euclidean distance between two point sets.

    Args:
        point_feat_a (Tensor): (B, N, C) Feature vector of each point.
        point_feat_b (Tensor): (B, M, C) Feature vector of each point.
        norm (bool): Whether to normalize the distance
            (sqrt of the squared distance divided by C).
            Default: True.

    Returns:
        Tensor: (B, N, M) Distance between each pair of points.
    """
    num_channel = point_feat_a.shape[-1]
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b; rely on broadcasting instead
    # of materializing two repeated (B, N, M) tensors.
    a_square = point_feat_a.pow(2).sum(dim=-1, keepdim=True)  # [B, N, 1]
    b_square = point_feat_b.pow(2).sum(dim=-1).unsqueeze(dim=1)  # [B, 1, M]
    cross = torch.matmul(point_feat_a, point_feat_b.transpose(1, 2))
    dist = a_square + b_square - 2 * cross
    if norm:
        # Floating-point round-off can make `dist` slightly negative for
        # near-identical points, which would produce NaN under sqrt.
        dist = torch.sqrt(dist.clamp(min=0)) / num_channel
    return dist
......@@ -4,8 +4,7 @@ from torch import nn as nn
from torch.nn import functional as F
from typing import List
from mmdet3d.ops import (GroupAll, QueryAndGroup, furthest_point_sample,
gather_points)
from mmdet3d.ops import GroupAll, Points_Sampler, QueryAndGroup, gather_points
class PointSAModuleMSG(nn.Module):
......@@ -18,6 +17,13 @@ class PointSAModuleMSG(nn.Module):
sample_nums (list[int]): Number of samples in each ball query.
mlp_channels (list[int]): Specify of the pointnet before
the global pooling for each scale.
fps_mod (list[str]: Type of FPS method, valid mod
['F-FPS', 'D-FPS', 'FS'], Default: ['D-FPS'].
F-FPS: using feature distances for FPS.
D-FPS: using Euclidean distances of points for FPS.
FS: using F-FPS and D-FPS simultaneously.
fps_sample_range_list (list[int]): Range of points to apply FPS.
Default: [-1].
norm_cfg (dict): Type of normalization method.
Default: dict(type='BN2d').
use_xyz (bool): Whether to use xyz.
......@@ -33,19 +39,40 @@ class PointSAModuleMSG(nn.Module):
radii: List[float],
sample_nums: List[int],
mlp_channels: List[List[int]],
fps_mod: List[str] = ['D-FPS'],
fps_sample_range_list: List[int] = [-1],
norm_cfg: dict = dict(type='BN2d'),
use_xyz: bool = True,
pool_mod='max',
normalize_xyz: bool = False):
normalize_xyz: bool = False,
bias='auto'):
super().__init__()
assert len(radii) == len(sample_nums) == len(mlp_channels)
assert pool_mod in ['max', 'avg']
assert isinstance(fps_mod, list) or isinstance(fps_mod, tuple)
assert isinstance(fps_sample_range_list, list) or isinstance(
fps_sample_range_list, tuple)
assert len(fps_mod) == len(fps_sample_range_list)
if isinstance(mlp_channels, tuple):
mlp_channels = list(map(list, mlp_channels))
if isinstance(num_point, int):
self.num_point = [num_point]
elif isinstance(num_point, list) or isinstance(num_point, tuple):
self.num_point = num_point
else:
raise NotImplementedError('Error type of num_point!')
self.pool_mod = pool_mod
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
self.fps_mod_list = fps_mod
self.fps_sample_range_list = fps_sample_range_list
self.points_sampler = Points_Sampler(self.num_point, self.fps_mod_list,
self.fps_sample_range_list)
for i in range(len(radii)):
radius = radii[i]
......@@ -74,15 +101,17 @@ class PointSAModuleMSG(nn.Module):
kernel_size=(1, 1),
stride=(1, 1),
conv_cfg=dict(type='Conv2d'),
norm_cfg=norm_cfg))
norm_cfg=norm_cfg,
bias=bias))
self.mlps.append(mlp)
def forward(
self,
points_xyz: torch.Tensor,
features: torch.Tensor = None,
indices: torch.Tensor = None
) -> (torch.Tensor, torch.Tensor, torch.Tensor):
indices: torch.Tensor = None,
target_xyz: torch.Tensor = None,
) -> (torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor):
"""forward.
Args:
......@@ -91,6 +120,7 @@ class PointSAModuleMSG(nn.Module):
Default: None.
indices (Tensor): (B, num_point) Index of the features.
Default: None.
target_xyz (Tensor): (B, M, 3) new_xyz coordinates of the outputs.
Returns:
Tensor: (B, M, 3) where M is the number of points.
......@@ -101,13 +131,16 @@ class PointSAModuleMSG(nn.Module):
Index of the features.
"""
new_features_list = []
xyz_flipped = points_xyz.transpose(1, 2).contiguous()
if indices is None:
indices = furthest_point_sample(points_xyz, self.num_point)
else:
assert (indices.shape[1] == self.num_point)
if indices is not None:
assert (indices.shape[1] == self.num_point[0])
new_xyz = gather_points(xyz_flipped, indices).transpose(
1, 2).contiguous() if self.num_point is not None else None
elif target_xyz is not None:
new_xyz = target_xyz.contiguous()
else:
indices = self.points_sampler(points_xyz, features)
new_xyz = gather_points(xyz_flipped, indices).transpose(
1, 2).contiguous() if self.num_point is not None else None
......@@ -152,6 +185,10 @@ class PointSAModule(PointSAModuleMSG):
Default: True.
pool_mod (str): Type of pooling method.
Default: 'max_pool'.
fps_mod (list[str]: Type of FPS method, valid mod
['F-FPS', 'D-FPS', 'FS'], Default: ['D-FPS'].
fps_sample_range_list (list[int]): Range of points to apply FPS.
Default: [-1].
normalize_xyz (bool): Whether to normalize local XYZ with radius.
Default: False.
"""
......@@ -164,6 +201,8 @@ class PointSAModule(PointSAModuleMSG):
norm_cfg: dict = dict(type='BN2d'),
use_xyz: bool = True,
pool_mod: str = 'max',
fps_mod: List[str] = ['D-FPS'],
fps_sample_range_list: List[int] = [-1],
normalize_xyz: bool = False):
super().__init__(
mlp_channels=[mlp_channels],
......@@ -173,4 +212,6 @@ class PointSAModule(PointSAModuleMSG):
norm_cfg=norm_cfg,
use_xyz=use_xyz,
pool_mod=pool_mod,
fps_mod=fps_mod,
fps_sample_range_list=fps_sample_range_list,
normalize_xyz=normalize_xyz)
......@@ -25,7 +25,7 @@ def test_pointnet_sa_module_msg():
xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32)
# (B, N, 3)
xyz = torch.from_numpy(xyz[..., :3]).view(1, -1, 3).cuda()
xyz = torch.from_numpy(xyz).view(1, -1, 3).cuda()
# (B, C, N)
features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda()
......@@ -35,6 +35,104 @@ def test_pointnet_sa_module_msg():
assert new_features.shape == torch.Size([1, 48, 16])
assert inds.shape == torch.Size([1, 16])
# test D-FPS mod
self = PointSAModuleMSG(
num_point=16,
radii=[0.2, 0.4],
sample_nums=[4, 8],
mlp_channels=[[12, 16], [12, 32]],
norm_cfg=dict(type='BN2d'),
use_xyz=False,
pool_mod='max',
fps_mod=['D-FPS'],
fps_sample_range_list=[-1]).cuda()
# test forward
new_xyz, new_features, inds = self(xyz, features)
assert new_xyz.shape == torch.Size([1, 16, 3])
assert new_features.shape == torch.Size([1, 48, 16])
assert inds.shape == torch.Size([1, 16])
# test F-FPS mod
self = PointSAModuleMSG(
num_point=16,
radii=[0.2, 0.4],
sample_nums=[4, 8],
mlp_channels=[[12, 16], [12, 32]],
norm_cfg=dict(type='BN2d'),
use_xyz=False,
pool_mod='max',
fps_mod=['F-FPS'],
fps_sample_range_list=[-1]).cuda()
# test forward
new_xyz, new_features, inds = self(xyz, features)
assert new_xyz.shape == torch.Size([1, 16, 3])
assert new_features.shape == torch.Size([1, 48, 16])
assert inds.shape == torch.Size([1, 16])
# test FS mod
self = PointSAModuleMSG(
num_point=8,
radii=[0.2, 0.4],
sample_nums=[4, 8],
mlp_channels=[[12, 16], [12, 32]],
norm_cfg=dict(type='BN2d'),
use_xyz=False,
pool_mod='max',
fps_mod=['FS'],
fps_sample_range_list=[-1]).cuda()
# test forward
new_xyz, new_features, inds = self(xyz, features)
assert new_xyz.shape == torch.Size([1, 16, 3])
assert new_features.shape == torch.Size([1, 48, 16])
assert inds.shape == torch.Size([1, 16])
# test using F-FPS mod and D-FPS mod simultaneously
self = PointSAModuleMSG(
num_point=[8, 12],
radii=[0.2, 0.4],
sample_nums=[4, 8],
mlp_channels=[[12, 16], [12, 32]],
norm_cfg=dict(type='BN2d'),
use_xyz=False,
pool_mod='max',
fps_mod=['F-FPS', 'D-FPS'],
fps_sample_range_list=[64, -1]).cuda()
# test forward
new_xyz, new_features, inds = self(xyz, features)
assert new_xyz.shape == torch.Size([1, 20, 3])
assert new_features.shape == torch.Size([1, 48, 20])
assert inds.shape == torch.Size([1, 20])
# length of 'fps_mod' should be same as 'fps_sample_range_list'
with pytest.raises(AssertionError):
PointSAModuleMSG(
num_point=8,
radii=[0.2, 0.4],
sample_nums=[4, 8],
mlp_channels=[[12, 16], [12, 32]],
norm_cfg=dict(type='BN2d'),
use_xyz=False,
pool_mod='max',
fps_mod=['F-FPS', 'D-FPS'],
fps_sample_range_list=[-1]).cuda()
# length of 'num_point' should be same as 'fps_sample_range_list'
with pytest.raises(AssertionError):
PointSAModuleMSG(
num_point=[8, 8],
radii=[0.2, 0.4],
sample_nums=[4, 8],
mlp_channels=[[12, 16], [12, 32]],
norm_cfg=dict(type='BN2d'),
use_xyz=False,
pool_mod='max',
fps_mod=['F-FPS'],
fps_sample_range_list=[-1]).cuda()
def test_pointnet_sa_module():
if not torch.cuda.is_available():
......
import pytest
import torch
from mmdet3d.ops import (ball_query, furthest_point_sample, gather_points,
from mmdet3d.ops import (ball_query, furthest_point_sample,
furthest_point_sample_with_dist, gather_points,
grouping_operation, three_interpolate, three_nn)
......@@ -312,3 +313,32 @@ def test_three_nn():
assert torch.allclose(dist, expected_dist, 1e-4)
assert torch.all(idx == expected_idx)
def test_fps_with_dist():
    """Check furthest_point_sample_with_dist on a tiny cloud and a fixture."""
    if not torch.cuda.is_available():
        pytest.skip()

    # Case 1: hand-written cloud, distance matrix computed on the fly.
    xyz = torch.tensor([[[-0.2748, 1.0020, -1.1674],
                         [0.1015, 1.3952, -1.2681],
                         [-0.8070, 2.4137, -0.5845],
                         [-1.0001, 2.1982, -0.5859],
                         [0.3841, 1.8983, -0.7431]],
                        [[-1.0696, 3.0758, -0.1899],
                         [-0.2559, 3.5521, -0.1402],
                         [0.8164, 4.0081, -0.1839],
                         [-1.1000, 3.0213, -0.8205],
                         [-0.0518, 3.7251, -0.3950]]]).cuda()
    expected_idx = torch.tensor([[0, 2, 4], [0, 2, 1]]).cuda()
    pairwise_sq_dist = ((xyz.unsqueeze(dim=1) -
                         xyz.unsqueeze(dim=2))**2).sum(-1)
    idx = furthest_point_sample_with_dist(pairwise_sq_dist, 3)
    assert torch.all(idx == expected_idx)

    # Case 2: larger regression fixture loaded from disk.
    import numpy as np
    expected_idx = torch.from_numpy(
        np.load('tests/data/ops/fps_idx.npy')).cuda()
    features_for_fps_distance = torch.from_numpy(
        np.load('tests/data/ops/features_for_fps_distance.npy')).cuda()
    idx = furthest_point_sample_with_dist(features_for_fps_distance, 32)
    assert torch.all(idx == expected_idx)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment