"git@developer.sourcefind.cn:OpenDAS/torch-cluster.git" did not exist on "f4ad453a41edd3c89c754eb22241ad7d51eb4284"
Unverified Commit 108b3303 authored by xizaoqu, committed by GitHub

[Fix] Fix bugs in voxelization and preprocessors (#2300)

* fix_voxel

* update
parent 6250b2cb
@@ -388,18 +388,26 @@ class Det3DDataPreprocessor(DetDataPreprocessor):
                 rho = torch.sqrt(res[:, 0]**2 + res[:, 1]**2)
                 phi = torch.atan2(res[:, 1], res[:, 0])
                 polar_res = torch.stack((rho, phi, res[:, 2]), dim=-1)
-                # Currently we only support PyTorch >= 1.9.0, and will
-                # implement it in voxel_layer soon for better compatibility
                 min_bound = polar_res.new_tensor(
                     self.voxel_layer.point_cloud_range[:3])
                 max_bound = polar_res.new_tensor(
                     self.voxel_layer.point_cloud_range[3:])
-                polar_res = torch.clamp(polar_res, min_bound, max_bound)
+                try:  # only support PyTorch >= 1.9.0
+                    polar_res_clamp = torch.clamp(polar_res, min_bound,
+                                                  max_bound)
+                except TypeError:
+                    polar_res_clamp = polar_res.clone()
+                    for coor_idx in range(3):
+                        polar_res_clamp[:, coor_idx][
+                            polar_res[:, coor_idx] >
+                            max_bound[coor_idx]] = max_bound[coor_idx]
+                        polar_res_clamp[:, coor_idx][
+                            polar_res[:, coor_idx] <
+                            min_bound[coor_idx]] = min_bound[coor_idx]
                 res_coors = torch.floor(
-                    (polar_res - min_bound) /
-                    polar_res.new_tensor(self.voxel_layer.voxel_size)).int()
-                if self.training:
-                    self.get_voxel_seg(res_coors, data_sample)
+                    (polar_res_clamp - min_bound) / polar_res_clamp.new_tensor(
+                        self.voxel_layer.voxel_size)).int()
+                self.get_voxel_seg(res_coors, data_sample)
                 res_coors = F.pad(res_coors, (1, 0), mode='constant', value=i)
                 res_voxels = torch.cat((polar_res, res[:, :2], res[:, 3:]),
                                        dim=-1)
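Outside the diff, here is a minimal self-contained sketch of the clamp-with-fallback pattern that this hunk introduces. The helper name clamp_point_range and the example tensors are hypothetical; only torch is assumed. Tensor-valued min/max arguments for torch.clamp require PyTorch >= 1.9.0, which is exactly why the except TypeError branch exists.

import torch

def clamp_point_range(points: torch.Tensor, min_bound: torch.Tensor,
                      max_bound: torch.Tensor) -> torch.Tensor:
    """Clamp every column of ``points`` into [min_bound[c], max_bound[c]]."""
    try:
        # Tensor-valued min/max needs PyTorch >= 1.9.0.
        return torch.clamp(points, min_bound, max_bound)
    except TypeError:
        # Older PyTorch: fall back to scalar clamping, one column at a time.
        clamped = points.clone()
        for c in range(points.shape[1]):
            clamped[:, c] = points[:, c].clamp(min=min_bound[c].item(),
                                               max=max_bound[c].item())
        return clamped

# Example: clamp (rho, phi, z) points into a cylindrical point cloud range.
polar = torch.tensor([[60.0, 4.0, 3.0], [10.0, -4.0, -5.0]])
lo = torch.tensor([0.0, -3.14159265359, -4.0])
hi = torch.tensor([50.0, 3.14159265359, 2.0])
print(clamp_point_range(polar, lo, hi))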
@@ -423,10 +431,17 @@ class Det3DDataPreprocessor(DetDataPreprocessor):
            data_sample: (:obj:`Det3DDataSample`): The annotation data of
                every sample. Add voxel-wise annotation for segmentation.
        """
-        pts_semantic_mask = data_sample.gt_pts_seg.pts_semantic_mask
-        voxel_semantic_mask, _, point2voxel_map = dynamic_scatter_3d(
-            F.one_hot(pts_semantic_mask.long()).float(), res_coors, 'mean',
-            True)
-        voxel_semantic_mask = torch.argmax(voxel_semantic_mask, dim=-1)
-        data_sample.gt_pts_seg.voxel_semantic_mask = voxel_semantic_mask
-        data_sample.gt_pts_seg.point2voxel_map = point2voxel_map
+        if self.training:
+            pts_semantic_mask = data_sample.gt_pts_seg.pts_semantic_mask
+            voxel_semantic_mask, _, point2voxel_map = dynamic_scatter_3d(
+                F.one_hot(pts_semantic_mask.long()).float(), res_coors,
+                'mean', True)
+            voxel_semantic_mask = torch.argmax(voxel_semantic_mask, dim=-1)
+            data_sample.gt_pts_seg.voxel_semantic_mask = voxel_semantic_mask
+            data_sample.gt_pts_seg.point2voxel_map = point2voxel_map
+        else:
+            pseudo_tensor = res_coors.new_ones([res_coors.shape[0], 1]).float()
+            _, _, point2voxel_map = dynamic_scatter_3d(pseudo_tensor,
+                                                       res_coors, 'mean', True)
+            data_sample.gt_pts_seg.point2voxel_map = point2voxel_map
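For reference, a rough pure-PyTorch sketch of what the one-hot 'mean' scatter plus argmax above computes: a majority-vote semantic label per voxel and a point-to-voxel index map. voxel_majority_labels is a hypothetical helper built on torch.unique rather than the dynamic_scatter_3d op, so it only mirrors the behaviour, not the implementation.

import torch
import torch.nn.functional as F

def voxel_majority_labels(pts_semantic_mask: torch.Tensor,
                          res_coors: torch.Tensor):
    """Majority semantic label per voxel plus a point-to-voxel index map."""
    # Group points that share identical voxel coordinates.
    voxel_coors, point2voxel_map = torch.unique(
        res_coors, dim=0, return_inverse=True)
    one_hot = F.one_hot(pts_semantic_mask.long()).float()
    # Accumulate the one-hot labels of all points falling into each voxel;
    # the argmax of the sum (equivalently, the mean) is the most frequent label.
    label_sums = one_hot.new_zeros((voxel_coors.shape[0], one_hot.shape[1]))
    label_sums.index_add_(0, point2voxel_map, one_hot)
    voxel_semantic_mask = label_sums.argmax(dim=-1)
    return voxel_semantic_mask, point2voxel_map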
@@ -508,8 +508,8 @@ class SegVFE(nn.Module):
            Defaults to None.
        grid_shape (tuple[float]): The grid shape of voxelization.
            Defaults to (480, 360, 32).
-        point_cloud_range (tuple[float]): The range of points
-            or voxels. Defaults to (0, -40, -3, 70.4, 40, 1).
+        point_cloud_range (tuple[float]): The range of points or voxels.
+            Defaults to (0, -3.14159265359, -4, 50, 3.14159265359, 2).
        norm_cfg (dict): Config dict of normalization layers.
        mode (str): The mode when pooling features of points
            inside a voxel. Available options include 'max' and 'avg'.
@@ -528,8 +528,8 @@ class SegVFE(nn.Module):
                 with_voxel_center: bool = False,
                 voxel_size: Optional[Sequence[float]] = None,
                 grid_shape: Sequence[float] = (480, 360, 32),
-                 point_cloud_range: Sequence[float] = (0, -180, -4, 50, 180,
-                                                       2),
+                 point_cloud_range: Sequence[float] = (0, -3.14159265359, -4,
+                                                       50, 3.14159265359, 2),
                 norm_cfg: dict = dict(type='BN1d', eps=1e-5, momentum=0.1),
                 mode: bool = 'max',
                 with_pre_norm: bool = True,
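The new default range is expressed in radians because phi comes from torch.atan2 in the preprocessor above, which returns angles in (-pi, pi]. A back-of-the-envelope check of the resulting bin sizes, assuming the voxel size is simply the range extent divided by the grid shape (variable names here are illustrative):

import math

point_cloud_range = (0, -math.pi, -4, 50, math.pi, 2)  # (rho, phi, z) bounds
grid_shape = (480, 360, 32)

voxel_size = [(point_cloud_range[i + 3] - point_cloud_range[i]) / grid_shape[i]
              for i in range(3)]
# rho bins of ~0.104 m, phi bins of 2*pi/360 rad (1 degree), z bins of 0.1875 m
print(voxel_size)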
@@ -567,8 +567,8 @@ class SegVFE(nn.Module):
        # Need pillar (voxel) size and x/y offset in order to calculate offset
        self.vx = self.voxel_size[0]
-        self.vy = self.voxel_size[0]
-        self.vz = self.voxel_size[0]
+        self.vy = self.voxel_size[1]
+        self.vz = self.voxel_size[2]
        self.x_offset = self.vx / 2 + point_cloud_range[0]
        self.y_offset = self.vy / 2 + point_cloud_range[1]
        self.z_offset = self.vz / 2 + point_cloud_range[2]
@@ -589,14 +589,13 @@ class SegVFE(nn.Module):
                    nn.Linear(in_filters, out_filters), norm_layer,
                    nn.ReLU(inplace=True)))
        self.vfe_layers = nn.ModuleList(vfe_layers)
-        self.num_vfe = len(vfe_layers)
        self.vfe_scatter = DynamicScatter(self.voxel_size,
                                          self.point_cloud_range,
                                          (mode != 'max'))
        self.compression_layers = None
        if feat_compression is not None:
-            self.compression_layers = nn.Linear(feat_channels[-1],
-                                                feat_compression)
+            self.compression_layers = nn.Sequential(
+                nn.Linear(feat_channels[-1], feat_compression), nn.ReLU())

    def forward(self, features: Tensor, coors: Tensor, *args,
                **kwargs) -> Tuple[Tensor]:
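As a quick illustration of the compression change in this hunk, the head goes from a bare Linear to Linear followed by ReLU. The sizes below are made-up example values, not defaults from the codebase.

import torch
from torch import nn

feat_channels_last, feat_compression = 64, 16  # assumed example sizes
old_head = nn.Linear(feat_channels_last, feat_compression)
new_head = nn.Sequential(
    nn.Linear(feat_channels_last, feat_compression), nn.ReLU())

voxel_feats = torch.randn(8, feat_channels_last)
compressed = new_head(voxel_feats)  # shape (8, 16), all values >= 0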
@@ -617,25 +616,23 @@ class SegVFE(nn.Module):
        if self._with_voxel_center:
            f_center = features.new_zeros(size=(features.size(0), 3))
            f_center[:, 0] = features[:, 0] - (
-                coors[:, 3].type_as(features) * self.vx + self.x_offset)
+                coors[:, 1].type_as(features) * self.vx + self.x_offset)
            f_center[:, 1] = features[:, 1] - (
                coors[:, 2].type_as(features) * self.vy + self.y_offset)
            f_center[:, 2] = features[:, 2] - (
-                coors[:, 1].type_as(features) * self.vz + self.z_offset)
+                coors[:, 3].type_as(features) * self.vz + self.z_offset)
            features_ls.append(f_center)

        # Combine together feature decorations
        features = torch.cat(features_ls[::-1], dim=-1)
        if self.pre_norm is not None:
            features = self.pre_norm(features)

        point_feats = []
-        for i, vfe in enumerate(self.vfe_layers):
+        for vfe in self.vfe_layers:
            features = vfe(features)
            point_feats.append(features)
-            if i == self.num_vfe - 1:
-                voxel_feats, voxel_coors = self.vfe_scatter(features, coors)
+        voxel_feats, voxel_coors = self.vfe_scatter(features, coors)
        if self.compression_layers is not None:
            voxel_feats = self.compression_layers(voxel_feats)
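Finally, a stand-alone sketch of the voxel-center decoration fixed above, assuming the coordinate columns are ordered (batch_idx, x, y, z), which is how Det3DDataPreprocessor pads res_coors in the first hunk. The function name voxel_center_offsets is hypothetical.

import torch

def voxel_center_offsets(features: torch.Tensor, coors: torch.Tensor,
                         voxel_size, point_cloud_range) -> torch.Tensor:
    """Offset of each point from the centre of the voxel it falls into."""
    vx, vy, vz = voxel_size
    x_offset = vx / 2 + point_cloud_range[0]
    y_offset = vy / 2 + point_cloud_range[1]
    z_offset = vz / 2 + point_cloud_range[2]
    f_center = features.new_zeros((features.size(0), 3))
    # coors columns: (batch_idx, x, y, z) -- hence indices 1, 2, 3 below.
    f_center[:, 0] = features[:, 0] - (coors[:, 1].type_as(features) * vx + x_offset)
    f_center[:, 1] = features[:, 1] - (coors[:, 2].type_as(features) * vy + y_offset)
    f_center[:, 2] = features[:, 2] - (coors[:, 3].type_as(features) * vz + z_offset)
    return f_center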