Unverified commit d7067e44, authored by Wenwei Zhang, committed by GitHub

Bump version to v1.1.0rc2

Bump to v1.1.0rc2
parents 28fe73d2 fb0e57e5
@@ -4,6 +4,7 @@ from typing import Dict, List, Optional, Tuple
import torch
from mmcv.cnn import ConvModule
from mmcv.ops import furthest_point_sample
+from mmdet.models.utils import multi_apply
from mmengine.model import BaseModule
from mmengine.structures import InstanceData
from torch import nn as nn
@@ -13,7 +14,6 @@ from mmdet3d.models.layers import VoteModule, build_sa_module
from mmdet3d.registry import MODELS
from mmdet3d.structures import Det3DDataSample
from mmdet3d.structures.bbox_3d import BaseInstance3DBoxes
-from mmdet.models.utils import multi_apply
@MODELS.register_module()
......
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Tuple
+from mmdet.models.task_modules import AssignResult, SamplingResult
from mmengine import ConfigDict
from torch import Tensor
from torch.nn import functional as F
@@ -8,7 +9,6 @@ from torch.nn import functional as F
from mmdet3d.registry import MODELS
from mmdet3d.structures import bbox3d2roi
from mmdet3d.utils import InstanceList
-from mmdet.models.task_modules import AssignResult, SamplingResult
from ...structures.det3d_data_sample import SampleList
from .base_3droi_head import Base3DRoIHead
......
@@ -2,13 +2,13 @@
from typing import Dict, Optional
import torch
+from mmdet.models.task_modules import AssignResult
from torch import Tensor
from torch.nn import functional as F
from mmdet3d.registry import MODELS, TASK_UTILS
from mmdet3d.structures import bbox3d2roi
from mmdet3d.utils.typing import InstanceList, SampleList
-from mmdet.models.task_modules import AssignResult
from .base_3droi_head import Base3DRoIHead
......
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional
import torch
from mmdet.models.task_modules import AssignResult
from mmdet.models.task_modules.samplers import SamplingResult
from torch.nn import functional as F
from mmdet3d.models.roi_heads.base_3droi_head import Base3DRoIHead
from mmdet3d.registry import MODELS
from mmdet3d.structures import bbox3d2roi
from mmdet3d.structures.det3d_data_sample import SampleList
from mmdet3d.utils import InstanceList
@MODELS.register_module()
class PVRCNNRoiHead(Base3DRoIHead):
"""RoI head for PV-RCNN.
Args:
num_classes (int): The number of classes. Defaults to 3.
semantic_head (dict, optional): Config of semantic head.
Defaults to None.
bbox_roi_extractor (dict, optional): Config of roi_extractor.
Defaults to None.
bbox_head (dict, optional): Config of bbox_head. Defaults to None.
train_cfg (dict, optional): Train config of model.
Defaults to None.
test_cfg (dict, optional): Test config of model.
Defaults to None.
init_cfg (dict, optional): Initialization config of
model. Defaults to None.
"""
def __init__(self,
num_classes: int = 3,
semantic_head: Optional[dict] = None,
bbox_roi_extractor: Optional[dict] = None,
bbox_head: Optional[dict] = None,
train_cfg: Optional[dict] = None,
test_cfg: Optional[dict] = None,
init_cfg: Optional[dict] = None):
super(PVRCNNRoiHead, self).__init__(
bbox_head=bbox_head,
bbox_roi_extractor=bbox_roi_extractor,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg)
self.num_classes = num_classes
self.semantic_head = MODELS.build(semantic_head)
self.init_assigner_sampler()
@property
def with_semantic(self):
"""bool: whether the head has semantic branch"""
return hasattr(self,
'semantic_head') and self.semantic_head is not None
def loss(self, feats_dict: dict, rpn_results_list: InstanceList,
batch_data_samples: SampleList, **kwargs) -> dict:
"""Training forward function of PVRCNNROIHead.
Args:
feats_dict (dict): Contains point-wise features.
rpn_results_list (List[:obj:`InstanceData`]): Detection results
of rpn head.
batch_data_samples (List[:obj:`Det3DDataSample`]): The Data
samples. It usually includes information such as
`gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`.
Returns:
dict: losses from each head.
- loss_semantic (torch.Tensor): loss of semantic head.
- loss_bbox (torch.Tensor): loss of bboxes.
- loss_cls (torch.Tensor): loss of object classification.
- loss_corner (torch.Tensor): loss of bboxes corners.
"""
losses = dict()
batch_gt_instances_3d = []
batch_gt_instances_ignore = []
for data_sample in batch_data_samples:
batch_gt_instances_3d.append(data_sample.gt_instances_3d)
if 'ignored_instances' in data_sample:
batch_gt_instances_ignore.append(data_sample.ignored_instances)
else:
batch_gt_instances_ignore.append(None)
if self.with_semantic:
semantic_results = self._semantic_forward_train(
feats_dict['keypoint_features'], feats_dict['keypoints'],
batch_gt_instances_3d)
losses['loss_semantic'] = semantic_results['loss_semantic']
sample_results = self._assign_and_sample(rpn_results_list,
batch_gt_instances_3d)
if self.with_bbox:
bbox_results = self._bbox_forward_train(
semantic_results['seg_preds'],
feats_dict['fusion_keypoint_features'],
feats_dict['keypoints'], sample_results)
losses.update(bbox_results['loss_bbox'])
return losses
def predict(self, feats_dict: dict, rpn_results_list: InstanceList,
batch_data_samples: SampleList, **kwargs) -> SampleList:
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.
Args:
feats_dict (dict): Contains point-wise features.
rpn_results_list (List[:obj:`InstanceData`]): Detection results
of rpn head.
batch_data_samples (List[:obj:`Det3DDataSample`]): The Data
samples. It usually includes information such as
`gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`.
Returns:
list[:obj:`InstanceData`]: Detection results of each sample
after the post process.
Each item usually contains following keys.
- scores_3d (Tensor): Classification scores, has a shape
(num_instances, )
- labels_3d (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes,
contains a tensor with shape (num_instances, C), where
C >= 7.
"""
assert self.with_bbox, 'Bbox head must be implemented.'
assert self.with_semantic, 'Semantic head must be implemented.'
batch_input_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
semantic_results = self.semantic_head(feats_dict['keypoint_features'])
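# weight each keypoint feature by its peak foreground score from the
# semantic head, e.g. a seg logit of 2.0 keeps sigmoid(2.0) ~ 0.88 of the
# feature magnitude, while confident background keypoints are damped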
point_features = feats_dict[
'fusion_keypoint_features'] * semantic_results[
'seg_preds'].sigmoid().max(
dim=-1, keepdim=True).values
rois = bbox3d2roi(
[res['bboxes_3d'].tensor for res in rpn_results_list])
labels_3d = [res['labels_3d'] for res in rpn_results_list]
bbox_results = self._bbox_forward(point_features,
feats_dict['keypoints'], rois)
results_list = self.bbox_head.get_results(rois,
bbox_results['bbox_scores'],
bbox_results['bbox_reg'],
labels_3d, batch_input_metas,
self.test_cfg)
return results_list
def _bbox_forward_train(self, seg_preds: torch.Tensor,
keypoint_features: torch.Tensor,
keypoints: torch.Tensor,
sampling_results: SamplingResult) -> dict:
"""Forward training function of roi_extractor and bbox_head.
Args:
seg_preds (torch.Tensor): Point-wise semantic predictions.
keypoint_features (torch.Tensor): Keypoint features
from the point encoder.
keypoints (torch.Tensor): Coordinates of keypoints.
sampling_results (:obj:`SamplingResult`): Sampled results used
for training.
Returns:
dict: Forward results including losses and predictions.
"""
rois = bbox3d2roi([res.bboxes for res in sampling_results])
keypoint_features = keypoint_features * seg_preds.sigmoid().max(
dim=-1, keepdim=True).values
bbox_results = self._bbox_forward(keypoint_features, keypoints, rois)
bbox_targets = self.bbox_head.get_targets(sampling_results,
self.train_cfg)
loss_bbox = self.bbox_head.loss(bbox_results['bbox_scores'],
bbox_results['bbox_reg'], rois,
*bbox_targets)
bbox_results.update(loss_bbox=loss_bbox)
return bbox_results
def _bbox_forward(self, keypoint_features: torch.Tensor,
keypoints: torch.Tensor, rois: torch.Tensor) -> dict:
"""Forward function of roi_extractor and bbox_head used in both
training and testing.
Args:
keypoint_features (torch.Tensor): Keypoint features
from the point encoder.
keypoints (torch.Tensor): Coordinates of keypoints.
rois (Tensor): RoI boxes.
Returns:
dict: Contains predictions of bbox_head and
features of roi_extractor.
"""
pooled_keypoint_features = self.bbox_roi_extractor(
keypoint_features, keypoints[..., 1:], keypoints[..., 0].int(),
rois)
bbox_score, bbox_reg = self.bbox_head(pooled_keypoint_features)
bbox_results = dict(bbox_scores=bbox_score, bbox_reg=bbox_reg)
return bbox_results
def _assign_and_sample(
self, proposal_list: InstanceList,
batch_gt_instances_3d: InstanceList) -> List[SamplingResult]:
"""Assign and sample proposals for training.
Args:
proposal_list (list[:obj:`InstanceData`]): Proposals produced by
rpn head.
batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of
gt_instances. It usually includes ``bboxes_3d`` and
``labels_3d`` attributes.
Returns:
list[:obj:`SamplingResult`]: Sampled results of each training
sample.
"""
sampling_results = []
# bbox assign
for batch_idx in range(len(proposal_list)):
cur_proposal_list = proposal_list[batch_idx]
cur_boxes = cur_proposal_list['bboxes_3d']
cur_labels_3d = cur_proposal_list['labels_3d']
cur_gt_instances_3d = batch_gt_instances_3d[batch_idx]
cur_gt_instances_3d.bboxes_3d = cur_gt_instances_3d.\
bboxes_3d.tensor
cur_gt_bboxes = batch_gt_instances_3d[batch_idx].bboxes_3d.to(
cur_boxes.device)
cur_gt_labels = batch_gt_instances_3d[batch_idx].labels_3d
batch_num_gts = 0
# gt index 0 denotes background
batch_gt_indis = cur_gt_labels.new_full((len(cur_boxes), ), 0)
batch_max_overlaps = cur_boxes.tensor.new_zeros(len(cur_boxes))
# label -1 denotes background
batch_gt_labels = cur_gt_labels.new_full((len(cur_boxes), ), -1)
# each class may have its own assigner
if isinstance(self.bbox_assigner, list):
for i, assigner in enumerate(self.bbox_assigner):
gt_per_cls = (cur_gt_labels == i)
pred_per_cls = (cur_labels_3d == i)
cur_assign_res = assigner.assign(
cur_proposal_list[pred_per_cls],
cur_gt_instances_3d[gt_per_cls])
# gather assign_results in different class into one result
batch_num_gts += cur_assign_res.num_gts
# gt inds (1-based)
gt_inds_arange_pad = gt_per_cls.nonzero(
as_tuple=False).view(-1) + 1
# pad 0 in front for unassigned indices
gt_inds_arange_pad = F.pad(
gt_inds_arange_pad, (1, 0), mode='constant', value=0)
# pad -1 in front for ignored indices
gt_inds_arange_pad = F.pad(
gt_inds_arange_pad, (1, 0), mode='constant', value=-1)
# shift by 1 so the table can be indexed with gt_inds + 1
gt_inds_arange_pad += 1
# after the lookup, -1 is ignore, 0 is bg and >0 is fg in batch_gt_indis
batch_gt_indis[pred_per_cls] = gt_inds_arange_pad[
cur_assign_res.gt_inds + 1] - 1
batch_max_overlaps[
pred_per_cls] = cur_assign_res.max_overlaps
batch_gt_labels[pred_per_cls] = cur_assign_res.labels
assign_result = AssignResult(batch_num_gts, batch_gt_indis,
batch_max_overlaps,
batch_gt_labels)
else: # for single class
assign_result = self.bbox_assigner.assign(
cur_proposal_list, cur_gt_instances_3d)
# sample boxes
sampling_result = self.bbox_sampler.sample(assign_result,
cur_boxes.tensor,
cur_gt_bboxes,
cur_gt_labels)
sampling_results.append(sampling_result)
return sampling_results
def _semantic_forward_train(self, keypoint_features: torch.Tensor,
keypoints: torch.Tensor,
batch_gt_instances_3d: InstanceList) -> dict:
"""Train semantic head.
Args:
keypoint_features (torch.Tensor): Keypoint features
from the point encoder.
keypoints (torch.Tensor): Coordinates of keypoints.
batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of
gt_instances. It usually includes ``bboxes_3d`` and
``labels_3d`` attributes.
Returns:
dict: Segmentation results including losses
"""
semantic_results = self.semantic_head(keypoint_features)
semantic_targets = self.semantic_head.get_targets(
keypoints, batch_gt_instances_3d)
loss_semantic = self.semantic_head.loss(semantic_results,
semantic_targets)
semantic_results.update(loss_semantic)
return semantic_results
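The class-wise index remapping in `_assign_and_sample` above is easy to get lost in. Below is a minimal standalone sketch (with a made-up per-class assigner output; -1 = ignore, 0 = background, >0 = 1-based gt index within the class subset) of how class-local indices become batch-level ones:

import torch
from torch.nn import functional as F

cur_gt_labels = torch.tensor([0, 1, 0, 2, 1])  # labels of all gts in a sample
gt_per_cls = cur_gt_labels == 1                # assigning class 1 here
per_cls_gt_inds = torch.tensor([0, 2, -1, 1])  # hypothetical assigner output

lut = gt_per_cls.nonzero(as_tuple=False).view(-1) + 1  # [2, 5]: global 1-based inds
lut = F.pad(lut, (1, 0), mode='constant', value=0)     # slot for background
lut = F.pad(lut, (1, 0), mode='constant', value=-1)    # slot for ignore
lut += 1                                               # [0, 1, 3, 6]
batch_gt_inds = lut[per_cls_gt_inds + 1] - 1
print(batch_gt_inds)  # tensor([ 0,  5, -1,  2]): bg, gt #5, ignore, gt #2 (1-based)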
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.roi_heads.roi_extractors import SingleRoIExtractor
+from .batch_roigridpoint_extractor import Batch3DRoIGridExtractor
from .single_roiaware_extractor import Single3DRoIAwareExtractor
from .single_roipoint_extractor import Single3DRoIPointExtractor
__all__ = [
'SingleRoIExtractor', 'Single3DRoIAwareExtractor',
-'Single3DRoIPointExtractor'
+'Single3DRoIPointExtractor', 'Batch3DRoIGridExtractor'
]
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.model import BaseModule
from mmdet3d.registry import MODELS
from mmdet3d.structures.bbox_3d import rotation_3d_in_axis
@MODELS.register_module()
class Batch3DRoIGridExtractor(BaseModule):
"""Grid point wise roi-aware Extractor.
Args:
grid_size (int): The number of grid points in a roi bbox.
Defaults to 6.
roi_layer (dict, optional): Config of the SA module used to extract
grid point features. Defaults to None.
init_cfg (dict, optional): Initialization config of
model. Defaults to None.
"""
def __init__(self,
grid_size: int = 6,
roi_layer: dict = None,
init_cfg: dict = None) -> None:
super(Batch3DRoIGridExtractor, self).__init__(init_cfg=init_cfg)
self.roi_grid_pool_layer = MODELS.build(roi_layer)
self.grid_size = grid_size
def forward(self, feats: torch.Tensor, coordinate: torch.Tensor,
batch_inds: torch.Tensor, rois: torch.Tensor) -> torch.Tensor:
"""Forward roi extractor to extract grid points feature.
Args:
feats (torch.Tensor): Keypoint features.
coordinate (torch.Tensor): Keypoint coordinates.
batch_inds (torch.Tensor): Batch index of each keypoint.
rois (torch.Tensor): RoI boxes produced by the RPN head.
Returns:
torch.Tensor: Grid points features.
"""
batch_size = int(batch_inds.max()) + 1
xyz = coordinate
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
for k in range(batch_size):
xyz_batch_cnt[k] = (batch_inds == k).sum()
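# xyz_batch_cnt[k] counts the keypoints that belong to sample k: the
# stacked SA layer below takes flattened (N1+N2+..., 3) coordinates plus
# per-sample counts instead of a padded batch dimension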
rois_batch_inds = rois[:, 0].int()
# (N1+N2+..., 6x6x6, 3)
roi_grid = self.get_dense_grid_points(rois[:, 1:])
new_xyz = roi_grid.view(-1, 3)
new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int()
for k in range(batch_size):
new_xyz_batch_cnt[k] = ((rois_batch_inds == k).sum() *
roi_grid.size(1))
pooled_points, pooled_features = self.roi_grid_pool_layer(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz.contiguous(),
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=feats.contiguous()) # (M1 + M2 ..., C)
pooled_features = pooled_features.view(-1, self.grid_size,
self.grid_size, self.grid_size,
pooled_features.shape[-1])
# (BxN, 6, 6, 6, C)
return pooled_features
def get_dense_grid_points(self, rois: torch.Tensor) -> torch.Tensor:
"""Get dense grid points from rois.
Args:
rois (torch.Tensor): RoI boxes produced by the RPN head.
Returns:
torch.Tensor: Grid points coordinates.
"""
rois_bbox = rois.clone()
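# boxes store z at the bottom face in mmdet3d's LiDAR box convention, so
# shift z by half the height to grid around the geometric center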
rois_bbox[:, 2] += rois_bbox[:, 5] / 2
faked_features = rois_bbox.new_ones(
(self.grid_size, self.grid_size, self.grid_size))
dense_idx = faked_features.nonzero()
dense_idx = dense_idx.repeat(rois_bbox.size(0), 1, 1).float()
dense_idx = ((dense_idx + 0.5) / self.grid_size)
dense_idx[..., :3] -= 0.5
roi_ctr = rois_bbox[:, :3]
roi_dim = rois_bbox[:, 3:6]
roi_grid_points = dense_idx * roi_dim.view(-1, 1, 3)
roi_grid_points = rotation_3d_in_axis(
roi_grid_points, rois_bbox[:, 6], axis=2)
roi_grid_points += roi_ctr.view(-1, 1, 3)
return roi_grid_points
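A quick standalone trace of the grid construction in `get_dense_grid_points` (zero yaw, so the rotation step is a no-op; the box size is made up):

import torch

grid_size = 2
rois = torch.tensor([[0., 0., -0.5, 4., 2., 1., 0.]])  # (x, y, z_bottom, dx, dy, dz, yaw)
rois[:, 2] += rois[:, 5] / 2                       # center z becomes 0.0
dense_idx = torch.ones(grid_size, grid_size, grid_size).nonzero()
dense_idx = dense_idx.repeat(rois.size(0), 1, 1).float()
offsets = (dense_idx + 0.5) / grid_size - 0.5      # per-axis offsets in (-0.5, 0.5)
points = offsets * rois[:, 3:6].view(-1, 1, 3) + rois[:, :3].view(-1, 1, 3)
# points holds the 8 grid locations (+-1.0, +-0.5, +-0.25) around the origin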
@@ -140,7 +140,8 @@ class Base3DSegmentor(BaseModel, metaclass=ABCMeta):
def postprocess_result(self, seg_pred_list: List[dict],
batch_img_metas: List[dict]) -> list:
""" Convert results list to `Det3DDataSample`.
"""Convert results list to `Det3DDataSample`.
Args:
seg_pred_list (List[dict]): List of segmentation results:
seg_logits from the model for each input point cloud sample.
@@ -157,7 +158,8 @@ class Base3DSegmentor(BaseModel, metaclass=ABCMeta):
for i in range(len(seg_pred_list)):
img_meta = batch_img_metas[i]
seg_pred = seg_pred_list[i]
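# each `img_meta` is now a data sample rather than a bare meta dict (the
# call site later in this commit passes batch_data_samples), so its
# metainfo and eval annotations are copied onto the new prediction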
-prediction = Det3DDataSample(**{'metainfo': img_meta})
+prediction = Det3DDataSample(**{'metainfo': img_meta.metainfo})
+prediction.set_data({'eval_ann_info': img_meta.eval_ann_info})
prediction.set_data(
{'pred_pts_seg': PointData(**{'pts_semantic_mask': seg_pred})})
predictions.append(prediction)
......
# Copyright (c) OpenMMLab. All rights reserved.
-from typing import List
+from typing import List, Tuple
import numpy as np
import torch
@@ -65,10 +65,10 @@ class EncoderDecoder3D(Base3DSegmentor):
loss. Defaults to None.
train_cfg (OptConfigType): The config for training. Defaults to None.
test_cfg (OptConfigType): The config for testing. Defaults to None.
-data_preprocessor (dict, optional): The pre-process config of
-:class:`BaseDataPreprocessor`.
-init_cfg (dict, optional): The weight initialized config for
-:class:`BaseModule`.
+data_preprocessor (OptConfigType): The pre-process config of
+:class:`BaseDataPreprocessor`. Defaults to None.
+init_cfg (OptMultiConfig): The weight initialized config for
+:class:`BaseModule`. Defaults to None.
""" # noqa: E501
def __init__(self,
@@ -80,7 +80,7 @@ class EncoderDecoder3D(Base3DSegmentor):
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
-init_cfg: OptMultiConfig = None):
+init_cfg: OptMultiConfig = None) -> None:
super(EncoderDecoder3D, self).__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
self.backbone = MODELS.build(backbone)
@@ -122,15 +122,15 @@ class EncoderDecoder3D(Base3DSegmentor):
else:
self.loss_regularization = MODELS.build(loss_regularization)
-def extract_feat(self, batch_inputs) -> List[Tensor]:
+def extract_feat(self, batch_inputs: Tensor) -> Tensor:
"""Extract features from points."""
x = self.backbone(batch_inputs)
if self.with_neck:
x = self.neck(x)
return x
-def encode_decode(self, batch_inputs: torch.Tensor,
-batch_input_metas: List[dict]) -> List[Tensor]:
+def encode_decode(self, batch_inputs: Tensor,
+batch_input_metas: List[dict]) -> Tensor:
"""Encode points with backbone and decode into a semantic segmentation
map of the same size as input.
@@ -178,7 +178,7 @@ class EncoderDecoder3D(Base3DSegmentor):
return losses
-def _loss_regularization_forward_train(self):
+def _loss_regularization_forward_train(self) -> dict:
"""Calculate regularization loss for model weight in training."""
losses = dict()
if isinstance(self.loss_regularization, nn.ModuleList):
@@ -213,7 +213,8 @@ class EncoderDecoder3D(Base3DSegmentor):
"""
# extract features using backbone
-x = self.extract_feat(batch_inputs_dict)
+points = torch.stack(batch_inputs_dict['points'])
+x = self.extract_feat(points)
losses = dict()
@@ -236,7 +237,7 @@ class EncoderDecoder3D(Base3DSegmentor):
patch_center: Tensor,
coord_max: Tensor,
feats: Tensor,
-use_normalized_coord: bool = False):
+use_normalized_coord: bool = False) -> Tensor:
"""Generating model input.
Generate input by subtracting patch center and adding additional
@@ -273,7 +274,7 @@ class EncoderDecoder3D(Base3DSegmentor):
block_size: float,
sample_rate: float = 0.5,
use_normalized_coord: bool = False,
-eps: float = 1e-3):
+eps: float = 1e-3) -> Tuple[Tensor, Tensor]:
"""Sampling points in a sliding window fashion.
First sample patches to cover all the input points.
@@ -291,7 +292,7 @@ class EncoderDecoder3D(Base3DSegmentor):
points coverage. Defaults to 1e-3.
Returns:
-np.ndarray | np.ndarray:
+tuple:
- patch_points (torch.Tensor): Points of different patches of
shape [K, N, 3+C].
@@ -372,7 +373,7 @@ class EncoderDecoder3D(Base3DSegmentor):
return patch_points, patch_idxs
def slide_inference(self, point: Tensor, img_meta: List[dict],
-rescale: bool):
+rescale: bool) -> Tensor:
"""Inference by sliding-window with overlap.
Args:
@@ -417,14 +418,14 @@ class EncoderDecoder3D(Base3DSegmentor):
return preds.transpose(0, 1) # to [num_classes, K*N]
def whole_inference(self, points: Tensor, input_metas: List[dict],
-rescale: bool):
+rescale: bool) -> Tensor:
"""Inference with full scene (one forward pass without sliding)."""
seg_logit = self.encode_decode(points, input_metas)
# TODO: if rescale and voxelization segmentor
return seg_logit
def inference(self, points: Tensor, input_metas: List[dict],
-rescale: bool):
+rescale: bool) -> Tensor:
"""Inference with slide/whole style.
Args:
@@ -489,7 +490,7 @@ class EncoderDecoder3D(Base3DSegmentor):
seg_map = seg_map.cpu()
seg_pred_list.append(seg_map)
-return self.postprocess_result(seg_pred_list, batch_input_metas)
+return self.postprocess_result(seg_pred_list, batch_data_samples)
def _forward(self,
batch_inputs_dict: dict,
@@ -510,7 +511,8 @@ class EncoderDecoder3D(Base3DSegmentor):
Returns:
Tensor: Forward output of model without any post-processes.
"""
-x = self.extract_feat(batch_inputs_dict)
+points = torch.stack(batch_inputs_dict['points'])
+x = self.extract_feat(points)
return self.decode_head.forward(x)
def aug_test(self, batch_inputs, batch_img_metas):
......
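Both `loss` and `_forward` now stack the per-sample point list before feature extraction. A minimal sketch of the shape contract (sizes illustrative; torch.stack requires equal point counts per sample):

import torch

batch_inputs_dict = dict(points=[torch.randn(1024, 6), torch.randn(1024, 6)])
points = torch.stack(batch_inputs_dict['points'])  # (B, N, 3 + C)
print(points.shape)  # torch.Size([2, 1024, 6])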
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.task_modules import AssignResult, BaseAssigner
from .anchor import (ANCHOR_GENERATORS, PRIOR_GENERATORS,
AlignedAnchor3DRangeGenerator,
AlignedAnchor3DRangeGeneratorPerCls,
......
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
+from mmdet.models.task_modules import AssignResult, MaxIoUAssigner
from mmengine.structures import InstanceData
from mmdet3d.registry import TASK_UTILS
-from mmdet.models.task_modules import AssignResult, MaxIoUAssigner
@TASK_UTILS.register_module()
......
# Copyright (c) OpenMMLab. All rights reserved.
import torch
+from mmdet.models.task_modules import BaseBBoxCoder
from mmdet3d.registry import TASK_UTILS
-from mmdet.models.task_modules import BaseBBoxCoder
@TASK_UTILS.register_module()
......
# Copyright (c) OpenMMLab. All rights reserved.
import torch
+from mmdet.models.task_modules import BaseBBoxCoder
from mmdet3d.registry import TASK_UTILS
-from mmdet.models.task_modules import BaseBBoxCoder
@TASK_UTILS.register_module()
......
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
+from mmdet.models.task_modules import BaseBBoxCoder
from mmdet3d.registry import TASK_UTILS
from mmdet3d.structures.bbox_3d import limit_period
-from mmdet.models.task_modules import BaseBBoxCoder
@TASK_UTILS.register_module()
......
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
+from mmdet.models.task_modules import BaseBBoxCoder
from torch.nn import functional as F
from mmdet3d.registry import TASK_UTILS
-from mmdet.models.task_modules import BaseBBoxCoder
@TASK_UTILS.register_module()
......
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
+from mmdet.models.task_modules import BaseBBoxCoder
from mmdet3d.registry import TASK_UTILS
-from mmdet.models.task_modules import BaseBBoxCoder
@TASK_UTILS.register_module()
......
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
+from mmdet.models.task_modules import BaseBBoxCoder
from mmdet3d.registry import TASK_UTILS
-from mmdet.models.task_modules import BaseBBoxCoder
@TASK_UTILS.register_module()
......
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
+from mmdet.models.task_modules import BaseBBoxCoder
from mmdet3d.registry import TASK_UTILS
-from mmdet.models.task_modules import BaseBBoxCoder
@TASK_UTILS.register_module()
......
@@ -4,6 +4,7 @@ from mmdet.models.task_modules.samplers import (BaseSampler, CombinedSampler,
IoUBalancedNegSampler,
OHEMSampler, RandomSampler,
SamplingResult)
from .iou_neg_piecewise_sampler import IoUNegPiecewiseSampler
from .pseudosample import PseudoSampler
......
# Copyright (c) OpenMMLab. All rights reserved.
+import math
import torch
from mmdet3d.registry import TASK_UTILS
@@ -74,9 +76,11 @@ class IoUNegPiecewiseSampler(RandomSampler):
# if the number of negative samples from previous
# pieces is less than the expected number, extend
# by the same amount in the current piece.
-piece_expected_num = int(
-num_expected *
-self.neg_piece_fractions[piece_inds]) + extend_num
+piece_expected_num = min(
+num_expected,
+math.ceil(num_expected *
+self.neg_piece_fractions[piece_inds]) +
+extend_num)
min_iou_thr = self.neg_iou_thr[piece_inds + 1]
max_iou_thr = self.neg_iou_thr[piece_inds]
piece_neg_inds = torch.nonzero(
......
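The effect of the resampling change above, traced with small illustrative numbers:

import math

num_expected, frac, extend_num = 10, 0.35, 8
old = int(num_expected * frac) + extend_num                           # 3 + 8 = 11
new = min(num_expected, math.ceil(num_expected * frac) + extend_num)  # min(10, 12) = 10
# the old form truncated the fraction and could overshoot num_expected;
# the new form rounds up and is explicitly capped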
# Copyright (c) OpenMMLab. All rights reserved.
import torch
+from mmdet.models.task_modules import AssignResult
from mmengine.structures import InstanceData
from mmdet3d.registry import TASK_UTILS
-from mmdet.models.task_modules import AssignResult
from ..samplers import BaseSampler, SamplingResult
......