Unverified commit 907bb4e4, authored by Yezhen Cong, committed by GitHub

[Fix] Fix several errors in docstring (#761)

* Fix docstring typos and change formats for api-doc to work

* Test on fixing noqa

* Fix noqa

* Fix noqa
parent bdbc7c66
@@ -180,13 +180,13 @@ class AxisAlignedBboxOverlaps3D(object):
     bboxes2 (Tensor): shape (B, n, 6) in <x1, y1, z1, x2, y2, z2>
         format or empty.
         B indicates the batch dim, in shape (B1, B2, ..., Bn).
-        If ``is_aligned `` is ``True``, then m and n must be equal.
+        If ``is_aligned`` is ``True``, then m and n must be equal.
     mode (str): "iou" (intersection over union) or "giou" (generalized
         intersection over union).
     is_aligned (bool, optional): If True, then m and n must be equal.
         Default False.
 Returns:
-    Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)
+    Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
 """
 assert bboxes1.size(-1) == bboxes2.size(-1) == 6
 return axis_aligned_bbox_overlaps_3d(bboxes1, bboxes2, mode,
@@ -204,7 +204,7 @@ def axis_aligned_bbox_overlaps_3d(bboxes1,
                                   is_aligned=False,
                                   eps=1e-6):
     """Calculate overlap between two set of axis aligned 3D bboxes. If
-    ``is_aligned `` is ``False``, then calculate the overlaps between each bbox
+    ``is_aligned`` is ``False``, then calculate the overlaps between each bbox
     of bboxes1 and bboxes2, otherwise the overlaps between each aligned pair of
     bboxes1 and bboxes2.
@@ -214,7 +214,7 @@ def axis_aligned_bbox_overlaps_3d(bboxes1,
     bboxes2 (Tensor): shape (B, n, 6) in <x1, y1, z1, x2, y2, z2>
         format or empty.
         B indicates the batch dim, in shape (B1, B2, ..., Bn).
-        If ``is_aligned `` is ``True``, then m and n must be equal.
+        If ``is_aligned`` is ``True``, then m and n must be equal.
     mode (str): "iou" (intersection over union) or "giou" (generalized
         intersection over union).
     is_aligned (bool, optional): If True, then m and n must be equal.
@@ -223,7 +223,7 @@ def axis_aligned_bbox_overlaps_3d(bboxes1,
         stability. Default 1e-6.
 Returns:
-    Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)
+    Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
 Example:
     >>> bboxes1 = torch.FloatTensor([
......
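
As a quick reference for the operation these docstrings describe, here is a minimal sketch of the non-aligned axis-aligned 3D IoU, assuming unbatched inputs of shape (m, 6) and (n, 6); it illustrates the idea and is not the repository's implementation:

    import torch

    def aabb_iou_3d(bboxes1, bboxes2, eps=1e-6):
        # bboxes1: (m, 6), bboxes2: (n, 6) in <x1, y1, z1, x2, y2, z2> format.
        vol1 = (bboxes1[:, 3:] - bboxes1[:, :3]).prod(dim=-1)  # (m,)
        vol2 = (bboxes2[:, 3:] - bboxes2[:, :3]).prod(dim=-1)  # (n,)
        # Pairwise overlap of the three axis-aligned extents.
        lt = torch.max(bboxes1[:, None, :3], bboxes2[None, :, :3])  # (m, n, 3)
        rb = torch.min(bboxes1[:, None, 3:], bboxes2[None, :, 3:])  # (m, n, 3)
        inter = (rb - lt).clamp(min=0).prod(dim=-1)                 # (m, n)
        union = vol1[:, None] + vol2[None, :] - inter
        return inter / (union + eps)

With ``is_aligned=True`` the same computation is applied element-wise to paired boxes, and ``mode='giou'`` additionally penalizes the empty volume of the smallest enclosing box.
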
@@ -199,7 +199,7 @@ class BaseInstance3DBoxes(object):
     """Convert self to ``dst`` mode.
     Args:
-        dst (:obj:`BoxMode`): The target Box mode.
+        dst (:obj:`Box3DMode`): The target Box mode.
         rt_mat (np.ndarray | torch.Tensor): The rotation and translation
             matrix between different coordinates. Defaults to None.
             The conversion from `src` coordinates to `dst` coordinates
@@ -266,7 +266,7 @@ class BaseInstance3DBoxes(object):
     subject to Pytorch's indexing semantics.
     Returns:
-        :obj:`BaseInstances3DBoxes`: A new object of \
+        :obj:`BaseInstance3DBoxes`: A new object of \
             :class:`BaseInstances3DBoxes` after indexing.
     """
     original_type = type(self)
@@ -293,10 +293,10 @@ class BaseInstance3DBoxes(object):
     """Concatenate a list of Boxes into a single Boxes.
     Args:
-        boxes_list (list[:obj:`BaseInstances3DBoxes`]): List of boxes.
+        boxes_list (list[:obj:`BaseInstance3DBoxes`]): List of boxes.
     Returns:
-        :obj:`BaseInstances3DBoxes`: The concatenated Boxes.
+        :obj:`BaseInstance3DBoxes`: The concatenated Boxes.
     """
     assert isinstance(boxes_list, (list, tuple))
     if len(boxes_list) == 0:
@@ -360,8 +360,8 @@ class BaseInstance3DBoxes(object):
     boxes2, boxes1 and boxes2 should be in the same type.
     Args:
-        boxes1 (:obj:`BaseInstanceBoxes`): Boxes 1 contain N boxes.
-        boxes2 (:obj:`BaseInstanceBoxes`): Boxes 2 contain M boxes.
+        boxes1 (:obj:`BaseInstance3DBoxes`): Boxes 1 contain N boxes.
+        boxes2 (:obj:`BaseInstance3DBoxes`): Boxes 2 contain M boxes.
         mode (str, optional): Mode of iou calculation. Defaults to 'iou'.
     Returns:
@@ -392,8 +392,8 @@ class BaseInstance3DBoxes(object):
     ``boxes2``, ``boxes1`` and ``boxes2`` should be in the same type.
     Args:
-        boxes1 (:obj:`BaseInstanceBoxes`): Boxes 1 contain N boxes.
-        boxes2 (:obj:`BaseInstanceBoxes`): Boxes 2 contain M boxes.
+        boxes1 (:obj:`BaseInstance3DBoxes`): Boxes 1 contain N boxes.
+        boxes2 (:obj:`BaseInstance3DBoxes`): Boxes 2 contain M boxes.
         mode (str, optional): Mode of iou calculation. Defaults to 'iou'.
     Returns:
......
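
The two classmethods touched above are used as in the sketch below, with made-up tensors; ``LiDARInstance3DBoxes`` stands in for any concrete subclass, and ``overlaps`` dispatches to the codebase's 3D IoU op (which runs on GPU in this library):

    import torch
    from mmdet3d.core.bbox import LiDARInstance3DBoxes

    # Two hypothetical sets of (x, y, z, dx, dy, dz, yaw) boxes.
    boxes_a = LiDARInstance3DBoxes(torch.rand(4, 7))
    boxes_b = LiDARInstance3DBoxes(torch.rand(6, 7))

    merged = LiDARInstance3DBoxes.cat([boxes_a, boxes_b])   # one object holding all 10 boxes
    ious = LiDARInstance3DBoxes.overlaps(boxes_a, boxes_b)  # pairwise IoU between the 4 and 6 boxes
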
@@ -67,8 +67,8 @@ class Box3DMode(IntEnum):
     box (tuple | list | np.ndarray |
         torch.Tensor | BaseInstance3DBoxes):
         Can be a k-tuple, k-list or an Nxk array/tensor, where k = 7.
-    src (:obj:`BoxMode`): The src Box mode.
-    dst (:obj:`BoxMode`): The target Box mode.
+    src (:obj:`Box3DMode`): The src Box mode.
+    dst (:obj:`Box3DMode`): The target Box mode.
     rt_mat (np.ndarray | torch.Tensor): The rotation and translation
         matrix between different coordinates. Defaults to None.
         The conversion from `src` coordinates to `dst` coordinates
@@ -87,7 +87,7 @@ class Box3DMode(IntEnum):
     single_box = isinstance(box, (list, tuple))
     if single_box:
         assert len(box) >= 7, (
-            'BoxMode.convert takes either a k-tuple/list or '
+            'Box3DMode.convert takes either a k-tuple/list or '
            'an Nxk array/tensor, where k >= 7')
         arr = torch.tensor(box)[None, :]
     else:
......
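
A usage sketch of the converter these hunks document; the seven values form a hypothetical (x, y, z, dx, dy, dz, yaw) box, and leaving ``rt_mat`` unset falls back to the default transform between the two modes:

    from mmdet3d.core.bbox import Box3DMode

    box_lidar = (1.0, 2.0, -1.5, 3.9, 1.6, 1.56, 0.3)  # a single 7-value box
    # Convert between coordinate conventions; pass rt_mat to override the
    # default rotation/translation between the two frames.
    box_cam = Box3DMode.convert(box_lidar, Box3DMode.LIDAR, Box3DMode.CAM)
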
@@ -307,7 +307,7 @@ class CameraInstance3DBoxes(BaseInstance3DBoxes):
     """Convert self to ``dst`` mode.
     Args:
-        dst (:obj:`BoxMode`): The target Box mode.
+        dst (:obj:`Box3DMode`): The target Box mode.
         rt_mat (np.ndarray | torch.Tensor): The rotation and translation
             matrix between different coordinates. Defaults to None.
             The conversion from ``src`` coordinates to ``dst`` coordinates
......
@@ -233,7 +233,7 @@ class DepthInstance3DBoxes(BaseInstance3DBoxes):
     """Convert self to ``dst`` mode.
     Args:
-        dst (:obj:`BoxMode`): The target Box mode.
+        dst (:obj:`Box3DMode`): The target Box mode.
         rt_mat (np.ndarray | torch.Tensor): The rotation and translation
             matrix between different coordinates. Defaults to None.
             The conversion from ``src`` coordinates to ``dst`` coordinates
......
@@ -224,7 +224,7 @@ class LiDARInstance3DBoxes(BaseInstance3DBoxes):
     """Convert self to ``dst`` mode.
     Args:
-        dst (:obj:`BoxMode`): the target Box mode
+        dst (:obj:`Box3DMode`): the target Box mode
         rt_mat (np.ndarray | torch.Tensor): The rotation and translation
             matrix between different coordinates. Defaults to None.
             The conversion from ``src`` coordinates to ``dst`` coordinates
......
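
The three ``convert_to`` docstrings fixed above all wrap the same converter, called as an instance method; a short sketch with hypothetical boxes:

    import torch
    from mmdet3d.core.bbox import Box3DMode, LiDARInstance3DBoxes

    lidar_boxes = LiDARInstance3DBoxes(torch.rand(5, 7))  # hypothetical boxes
    # With rt_mat left unset, the default LiDAR-to-camera transform is used.
    cam_boxes = lidar_boxes.convert_to(Box3DMode.CAM)
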
@@ -23,7 +23,7 @@ class LyftDataset(Custom3DDataset):
     This class serves as the API for experiments on the Lyft Dataset.
     Please refer to
-    `<https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/data>`_ # noqa
+    `<https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/data>`_
     for data downloading.
     Args:
@@ -49,7 +49,7 @@ class LyftDataset(Custom3DDataset):
         Defaults to True.
     test_mode (bool, optional): Whether the dataset is in test mode.
         Defaults to False.
-    """
+    """ # noqa: E501
     NameMapping = {
         'bicycle': 'bicycle',
         'bus': 'bus',
......
@@ -14,7 +14,7 @@ class FreeAnchor3DHead(Anchor3DHead):
     Note:
         This implementation is directly modified from the `mmdet implementation
-        <https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/free_anchor_retina_head.py>`_ # noqa
+        <https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/free_anchor_retina_head.py>`_.
         We find it also works on 3D detection with minor modification, i.e.,
         different hyper-parameters and a additional direction classifier.
@@ -25,7 +25,7 @@ class FreeAnchor3DHead(Anchor3DHead):
     gamma (float): Gamma parameter in focal loss.
     alpha (float): Alpha parameter in focal loss.
     kwargs (dict): Other arguments are the same as those in :class:`Anchor3DHead`.
-    """
+    """ # noqa: E501
     def __init__(self,
                  pre_anchor_topk=50,
......
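
The ``gamma`` and ``alpha`` arguments listed above parameterize the standard focal loss; written out as a small sketch (binary form, the textbook formula rather than this head's actual loss code):

    import torch

    def focal_loss(p, target, gamma=2.0, alpha=0.25, eps=1e-12):
        # p: predicted probability of the positive class; target: 0/1 labels.
        p_t = torch.where(target == 1, p, 1 - p)
        alpha_t = torch.where(target == 1, torch.full_like(p, alpha),
                              torch.full_like(p, 1 - alpha))
        return -alpha_t * (1 - p_t) ** gamma * torch.log(p_t + eps)

Larger ``gamma`` down-weights easy examples more aggressively, while ``alpha`` balances positive against negative samples.
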
@@ -4,11 +4,11 @@ from .single_stage_mono3d import SingleStageMono3DDetector
 @DETECTORS.register_module()
 class FCOSMono3D(SingleStageMono3DDetector):
-    r"""FCOS3D <https://arxiv.org/abs/2104.10956>`_ for monocular 3D object detection.
+    r"""`FCOS3D <https://arxiv.org/abs/2104.10956>`_ for monocular 3D object detection.
     Currently please refer to our entry on the
-    `leaderboard <https://www.nuscenes.org/object-detection?externalData=all&mapData=all&modalities=Camera>` # noqa
-    """
+    `leaderboard <https://www.nuscenes.org/object-detection?externalData=all&mapData=all&modalities=Camera>`_.
+    """ # noqa: E501
     def __init__(self,
                  backbone,
......
@@ -12,7 +12,7 @@ from .base import Base3DDetector
 def sample_valid_seeds(mask, num_sampled_seed=1024):
     r"""Randomly sample seeds from all imvotes.
-    Modified from `<https://github.com/facebookresearch/imvotenet/blob/a8856345146bacf29a57266a2f0b874406fd8823/models/imvotenet.py#L26>`_ # noqa
+    Modified from `<https://github.com/facebookresearch/imvotenet/blob/a8856345146bacf29a57266a2f0b874406fd8823/models/imvotenet.py#L26>`_
     Args:
         mask (torch.Tensor): Bool tensor in shape (
@@ -22,7 +22,7 @@ def sample_valid_seeds(mask, num_sampled_seed=1024):
     Returns:
         torch.Tensor: Indices with shape (num_sampled_seed).
-    """
+    """ # noqa: E501
     device = mask.device
     batch_size = mask.shape[0]
     sample_inds = mask.new_zeros((batch_size, num_sampled_seed),
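
Since only its docstring appears in this hunk, here is one way to implement the idea behind ``sample_valid_seeds``: randomly pick ``num_sampled_seed`` indices of valid seeds per sample, repeating valid indices when there are too few. This is a sketch of the idea, not the upstream code:

    import torch

    def sample_seed_indices(mask, num_sampled_seed=1024):
        # mask: (batch_size, num_points) bool tensor marking valid seeds;
        # assumes every sample has at least one valid seed.
        batch_size = mask.shape[0]
        out = mask.new_zeros((batch_size, num_sampled_seed), dtype=torch.int64)
        for b in range(batch_size):
            valid = torch.nonzero(mask[b], as_tuple=False).flatten()
            if valid.numel() >= num_sampled_seed:
                choice = torch.randperm(valid.numel(),
                                        device=mask.device)[:num_sampled_seed]
            else:
                # Too few valid seeds: sample with replacement to pad up.
                choice = torch.randint(valid.numel(), (num_sampled_seed,),
                                       device=mask.device)
            out[b] = valid[choice]
        return out
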
@@ -600,10 +600,8 @@ class ImVoteNet(Base3DDetector):
                             img_metas,
                             proposals=None,
                             rescale=False):
-        """Test without augmentation, image network pretrain. May refer to
-        https://github.com/open-
-        mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py #
-        noqa.
+        r"""Test without augmentation, image network pretrain. May refer to
+        `<https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py>`_.
         Args:
             img (torch.Tensor): Should have a shape NxCxHxW, which contains
@@ -616,7 +614,7 @@ class ImVoteNet(Base3DDetector):
         Returns:
             list[list[torch.Tensor]]: Predicted 2d boxes.
-        """
+        """ # noqa: E501
         assert self.with_img_bbox, 'Img bbox head must be implemented.'
         assert self.with_img_backbone, 'Img backbone must be implemented.'
         assert self.with_img_rpn, 'Img rpn must be implemented.'
@@ -701,10 +699,8 @@ class ImVoteNet(Base3DDetector):
         return bbox_results
     def aug_test_img_only(self, img, img_metas, rescale=False):
-        """Test function with augmentation, image network pretrain. May refer
-        to https://github.com/open-
-        mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py #
-        noqa.
+        r"""Test function with augmentation, image network pretrain. May refer
+        to `<https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py>`_.
         Args:
             img (list[list[torch.Tensor]], optional): the outer
@@ -722,7 +718,7 @@ class ImVoteNet(Base3DDetector):
         Returns:
             list[list[torch.Tensor]]: Predicted 2d boxes.
-        """
+        """ # noqa: E501
         assert self.with_img_bbox, 'Img bbox head must be implemented.'
         assert self.with_img_backbone, 'Img backbone must be implemented.'
         assert self.with_img_rpn, 'Img rpn must be implemented.'
......