"...text-generation-inference.git" did not exist on "5dad0c0b29cf31271c01948653ac164649a3ac78"
Unverified Commit 907bb4e4 authored by Yezhen Cong, committed by GitHub
Browse files

[Fix] Fix several errors in docstring (#761)

* Fix docstring typos and change formats for api-doc to work

* Test on fixing noqa

* Fix noqa

* Fix noqa
parent bdbc7c66
......@@ -176,17 +176,17 @@ class AxisAlignedBboxOverlaps3D(object):
Args:
bboxes1 (Tensor): shape (B, m, 6) in <x1, y1, z1, x2, y2, z2>
format or empty.
format or empty.
bboxes2 (Tensor): shape (B, n, 6) in <x1, y1, z1, x2, y2, z2>
format or empty.
format or empty.
B indicates the batch dim, in shape (B1, B2, ..., Bn).
If ``is_aligned `` is ``True``, then m and n must be equal.
If ``is_aligned`` is ``True``, then m and n must be equal.
mode (str): "iou" (intersection over union) or "giou" (generalized
intersection over union).
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
Returns:
Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)
Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
"""
assert bboxes1.size(-1) == bboxes2.size(-1) == 6
return axis_aligned_bbox_overlaps_3d(bboxes1, bboxes2, mode,
......@@ -204,17 +204,17 @@ def axis_aligned_bbox_overlaps_3d(bboxes1,
is_aligned=False,
eps=1e-6):
"""Calculate overlap between two sets of axis-aligned 3D bboxes. If
``is_aligned `` is ``False``, then calculate the overlaps between each bbox
``is_aligned`` is ``False``, then calculate the overlaps between each bbox
of bboxes1 and bboxes2, otherwise the overlaps between each aligned pair of
bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (B, m, 6) in <x1, y1, z1, x2, y2, z2>
format or empty.
format or empty.
bboxes2 (Tensor): shape (B, n, 6) in <x1, y1, z1, x2, y2, z2>
format or empty.
format or empty.
B indicates the batch dim, in shape (B1, B2, ..., Bn).
If ``is_aligned `` is ``True``, then m and n must be equal.
If ``is_aligned`` is ``True``, then m and n must be equal.
mode (str): "iou" (intersection over union) or "giou" (generalized
intersection over union).
is_aligned (bool, optional): If True, then m and n must be equal.
......@@ -223,7 +223,7 @@ def axis_aligned_bbox_overlaps_3d(bboxes1,
stability. Default 1e-6.
Returns:
Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)
Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
Example:
>>> bboxes1 = torch.FloatTensor([
......
......@@ -199,7 +199,7 @@ class BaseInstance3DBoxes(object):
"""Convert self to ``dst`` mode.
Args:
dst (:obj:`BoxMode`): The target Box mode.
dst (:obj:`Box3DMode`): The target Box mode.
rt_mat (np.ndarray | torch.Tensor): The rotation and translation
matrix between different coordinates. Defaults to None.
The conversion from `src` coordinates to `dst` coordinates
......@@ -266,7 +266,7 @@ class BaseInstance3DBoxes(object):
subject to Pytorch's indexing semantics.
Returns:
:obj:`BaseInstances3DBoxes`: A new object of \
:obj:`BaseInstance3DBoxes`: A new object of \
:class:`BaseInstance3DBoxes` after indexing.
"""
original_type = type(self)
......@@ -293,10 +293,10 @@ class BaseInstance3DBoxes(object):
"""Concatenate a list of Boxes into a single Boxes.
Args:
boxes_list (list[:obj:`BaseInstances3DBoxes`]): List of boxes.
boxes_list (list[:obj:`BaseInstance3DBoxes`]): List of boxes.
Returns:
:obj:`BaseInstances3DBoxes`: The concatenated Boxes.
:obj:`BaseInstance3DBoxes`: The concatenated Boxes.
"""
assert isinstance(boxes_list, (list, tuple))
if len(boxes_list) == 0:
......@@ -360,8 +360,8 @@ class BaseInstance3DBoxes(object):
boxes2, boxes1 and boxes2 should be of the same type.
Args:
boxes1 (:obj:`BaseInstanceBoxes`): Boxes 1 contain N boxes.
boxes2 (:obj:`BaseInstanceBoxes`): Boxes 2 contain M boxes.
boxes1 (:obj:`BaseInstance3DBoxes`): Boxes 1 contain N boxes.
boxes2 (:obj:`BaseInstance3DBoxes`): Boxes 2 contain M boxes.
mode (str, optional): Mode of iou calculation. Defaults to 'iou'.
Returns:
......@@ -392,8 +392,8 @@ class BaseInstance3DBoxes(object):
``boxes2``, ``boxes1`` and ``boxes2`` should be in the same type.
Args:
boxes1 (:obj:`BaseInstanceBoxes`): Boxes 1 contain N boxes.
boxes2 (:obj:`BaseInstanceBoxes`): Boxes 2 contain M boxes.
boxes1 (:obj:`BaseInstance3DBoxes`): Boxes 1 contain N boxes.
boxes2 (:obj:`BaseInstance3DBoxes`): Boxes 2 contain M boxes.
mode (str, optional): Mode of iou calculation. Defaults to 'iou'.
Returns:
......
......@@ -67,8 +67,8 @@ class Box3DMode(IntEnum):
box (tuple | list | np.ndarray |
torch.Tensor | BaseInstance3DBoxes):
Can be a k-tuple, k-list or an Nxk array/tensor, where k = 7.
src (:obj:`BoxMode`): The src Box mode.
dst (:obj:`BoxMode`): The target Box mode.
src (:obj:`Box3DMode`): The src Box mode.
dst (:obj:`Box3DMode`): The target Box mode.
rt_mat (np.ndarray | torch.Tensor): The rotation and translation
matrix between different coordinates. Defaults to None.
The conversion from `src` coordinates to `dst` coordinates
......@@ -87,7 +87,7 @@ class Box3DMode(IntEnum):
single_box = isinstance(box, (list, tuple))
if single_box:
assert len(box) >= 7, (
'BoxMode.convert takes either a k-tuple/list or '
'Box3DMode.convert takes either a k-tuple/list or '
'an Nxk array/tensor, where k >= 7')
arr = torch.tensor(box)[None, :]
else:
......
......@@ -307,7 +307,7 @@ class CameraInstance3DBoxes(BaseInstance3DBoxes):
"""Convert self to ``dst`` mode.
Args:
dst (:obj:`BoxMode`): The target Box mode.
dst (:obj:`Box3DMode`): The target Box mode.
rt_mat (np.ndarray | torch.Tensor): The rotation and translation
matrix between different coordinates. Defaults to None.
The conversion from ``src`` coordinates to ``dst`` coordinates
......
......@@ -233,7 +233,7 @@ class DepthInstance3DBoxes(BaseInstance3DBoxes):
"""Convert self to ``dst`` mode.
Args:
dst (:obj:`BoxMode`): The target Box mode.
dst (:obj:`Box3DMode`): The target Box mode.
rt_mat (np.ndarray | torch.Tensor): The rotation and translation
matrix between different coordinates. Defaults to None.
The conversion from ``src`` coordinates to ``dst`` coordinates
......
......@@ -224,7 +224,7 @@ class LiDARInstance3DBoxes(BaseInstance3DBoxes):
"""Convert self to ``dst`` mode.
Args:
dst (:obj:`BoxMode`): the target Box mode
dst (:obj:`Box3DMode`): the target Box mode
rt_mat (np.ndarray | torch.Tensor): The rotation and translation
matrix between different coordinates. Defaults to None.
The conversion from ``src`` coordinates to ``dst`` coordinates
......
......@@ -23,7 +23,7 @@ class LyftDataset(Custom3DDataset):
This class serves as the API for experiments on the Lyft Dataset.
Please refer to
`<https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/data>`_ # noqa
`<https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/data>`_
for data downloading.
Args:
......@@ -49,7 +49,7 @@ class LyftDataset(Custom3DDataset):
Defaults to True.
test_mode (bool, optional): Whether the dataset is in test mode.
Defaults to False.
"""
""" # noqa: E501
NameMapping = {
'bicycle': 'bicycle',
'bus': 'bus',
......
......@@ -14,7 +14,7 @@ class FreeAnchor3DHead(Anchor3DHead):
Note:
This implementation is directly modified from the `mmdet implementation
<https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/free_anchor_retina_head.py>`_ # noqa
<https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/free_anchor_retina_head.py>`_.
We find it also works on 3D detection with minor modification, i.e.,
different hyper-parameters and an additional direction classifier.
......@@ -25,7 +25,7 @@ class FreeAnchor3DHead(Anchor3DHead):
gamma (float): Gamma parameter in focal loss.
alpha (float): Alpha parameter in focal loss.
kwargs (dict): Other arguments are the same as those in :class:`Anchor3DHead`.
"""
""" # noqa: E501
def __init__(self,
pre_anchor_topk=50,
......
......@@ -4,11 +4,11 @@ from .single_stage_mono3d import SingleStageMono3DDetector
@DETECTORS.register_module()
class FCOSMono3D(SingleStageMono3DDetector):
r"""FCOS3D <https://arxiv.org/abs/2104.10956>`_ for monocular 3D object detection.
r"""`FCOS3D <https://arxiv.org/abs/2104.10956>`_ for monocular 3D object detection.
Currently please refer to our entry on the
`leaderboard <https://www.nuscenes.org/object-detection?externalData=all&mapData=all&modalities=Camera>` # noqa
"""
`leaderboard <https://www.nuscenes.org/object-detection?externalData=all&mapData=all&modalities=Camera>`_.
""" # noqa: E501
def __init__(self,
backbone,
......
......@@ -12,7 +12,7 @@ from .base import Base3DDetector
def sample_valid_seeds(mask, num_sampled_seed=1024):
r"""Randomly sample seeds from all imvotes.
Modified from `<https://github.com/facebookresearch/imvotenet/blob/a8856345146bacf29a57266a2f0b874406fd8823/models/imvotenet.py#L26>`_ # noqa
Modified from `<https://github.com/facebookresearch/imvotenet/blob/a8856345146bacf29a57266a2f0b874406fd8823/models/imvotenet.py#L26>`_
Args:
mask (torch.Tensor): Bool tensor in shape (
......@@ -22,7 +22,7 @@ def sample_valid_seeds(mask, num_sampled_seed=1024):
Returns:
torch.Tensor: Indices with shape (num_sampled_seed).
"""
""" # noqa: E501
device = mask.device
batch_size = mask.shape[0]
sample_inds = mask.new_zeros((batch_size, num_sampled_seed),
......@@ -600,10 +600,8 @@ class ImVoteNet(Base3DDetector):
img_metas,
proposals=None,
rescale=False):
"""Test without augmentation, image network pretrain. May refer to
https://github.com/open-
mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py #
noqa.
r"""Test without augmentation, image network pretrain. May refer to
`<https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py>`_.
Args:
img (torch.Tensor): Should have a shape NxCxHxW, which contains
......@@ -616,7 +614,7 @@ class ImVoteNet(Base3DDetector):
Returns:
list[list[torch.Tensor]]: Predicted 2d boxes.
"""
""" # noqa: E501
assert self.with_img_bbox, 'Img bbox head must be implemented.'
assert self.with_img_backbone, 'Img backbone must be implemented.'
assert self.with_img_rpn, 'Img rpn must be implemented.'
......@@ -701,10 +699,8 @@ class ImVoteNet(Base3DDetector):
return bbox_results
def aug_test_img_only(self, img, img_metas, rescale=False):
"""Test function with augmentation, image network pretrain. May refer
to https://github.com/open-
mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py #
noqa.
r"""Test function with augmentation, image network pretrain. May refer
to `<https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py>`_.
Args:
img (list[list[torch.Tensor]], optional): the outer
......@@ -722,7 +718,7 @@ class ImVoteNet(Base3DDetector):
Returns:
list[list[torch.Tensor]]: Predicted 2d boxes.
"""
""" # noqa: E501
assert self.with_img_bbox, 'Img bbox head must be implemented.'
assert self.with_img_backbone, 'Img backbone must be implemented.'
assert self.with_img_rpn, 'Img rpn must be implemented.'
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment