Commit c2fe651f authored by zhangshilong, committed by ChaimZhu

refactor directory

parent bc5806ba
......@@ -5,7 +5,6 @@ from mmcv.utils import get_git_hash
import mmdet
import mmdet3d
import mmseg
from mmdet3d.ops.spconv import IS_SPCONV2_AVAILABLE
def collect_env():
......@@ -14,7 +13,9 @@ def collect_env():
env_info['MMDetection'] = mmdet.__version__
env_info['MMSegmentation'] = mmseg.__version__
env_info['MMDetection3D'] = mmdet3d.__version__ + '+' + get_git_hash()[:7]
from mmdet3d.models.layers.spconv import IS_SPCONV2_AVAILABLE
env_info['spconv2.0'] = IS_SPCONV2_AVAILABLE
return env_info
......
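As a quick illustration of the relocated spconv check above (not part of this commit; the public import path for collect_env is an assumption based on upstream MMDetection3D):

import mmdet3d
from mmdet3d.utils import collect_env  # assumed export location

# Print the collected environment, including the new 'spconv2.0' entry
for name, value in collect_env().items():
    print(f'{name}: {value}')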
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import os
import platform
......@@ -66,12 +67,11 @@ def register_all_modules(init_default_scope: bool = True) -> None:
to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md
Defaults to True.
""" # noqa
import mmdet3d.core # noqa: F401,F403
import mmdet3d.datasets # noqa: F401,F403
import mmdet3d.metrics # noqa: F401,F403
import mmdet3d.models # noqa: F401,F403
import mmdet3d.ops # noqa: F401,F403
import mmdet3d.scheduler # noqa: F401,F403
import mmdet3d.engine.scheduler # noqa: F401,F403
import mmdet3d.evaluation.metrics # noqa: F401,F403
import mmdet3d.structures # noqa: F401,F403
import mmdet3d.visualization # noqa: F401,F403
if init_default_scope:
never_created = DefaultScope.get_current_instance() is None \
or not DefaultScope.check_instance_created('mmdet3d')
......
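A minimal usage sketch for the module registration shown above (the public import path is an assumption based on upstream MMDetection3D):

from mmdet3d.utils import register_all_modules  # assumed export location

# Register datasets, models, engine schedulers, evaluation metrics, etc. and
# set 'mmdet3d' as the default registry scope, so configs can be built
# without an explicit scope prefix.
register_all_modules(init_default_scope=True)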
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in MMDetection3D."""
from typing import Dict, List, Optional, Tuple, Union
from typing import List, Optional, Union
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from ..bbox.samplers import SamplingResult
from ..data_structures import Det3DDataSample
from mmdet.models.task_modules import SamplingResult
# Type hint of config data
ConfigType = Union[ConfigDict, dict]
......@@ -20,12 +18,6 @@ OptMultiConfig = Optional[MultiConfig]
InstanceList = List[InstanceData]
OptInstanceList = Optional[InstanceList]
SampleList = List[Det3DDataSample]
OptSampleList = Optional[SampleList]
SamplingResultList = List[SamplingResult]
OptSamplingResultList = Optional[SamplingResultList]
ForwardResults = Union[Dict[str, torch.Tensor], List[Det3DDataSample],
Tuple[torch.Tensor], torch.Tensor]
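For illustration only (not part of this commit), the aliases above are meant to annotate model interfaces roughly as below; the import path and function name are assumptions:

from mmdet3d.utils import ConfigType, OptSampleList  # assumed export location

def predict(batch_inputs: dict,
            batch_data_samples: OptSampleList = None,
            test_cfg: ConfigType = None) -> list:
    """Hypothetical signature showing the intended use of the aliases."""
    ...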
# Copyright (c) OpenMMLab. All rights reserved.
from .local_visualizer import Det3DLocalVisualizer
from .vis_utils import (proj_camera_bbox3d_to_img, proj_depth_bbox3d_to_img,
proj_lidar_bbox3d_to_img, write_obj,
proj_lidar_bbox3d_to_img, to_depth_mode, write_obj,
write_oriented_bbox)
__all__ = [
'Det3DLocalVisualizer', 'proj_depth_bbox3d_to_img',
'proj_camera_bbox3d_to_img', 'proj_lidar_bbox3d_to_img',
'write_oriented_bbox', 'write_obj'
'Det3DLocalVisualizer', 'write_obj', 'write_oriented_bbox',
'to_depth_mode', 'proj_lidar_bbox3d_to_img', 'proj_depth_bbox3d_to_img',
'proj_camera_bbox3d_to_img'
]
......@@ -8,6 +8,8 @@ import numpy as np
from mmengine.dist import master_only
from torch import Tensor
from mmdet.visualization import DetLocalVisualizer
try:
import open3d as o3d
from open3d import geometry
......@@ -18,10 +20,9 @@ except ImportError:
from mmengine.data import InstanceData
from mmengine.visualization.utils import check_type, tensor2ndarray
from mmdet3d.core import (BaseInstance3DBoxes, DepthInstance3DBoxes,
Det3DDataSample, PixelData)
from mmdet3d.registry import VISUALIZERS
from mmdet.core.visualization import DetLocalVisualizer
from mmdet3d.structures import (BaseInstance3DBoxes, DepthInstance3DBoxes,
Det3DDataSample, PointData)
from .vis_utils import (proj_camera_bbox3d_to_img, proj_depth_bbox3d_to_img,
proj_lidar_bbox3d_to_img, to_depth_mode, write_obj,
write_oriented_bbox)
......@@ -35,6 +36,7 @@ class Det3DLocalVisualizer(DetLocalVisualizer):
- draw_bboxes_3d: draw 3D bounding boxes on point clouds
- draw_proj_bboxes_3d: draw projected 3D bounding boxes on image
- draw_seg_mask: draw segmentation mask via per-point colorization
Args:
name (str): Name of the instance. Defaults to 'visualizer'.
......@@ -54,6 +56,9 @@ class Det3DLocalVisualizer(DetLocalVisualizer):
Defaults to None.
line_width (int, float): The linewidth of lines.
Defaults to 3.
vis_cfg (dict): The coordinate frame config used when initializing the
Open3D visualizer.
Defaults to dict(size=1, origin=[0, 0, 0]).
alpha (int, float): The transparency of bboxes or mask.
Defaults to 0.8.
......@@ -61,33 +66,21 @@ class Det3DLocalVisualizer(DetLocalVisualizer):
>>> import numpy as np
>>> import torch
>>> from mmengine.data import InstanceData
>>> from mmdet3d.core import Det3DDataSample
>>> from mmdet3d.core import Det3DLocalVisualizer
>>> from mmdet3d.structures import BaseInstance3DBoxes, Det3DDataSample
>>> from mmdet3d.visualization import Det3DLocalVisualizer
>>> det3d_local_visualizer = Det3DLocalVisualizer()
>>> image = np.random.randint(0, 256,
... size=(10, 12, 3)).astype('uint8')
>>> points = np.random.rand(1000, 3)
>>> gt_instances_3d = InstanceData()
>>> gt_instances_3d.bboxes_3d = BaseInstance3DBoxes(torch.rand((5, 7)))
>>> gt_instances_3d.labels_3d = torch.randint(0, 2, (1,))
>>> gt_instances_3d.labels_3d = torch.randint(0, 2, (5,))
>>> gt_det3d_data_sample = Det3DDataSample()
>>> gt_det3d_data_sample.gt_instances_3d = gt_instances_3d
>>> det3d_local_visualizer.add_datasample('image', image,
... gt_det_data_sample)
>>> det3d_local_visualizer.add_datasample(
... 'image', image, gt_det_data_sample,
... out_file='out_file.jpg')
>>> det3d_local_visualizer.add_datasample(
... 'image', image, gt_det_data_sample,
... show=True)
>>> pred_instances = InstanceData()
>>> pred_instances.bboxes_3d = torch.Tensor([[2, 4, 4, 8]])
>>> pred_instances.labels_3d = torch.randint(0, 2, (1,))
>>> pred_det_data_sample = Det3DDataSample()
>>> pred_det_data_sample.pred_instances = pred_instances
>>> det_local_visualizer.add_datasample('image', image,
... gt_det_data_sample,
... pred_det_data_sample)
>>> data_input = dict(img=image, points=points)
>>> det3d_local_visualizer.add_datasample('3D Scene', data_input,
... gt_det3d_data_sample)
"""
def __init__(self,
......@@ -100,7 +93,7 @@ class Det3DLocalVisualizer(DetLocalVisualizer):
Tuple[int]]] = (200, 200, 200),
mask_color: Optional[Union[str, Tuple[int]]] = None,
line_width: Union[int, float] = 3,
vis_cfg: Optional[Dict] = None,
vis_cfg: dict = dict(size=1, origin=[0, 0, 0]),
alpha: float = 0.8):
super().__init__(
name=name,
......@@ -127,7 +120,7 @@ class Det3DLocalVisualizer(DetLocalVisualizer):
o3d_vis = o3d.visualization.Visualizer()
o3d_vis.create_window()
# create coordinate frame
mesh_frame = geometry.TriangleMesh.create_coordinate_frame(vis_cfg)
mesh_frame = geometry.TriangleMesh.create_coordinate_frame(**vis_cfg)
o3d_vis.add_geometry(mesh_frame)
return o3d_vis
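For reference, a standalone sketch of what the vis_cfg default configures (pure Open3D, using only calls already visible in the patch above):

import open3d as o3d

vis_cfg = dict(size=1, origin=[0, 0, 0])  # default added in this commit
o3d_vis = o3d.visualization.Visualizer()
o3d_vis.create_window()
# The patched line expands vis_cfg as keyword arguments instead of passing the dict
mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(**vis_cfg)
o3d_vis.add_geometry(mesh_frame)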
......@@ -374,8 +367,10 @@ class Det3DLocalVisualizer(DetLocalVisualizer):
if vis_task in ['mono-det', 'multi_modality-det']:
assert 'img' in data_input
image = data_input['img']
self.set_image(image)
if isinstance(data_input['img'], Tensor):
img = data_input['img'].permute(1, 2, 0).numpy()
img = img[..., [2, 1, 0]] # bgr to rgb
self.set_image(img)
self.draw_proj_bboxes_3d(bboxes_3d, input_meta)
drawn_img = self.get_image()
......@@ -385,7 +380,7 @@ class Det3DLocalVisualizer(DetLocalVisualizer):
def _draw_pts_sem_seg(self,
points: Tensor,
pts_seg: PixelData,
pts_seg: PointData,
vis_task: str,
palette: Optional[List[tuple]] = None,
ignore_index: Optional[int] = None):
......@@ -503,8 +498,10 @@ class Det3DLocalVisualizer(DetLocalVisualizer):
vis_task, palette)
if 'gt_instances' in gt_sample:
assert 'img' in data_input
gt_img_data = self._draw_instances(data_input['img'],
gt_sample.gt_instances,
if isinstance(data_input['img'], Tensor):
img = data_input['img'].permute(1, 2, 0).numpy()
img = img[..., [2, 1, 0]] # bgr to rgb
gt_img_data = self._draw_instances(img, gt_sample.gt_instances,
classes, palette)
if 'gt_pts_sem_seg' in gt_sample:
assert classes is not None, 'class information is ' \
......@@ -532,9 +529,11 @@ class Det3DLocalVisualizer(DetLocalVisualizer):
pred_instances = pred_sample.pred_instances
pred_instances = pred_instances_3d[
pred_instances.scores > pred_score_thr].cpu()
pred_img_data = self._draw_instances(data_input['img'],
pred_instances, classes,
palette)
if isinstance(data_input['img'], Tensor):
img = data_input['img'].permute(1, 2, 0).numpy()
img = img[..., [2, 1, 0]] # bgr to rgb
pred_img_data = self._draw_instances(img, pred_instances,
classes, palette)
if 'pred_pts_sem_seg' in pred_sample:
assert classes is not None, 'class information is ' \
'not provided when ' \
......
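The image handling repeated in the hunks above follows a single pattern; a standalone sketch of that conversion (assuming a CHW, BGR image tensor as produced by the data pipeline):

import torch

chw_bgr = torch.randint(0, 256, (3, 10, 12), dtype=torch.uint8)  # toy image tensor
hwc = chw_bgr.permute(1, 2, 0).numpy()  # CHW -> HWC
rgb = hwc[..., [2, 1, 0]]               # BGR -> RGB, as done before set_image/_draw_instances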
......@@ -5,10 +5,8 @@ import numpy as np
import torch
import trimesh
from mmdet3d.core.bbox import Box3DMode, Coord3DMode
from mmdet3d.core.bbox.structures.cam_box3d import CameraInstance3DBoxes
from mmdet3d.core.bbox.structures.depth_box3d import DepthInstance3DBoxes
from mmdet3d.core.bbox.structures.lidar_box3d import LiDARInstance3DBoxes
from mmdet3d.structures import (Box3DMode, CameraInstance3DBoxes, Coord3DMode,
DepthInstance3DBoxes, LiDARInstance3DBoxes)
def write_obj(points, out_filename):
......@@ -125,8 +123,8 @@ def proj_depth_bbox3d_to_img(bboxes_3d: DepthInstance3DBoxes,
3d bbox in depth coordinate system to visualize.
input_meta (dict): Used in coordinates transformation.
"""
from mmdet3d.core.bbox import points_cam2img
from mmdet3d.models import apply_3d_transformation
from mmdet3d.structures import points_cam2img
input_meta = copy.deepcopy(input_meta)
corners_3d = bboxes_3d.corners
......@@ -157,7 +155,7 @@ def proj_camera_bbox3d_to_img(bboxes_3d: CameraInstance3DBoxes,
cam2img (np.array): Camera intrinsic matrix,
denoted as `K` in depth bbox coordinate system.
"""
from mmdet3d.core.bbox import points_cam2img
from mmdet3d.structures import points_cam2img
cam2img = copy.deepcopy(input_meta['cam2img'])
corners_3d = bboxes_3d.corners
......
......@@ -7,7 +7,7 @@ CommandLine:
"""
import torch
from mmdet3d.core.anchor import build_prior_generator
from mmdet3d.registry import TASK_UTILS
def test_anchor_3d_range_generator():
......@@ -26,7 +26,7 @@ def test_anchor_3d_range_generator():
rotations=[0, 1.57],
reshape_out=False)
anchor_generator = build_prior_generator(anchor_generator_cfg)
anchor_generator = TASK_UTILS.build(anchor_generator_cfg)
repr_str = repr(anchor_generator)
expected_repr_str = 'Anchor3DRangeGenerator(anchor_range=' \
'[[0, -39.68, -0.6, 70.4, 39.68, -0.6], ' \
......@@ -65,7 +65,7 @@ def test_aligned_anchor_generator():
reshape_out=True)
featmap_sizes = [(256, 256), (128, 128), (64, 64)]
anchor_generator = build_prior_generator(anchor_generator_cfg)
anchor_generator = TASK_UTILS.build(anchor_generator_cfg)
assert anchor_generator.num_base_anchors == 8
# check base anchors
......@@ -200,7 +200,7 @@ def test_aligned_anchor_generator_per_cls():
reshape_out=False)
featmap_sizes = [(100, 100), (50, 50)]
anchor_generator = build_prior_generator(anchor_generator_cfg)
anchor_generator = TASK_UTILS.build(anchor_generator_cfg)
# check base anchors
expected_grid_anchors = [[
......
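The tests above replace build_prior_generator with the TASK_UTILS registry; a minimal sketch of that pattern (the anchor range is copied from the repr check above, other fields rely on defaults):

from mmdet3d.registry import TASK_UTILS

anchor_generator_cfg = dict(
    type='Anchor3DRangeGenerator',
    ranges=[[0, -39.68, -0.6, 70.4, 39.68, -0.6]],
    rotations=[0, 1.57],
    reshape_out=False)
anchor_generator = TASK_UTILS.build(anchor_generator_cfg)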
......@@ -4,9 +4,9 @@ import torch
from mmcv.cnn import Scale
from torch import nn as nn
from mmdet3d.core import build_bbox_coder
from mmdet3d.core.bbox import (CameraInstance3DBoxes, DepthInstance3DBoxes,
LiDARInstance3DBoxes)
from mmdet3d.registry import TASK_UTILS
from mmdet3d.structures import (CameraInstance3DBoxes, DepthInstance3DBoxes,
LiDARInstance3DBoxes)
def test_partial_bin_based_box_coder():
......@@ -25,7 +25,7 @@ def test_partial_bin_based_box_coder():
[0.500618, 0.632163, 0.683424],
[0.404671, 1.071108, 1.688889],
[0.76584, 1.398258, 0.472728]])
box_coder = build_bbox_coder(box_coder_cfg)
box_coder = TASK_UTILS.build(box_coder_cfg)
# test encode
gt_bboxes = DepthInstance3DBoxes(
......@@ -226,7 +226,7 @@ def test_partial_bin_based_box_coder():
def test_anchor_free_box_coder():
box_coder_cfg = dict(
type='AnchorFreeBBoxCoder', num_dir_bins=12, with_rot=True)
box_coder = build_bbox_coder(box_coder_cfg)
box_coder = TASK_UTILS.build(box_coder_cfg)
# test encode
gt_bboxes = LiDARInstance3DBoxes([[
......@@ -340,7 +340,7 @@ def test_centerpoint_bbox_coder():
out_size_factor=4,
voxel_size=[0.2, 0.2])
bbox_coder = build_bbox_coder(bbox_coder_cfg)
bbox_coder = TASK_UTILS.build(bbox_coder_cfg)
batch_dim = torch.rand([2, 3, 128, 128])
batch_hei = torch.rand([2, 1, 128, 128])
......@@ -363,7 +363,7 @@ def test_point_xyzwhlr_bbox_coder():
type='PointXYZWHLRBBoxCoder',
use_mean_size=True,
mean_size=[[3.9, 1.6, 1.56], [0.8, 0.6, 1.73], [1.76, 0.6, 1.73]])
boxcoder = build_bbox_coder(bbox_coder_cfg)
boxcoder = TASK_UTILS.build(bbox_coder_cfg)
# test encode
gt_bboxes_3d = torch.tensor(
......@@ -396,7 +396,7 @@ def test_fcos3d_bbox_coder():
base_dims=None,
code_size=7,
norm_on_bbox=True)
bbox_coder = build_bbox_coder(bbox_coder_cfg)
bbox_coder = TASK_UTILS.build(bbox_coder_cfg)
# test decode
# [2, 7, 1, 1]
......@@ -426,7 +426,7 @@ def test_fcos3d_bbox_coder():
base_dims=((2., 3., 1.), (1., 2., 3.)),
code_size=7,
norm_on_bbox=True)
prior_bbox_coder = build_bbox_coder(prior_bbox_coder_cfg)
prior_bbox_coder = TASK_UTILS.build(prior_bbox_coder_cfg)
# test decode
batch_bbox = torch.tensor([[[[0.3130]], [[0.7094]], [[0.8743]], [[0.0570]],
......@@ -472,7 +472,7 @@ def test_pgd_bbox_coder():
base_dims=None,
code_size=7,
norm_on_bbox=True)
bbox_coder = build_bbox_coder(bbox_coder_cfg)
bbox_coder = TASK_UTILS.build(bbox_coder_cfg)
# test decode_2d
# [2, 27, 1, 1]
......@@ -575,7 +575,7 @@ def test_smoke_bbox_coder():
base_dims=((3.88, 1.63, 1.53), (1.78, 1.70, 0.58), (0.88, 1.73, 0.67)),
code_size=7)
bbox_coder = build_bbox_coder(bbox_coder_cfg)
bbox_coder = TASK_UTILS.build(bbox_coder_cfg)
regression = torch.rand([200, 8])
points = torch.rand([200, 2])
labels = torch.ones([2, 100])
......@@ -616,7 +616,7 @@ def test_monoflex_bbox_coder():
bin_centers=[0, np.pi / 2, np.pi, -np.pi / 2],
bin_margin=np.pi / 6,
code_size=7)
bbox_coder = build_bbox_coder(bbox_coder_cfg)
bbox_coder = TASK_UTILS.build(bbox_coder_cfg)
gt_bboxes_3d = CameraInstance3DBoxes(torch.rand([6, 7]))
orientation_target = bbox_coder.encode(gt_bboxes_3d)
assert orientation_target.shape == torch.Size([6, 8])
......
......@@ -5,15 +5,14 @@ import numpy as np
import pytest
import torch
from mmdet3d.core.bbox import (BaseInstance3DBoxes, Box3DMode,
CameraInstance3DBoxes, Coord3DMode,
DepthInstance3DBoxes, LiDARInstance3DBoxes,
bbox3d2roi, bbox3d_mapping_back)
from mmdet3d.core.bbox.structures.utils import (get_box_type, limit_period,
points_cam2img,
rotation_3d_in_axis,
xywhr2xyxyr)
from mmdet3d.core.points import CameraPoints, DepthPoints, LiDARPoints
from mmdet3d.structures import (BaseInstance3DBoxes, Box3DMode,
CameraInstance3DBoxes, Coord3DMode,
DepthInstance3DBoxes, LiDARInstance3DBoxes,
bbox3d2roi, bbox3d_mapping_back)
from mmdet3d.structures.bbox_3d.utils import (get_box_type, limit_period,
points_cam2img,
rotation_3d_in_axis, xywhr2xyxyr)
from mmdet3d.structures.points import CameraPoints, DepthPoints, LiDARPoints
def test_bbox3d_mapping_back():
......
......@@ -3,7 +3,7 @@ import numpy as np
def test_camera_to_lidar():
from mmdet3d.core.bbox.box_np_ops import camera_to_lidar
from mmdet3d.structures.ops.box_np_ops import camera_to_lidar
points = np.array([[1.84, 1.47, 8.41]])
rect = np.array([[0.9999128, 0.01009263, -0.00851193, 0.],
[-0.01012729, 0.9999406, -0.00403767, 0.],
......@@ -19,7 +19,7 @@ def test_camera_to_lidar():
def test_box_camera_to_lidar():
from mmdet3d.core.bbox.box_np_ops import box_camera_to_lidar
from mmdet3d.structures.ops.box_np_ops import box_camera_to_lidar
box = np.array([[1.84, 1.47, 8.41, 1.2, 1.89, 0.48, -0.01]])
rect = np.array([[0.9999128, 0.01009263, -0.00851193, 0.],
[-0.01012729, 0.9999406, -0.00403767, 0.],
......@@ -37,7 +37,7 @@ def test_box_camera_to_lidar():
def test_corners_nd():
from mmdet3d.core.bbox.box_np_ops import corners_nd
from mmdet3d.structures.ops.box_np_ops import corners_nd
dims = np.array([[0.47, 0.98]])
corners = corners_nd(dims)
expected_corners = np.array([[[-0.235, -0.49], [-0.235, 0.49],
......@@ -46,7 +46,7 @@ def test_corners_nd():
def test_center_to_corner_box2d():
from mmdet3d.core.bbox.box_np_ops import center_to_corner_box2d
from mmdet3d.structures.ops.box_np_ops import center_to_corner_box2d
center = np.array([[9.348705, -3.6271024]])
dims = np.array([[0.47, 0.98]])
angles = np.array([3.14])
......@@ -66,7 +66,7 @@ def test_center_to_corner_box2d():
def test_points_in_convex_polygon_jit():
from mmdet3d.core.bbox.box_np_ops import points_in_convex_polygon_jit
from mmdet3d.structures.ops.box_np_ops import points_in_convex_polygon_jit
points = np.array([[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]])
polygons = np.array([[[1.0, 0.0], [0.0, 1.0], [0.0, 0.5], [0.0, 0.0]],
[[1.0, 0.0], [1.0, 1.0], [0.5, 1.0], [0.0, 1.0]],
......
......@@ -2,10 +2,10 @@
import numpy as np
import torch
from mmdet3d.core.bbox import (CameraInstance3DBoxes, Coord3DMode,
DepthInstance3DBoxes, LiDARInstance3DBoxes,
limit_period)
from mmdet3d.core.points import CameraPoints, DepthPoints, LiDARPoints
from mmdet3d.structures import (CameraInstance3DBoxes, Coord3DMode,
DepthInstance3DBoxes, LiDARInstance3DBoxes,
limit_period)
from mmdet3d.structures.points import CameraPoints, DepthPoints, LiDARPoints
def test_points_conversion():
......
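A tiny sketch of one of the relocated utilities exercised by this file (limit_period, now importable from mmdet3d.structures as the new import above shows; values are illustrative):

import numpy as np
import torch
from mmdet3d.structures import limit_period

# Wrap angles into [-pi, pi): offset=0.5 with period=2*pi centres the interval.
angles = torch.tensor([3.5 * np.pi, -2.5 * np.pi])
wrapped = limit_period(angles, offset=0.5, period=2 * np.pi)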
......@@ -3,8 +3,8 @@ import pytest
import torch
from mmengine import InstanceData
from mmdet3d.core.bbox.assigners import Max3DIoUAssigner
from mmdet3d.core.bbox.samplers import IoUNegPiecewiseSampler
from mmdet3d.models.task_modules import IoUNegPiecewiseSampler
from mmdet3d.models.task_modules.assigners import Max3DIoUAssigner
def test_iou_piecewise_sampler():
......
......@@ -6,7 +6,7 @@ import pytest
import torch
from mmengine.data import InstanceData
from mmdet3d.core.data_structures import Det3DDataSample, PointData
from mmdet3d.structures import Det3DDataSample, PointData
def _equal(a, b):
......
......@@ -3,8 +3,8 @@ import numpy as np
import pytest
import torch
from mmdet3d.core.points import (BasePoints, CameraPoints, DepthPoints,
LiDARPoints)
from mmdet3d.structures.points import (BasePoints, CameraPoints, DepthPoints,
LiDARPoints)
def test_base_points():
......
......@@ -3,8 +3,8 @@ import mmcv
import pytest
import torch
from mmdet3d.core import merge_aug_bboxes_3d
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet3d.models import merge_aug_bboxes_3d
from mmdet3d.structures import DepthInstance3DBoxes
def test_merge_aug_bboxes_3d():
......
......@@ -5,7 +5,7 @@ import torch
def test_aligned_3d_nms():
from mmdet3d.core.post_processing import aligned_3d_nms
from mmdet3d.models.layers import aligned_3d_nms
boxes = torch.tensor([[1.2261, 0.6679, -1.2678, 2.6547, 1.0428, 0.1000],
[5.0919, 0.6512, 0.7238, 5.4821, 1.2451, 2.1095],
......@@ -61,7 +61,7 @@ def test_aligned_3d_nms():
def test_circle_nms():
from mmdet3d.core.post_processing import circle_nms
from mmdet3d.models.layers import circle_nms
boxes = torch.tensor([[-11.1100, 2.1300, 0.8823],
[-11.2810, 2.2422, 0.8914],
[-10.3966, -0.3198, 0.8643],
......@@ -80,7 +80,7 @@ def test_circle_nms():
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_nms_bev():
from mmdet3d.core.post_processing import nms_bev
from mmdet3d.models.layers import nms_bev
np_boxes = np.array(
[[6.0, 3.0, 8.0, 7.0, 2.0], [3.0, 6.0, 9.0, 11.0, 1.0],
......@@ -99,7 +99,7 @@ def test_nms_bev():
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_nms_normal_bev():
from mmdet3d.core.post_processing import nms_normal_bev
from mmdet3d.models.layers import nms_normal_bev
np_boxes = np.array(
[[6.0, 3.0, 8.0, 7.0, 2.0], [3.0, 6.0, 9.0, 11.0, 1.0],
......
......@@ -3,10 +3,11 @@ import numpy as np
import pytest
import torch
from mmdet3d.core import array_converter, draw_heatmap_gaussian, points_img2cam
from mmdet3d.core.bbox import CameraInstance3DBoxes
from mmdet3d.models import draw_heatmap_gaussian
from mmdet3d.models.utils import (filter_outside_objs, get_edge_indices,
get_keypoints, handle_proj_objs)
from mmdet3d.structures import CameraInstance3DBoxes, points_img2cam
from mmdet3d.utils import array_converter
def test_gaussian():
......
......@@ -6,9 +6,8 @@ from mmcv.transforms.base import BaseTransform
from mmengine.data import InstanceData
from mmengine.registry import TRANSFORMS
from mmdet3d.core import LiDARInstance3DBoxes
from mmdet3d.core.data_structures import Det3DDataSample
from mmdet3d.datasets import KittiDataset
from mmdet3d.structures import Det3DDataSample, LiDARInstance3DBoxes
def _generate_kitti_dataset_config():
......@@ -72,15 +71,12 @@ def test_getitem():
ann_info = kitti_dataset.parse_ann_info(input_dict)
# assert the keys in ann_info and the type
assert 'gt_labels' in ann_info
assert ann_info['gt_labels'].dtype == np.int64
assert 'instances' in ann_info
# only one instance
assert len(ann_info['gt_labels']) == 1
assert (ann_info['gt_labels'] == 0).all()
assert 'gt_labels_3d' in ann_info
assert ann_info['gt_labels_3d'].dtype == np.int64
assert 'gt_bboxes' in ann_info
assert ann_info['gt_bboxes'].dtype == np.float64
assert 'gt_bboxes_3d' in ann_info
assert isinstance(ann_info['gt_bboxes_3d'], LiDARInstance3DBoxes)
assert torch.allclose(ann_info['gt_bboxes_3d'].tensor.sum(),
......@@ -89,16 +85,6 @@ def test_getitem():
assert ann_info['centers_2d'].dtype == np.float64
assert 'depths' in ann_info
assert ann_info['depths'].dtype == np.float64
assert 'group_id' in ann_info
assert ann_info['group_id'].dtype == np.int64
assert 'occluded' in ann_info
assert ann_info['occluded'].dtype == np.int64
assert 'difficulty' in ann_info
assert ann_info['difficulty'].dtype == np.int64
assert 'num_lidar_pts' in ann_info
assert ann_info['num_lidar_pts'].dtype == np.int64
assert 'truncated' in ann_info
assert ann_info['truncated'].dtype == np.int64
car_kitti_dataset = KittiDataset(
data_root,
......@@ -115,8 +101,8 @@ def test_getitem():
ann_info = car_kitti_dataset.parse_ann_info(input_dict)
# assert the keys in ann_info and the type
assert 'gt_labels' in ann_info
assert ann_info['gt_labels'].dtype == np.int64
assert 'instances' in ann_info
assert ann_info['gt_labels_3d'].dtype == np.int64
# all instance have been filtered by classes
assert len(ann_info['gt_labels']) == 0
assert len(ann_info['gt_labels_3d']) == 0
assert len(car_kitti_dataset.metainfo['CLASSES']) == 1
......@@ -4,9 +4,8 @@ from mmcv.transforms.base import BaseTransform
from mmengine.data import InstanceData
from mmengine.registry import TRANSFORMS
from mmdet3d.core.bbox import LiDARInstance3DBoxes
from mmdet3d.core.data_structures import Det3DDataSample
from mmdet3d.datasets import LyftDataset
from mmdet3d.structures import Det3DDataSample, LiDARInstance3DBoxes
def _generate_nus_dataset_config():
......
......@@ -4,9 +4,8 @@ from mmcv.transforms.base import BaseTransform
from mmengine.data import InstanceData
from mmengine.registry import TRANSFORMS
from mmdet3d.core.bbox import LiDARInstance3DBoxes
from mmdet3d.core.data_structures import Det3DDataSample
from mmdet3d.datasets import NuScenesDataset
from mmdet3d.structures import Det3DDataSample, LiDARInstance3DBoxes
def _generate_nus_dataset_config():
......