"...transforms/git@developer.sourcefind.cn:OpenDAS/vision.git" did not exist on "0e2a5ae79bc0dc8aeb1e54601888df5be01937f0"
Commit c2fe651f authored by zhangshilong, committed by ChaimZhu

refactor directory

parent bc5806ba
# dataset settings
# TODO refactor S3DISDataset
dataset_type = 'S3DISDataset'
data_root = './data/s3dis/'
class_names = ('table', 'chair', 'sofa', 'bookcase', 'board')
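Config fragments like the S3DIS settings above are ordinary Python files that mmengine loads into a `Config` object. A minimal sketch of loading one; the file path is illustrative only and not taken from this commit:

```python
from mmengine.config import Config

# The path is a placeholder; any config defining the variables above works.
cfg = Config.fromfile('configs/_base_/datasets/s3dis-3d-5class.py')
print(cfg.dataset_type)  # 'S3DISDataset'
print(cfg.class_names)   # ('table', 'chair', 'sofa', 'bookcase', 'board')
```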
@@ -116,7 +116,7 @@ model = dict(
[0.50867593, 0.50656086, 0.30136237],
[1.1511526, 1.0546296, 0.49706793],
[0.47535285, 0.49249494, 0.5802117]]), # Mean sizes for each class, the order is consistent with class_names.
vote_module_cfg=dict( # Config of vote module branch, refer to mmdet3d.models.model_utils for more details
vote_module_cfg=dict( # Config of vote module branch, refer to mmdet3d.models.layers for more details
in_channels=256, # Input channels for vote_module
vote_per_seed=1, # Number of votes to generate for each seed
gt_per_seed=3, # Number of gts for each seed
@@ -186,77 +186,77 @@ class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain',
'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',
'garbagebin') # Names of classes
train_pipeline = [ # Training pipeline, refer to mmdet3d.datasets.pipelines for more details
train_pipeline = [ # Training pipeline, refer to mmdet3d.datasets.transforms for more details
dict(
type='LoadPointsFromFile', # First pipeline to load points, refer to mmdet3d.datasets.pipelines.indoor_loading for more details
type='LoadPointsFromFile', # First pipeline to load points, refer to mmdet3d.datasets.transforms.indoor_loading for more details
shift_height=True, # Whether to use shifted height
load_dim=6, # The dimension of the loaded points
use_dim=[0, 1, 2]), # Which dimensions of the points to be used
dict(
type='LoadAnnotations3D', # Second pipeline to load annotations, refer to mmdet3d.datasets.pipelines.indoor_loading for more details
type='LoadAnnotations3D', # Second pipeline to load annotations, refer to mmdet3d.datasets.transforms.indoor_loading for more details
with_bbox_3d=True, # Whether to load 3D boxes
with_label_3d=True, # Whether to load 3D labels corresponding to each 3D box
with_mask_3d=True, # Whether to load 3D instance masks
with_seg_3d=True), # Whether to load 3D semantic masks
dict(
type='PointSegClassMapping', # Declare valid categories, refer to mmdet3d.datasets.pipelines.point_seg_class_mapping for more details
type='PointSegClassMapping', # Declare valid categories, refer to mmdet3d.datasets.transforms.point_seg_class_mapping for more details
valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34,
36, 39), # all valid category ids
max_cat_id=40), # max possible category id in input segmentation mask
dict(type='PointSample', # Sample points, refer to mmdet3d.datasets.pipelines.transforms_3d for more details
dict(type='PointSample', # Sample points, refer to mmdet3d.datasets.transforms.transforms_3d for more details
num_points=40000), # Number of points to be sampled
dict(type='IndoorFlipData', # Augmentation pipeline that flips points and 3D boxes
flip_ratio_yz=0.5, # Probability of being flipped along yz plane
flip_ratio_xz=0.5), # Probability of being flipped along xz plane
dict(
type='IndoorGlobalRotScale', # Augmentation pipeline that rotates and scales points and 3D boxes, refer to mmdet3d.datasets.pipelines.indoor_augment for more details
type='IndoorGlobalRotScale', # Augmentation pipeline that rotates and scales points and 3D boxes, refer to mmdet3d.datasets.transforms.indoor_augment for more details
shift_height=True, # Whether the loaded points use `shift_height` attribute
rot_range=[-0.027777777777777776, 0.027777777777777776], # Range of rotation
scale_range=None), # Range of scale
dict(
type='DefaultFormatBundle3D', # Default format bundle to gather data in the pipeline, refer to mmdet3d.datasets.pipelines.formatting for more details
type='DefaultFormatBundle3D', # Default format bundle to gather data in the pipeline, refer to mmdet3d.datasets.transforms.formatting for more details
class_names=('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'showercurtrain', 'toilet',
'sink', 'bathtub', 'garbagebin')),
dict(
type='Collect3D', # Pipeline that decides which keys in the data should be passed to the detector, refer to mmdet3d.datasets.pipelines.formatting for more details
type='Collect3D', # Pipeline that decides which keys in the data should be passed to the detector, refer to mmdet3d.datasets.transforms.formatting for more details
keys=[
'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask',
'pts_instance_mask'
])
]
test_pipeline = [ # Testing pipeline, refer to mmdet3d.datasets.pipelines for more details
test_pipeline = [ # Testing pipeline, refer to mmdet3d.datasets.transforms for more details
dict(
type='LoadPointsFromFile', # First pipeline to load points, refer to mmdet3d.datasets.pipelines.indoor_loading for more details
type='LoadPointsFromFile', # First pipeline to load points, refer to mmdet3d.datasets.transforms.indoor_loading for more details
shift_height=True, # Whether to use shifted height
load_dim=6, # The dimension of the loaded points
use_dim=[0, 1, 2]), # Which dimensions of the points to be used
dict(type='PointSample', # Sample points, refer to mmdet3d.datasets.pipelines.transforms_3d for more details
dict(type='PointSample', # Sample points, refer to mmdet3d.datasets.transforms.transforms_3d for more details
num_points=40000), # Number of points to be sampled
dict(
type='DefaultFormatBundle3D', # Default format bundle to gather data in the pipeline, refer to mmdet3d.datasets.pipelines.formatting for more details
type='DefaultFormatBundle3D', # Default format bundle to gather data in the pipeline, refer to mmdet3d.datasets.transforms.formatting for more details
class_names=('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'showercurtrain', 'toilet',
'sink', 'bathtub', 'garbagebin')),
dict(type='Collect3D', # Pipeline that decides which keys in the data should be passed to the detector, refer to mmdet3d.datasets.pipelines.formatting for more details
dict(type='Collect3D', # Pipeline that decides which keys in the data should be passed to the detector, refer to mmdet3d.datasets.transforms.formatting for more details
keys=['points'])
]
eval_pipeline = [ # Pipeline used for evaluation or visualization, refer to mmdet3d.datasets.pipelines for more details
eval_pipeline = [ # Pipeline used for evaluation or visualization, refer to mmdet3d.datasets.transforms for more details
dict(
type='LoadPointsFromFile', # First pipeline to load points, refer to mmdet3d.datasets.pipelines.indoor_loading for more details
type='LoadPointsFromFile', # First pipeline to load points, refer to mmdet3d.datasets.transforms.indoor_loading for more details
shift_height=True, # Whether to use shifted height
load_dim=6, # The dimension of the loaded points
use_dim=[0, 1, 2]), # Which dimensions of the points to be used
dict(
type='DefaultFormatBundle3D', # Default format bundle to gather data in the pipeline, refer to mmdet3d.datasets.pipelines.formatting for more details
type='DefaultFormatBundle3D', # Default format bundle to gather data in the pipeline, refer to mmdet3d.datasets.transforms.formatting for more details
class_names=('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'showercurtrain', 'toilet',
'sink', 'bathtub', 'garbagebin'),
with_label=False),
dict(type='Collect3D', # Pipeline that decides which keys in the data should be passed to the detector, refer to mmdet3d.datasets.pipelines.formatting for more details
dict(type='Collect3D', # Pipeline that decides which keys in the data should be passed to the detector, refer to mmdet3d.datasets.transforms.formatting for more details
keys=['points'])
]
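Each of the pipeline variables above is a list of transform dicts that gets built into a single callable at runtime. A rough sketch of that composition (not part of the diff), assuming the mmdet3d transforms have already been imported and registered; the input keys are illustrative and vary between mmdet3d versions:

```python
from mmengine.dataset import Compose

# Each dict in test_pipeline is looked up in the TRANSFORMS registry and the
# resolved transforms are chained into one callable.
pipeline = Compose(test_pipeline)

# Input keys normally come from the dataset; these are placeholders.
results = dict(lidar_points=dict(lidar_path='data/scannet/points/scene0000_00.bin'))
data = pipeline(results)
```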
data = dict(
@@ -117,7 +117,7 @@ model = dict(
[0.50867593, 0.50656086, 0.30136237],
[1.1511526, 1.0546296, 0.49706793],
[0.47535285, 0.49249494, 0.5802117]]), # Mean sizes for each class, the order is consistent with class_names.
vote_module_cfg=dict( # Config of vote module branch, refer to mmdet3d.models.model_utils for more details
vote_module_cfg=dict( # Config of vote module branch, refer to mmdet3d.models.layers for more details
in_channels=256, # Input channels for vote_module
vote_per_seed=1, # Number of votes to generate for each seed
gt_per_seed=3, # Number of gts for each seed
@@ -187,77 +187,77 @@ class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain',
'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',
'garbagebin') # Names of classes
train_pipeline = [ # Training pipeline, refer to mmdet3d.datasets.pipelines for more details
train_pipeline = [ # Training pipeline, refer to mmdet3d.datasets.transforms for more details
dict(
type='LoadPointsFromFile', # First pipeline to load points, refer to mmdet3d.datasets.pipelines.indoor_loading for more details
type='LoadPointsFromFile', # First pipeline to load points, refer to mmdet3d.datasets.transforms.indoor_loading for more details
shift_height=True, # Whether to use shifted height
load_dim=6, # The dimension of the loaded points
use_dim=[0, 1, 2]), # Which dimensions of the points to be used
dict(
type='LoadAnnotations3D', # Second pipeline to load annotations, refer to mmdet3d.datasets.pipelines.indoor_loading for more details
type='LoadAnnotations3D', # Second pipeline to load annotations, refer to mmdet3d.datasets.transforms.indoor_loading for more details
with_bbox_3d=True, # Whether to load 3D boxes
with_label_3d=True, # Whether to load 3D labels corresponding to each 3D box
with_mask_3d=True, # Whether to load 3D instance masks
with_seg_3d=True), # Whether to load 3D semantic masks
dict(
type='PointSegClassMapping', # Declare valid categories, refer to mmdet3d.datasets.pipelines.point_seg_class_mapping for more details
type='PointSegClassMapping', # Declare valid categories, refer to mmdet3d.datasets.transforms.point_seg_class_mapping for more details
valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34,
36, 39), # all valid category ids
max_cat_id=40), # max possible category id in input segmentation mask
dict(type='PointSample', # Indoor point sampling, refer to mmdet3d.datasets.pipelines.indoor_sample for more details
dict(type='PointSample', # Indoor point sampling, refer to mmdet3d.datasets.transforms.indoor_sample for more details
num_points=40000), # Number of points to be sampled
dict(type='IndoorFlipData', # Augmentation pipeline that randomly flips points and 3D boxes
flip_ratio_yz=0.5, # Probability of being flipped along the yz plane
flip_ratio_xz=0.5), # Probability of being flipped along the xz plane
dict(
type='IndoorGlobalRotScale', # Augmentation pipeline that rotates and scales points and 3D boxes, refer to mmdet3d.datasets.pipelines.indoor_augment for more details
type='IndoorGlobalRotScale', # Augmentation pipeline that rotates and scales points and 3D boxes, refer to mmdet3d.datasets.transforms.indoor_augment for more details
shift_height=True, # Whether the loaded points carry the height attribute
rot_range=[-0.027777777777777776, 0.027777777777777776], # Range of rotation angle
scale_range=None), # Range of scale
dict(
type='DefaultFormatBundle3D', # Default format bundle to gather data in the pipeline, refer to mmdet3d.datasets.pipelines.formatting for more details
type='DefaultFormatBundle3D', # Default format bundle to gather data in the pipeline, refer to mmdet3d.datasets.transforms.formatting for more details
class_names=('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'showercurtrain', 'toilet',
'sink', 'bathtub', 'garbagebin')),
dict(
type='Collect3D', # Final pipeline that decides which keys in the data should be passed to the detector, refer to mmdet3d.datasets.pipelines.formatting for more details
type='Collect3D', # Final pipeline that decides which keys in the data should be passed to the detector, refer to mmdet3d.datasets.transforms.formatting for more details
keys=[
'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask',
'pts_instance_mask'
])
]
test_pipeline = [ # Testing pipeline, refer to mmdet3d.datasets.pipelines for more details
test_pipeline = [ # Testing pipeline, refer to mmdet3d.datasets.transforms for more details
dict(
type='LoadPointsFromFile', # First pipeline to load points, refer to mmdet3d.datasets.pipelines.indoor_loading for more details
type='LoadPointsFromFile', # First pipeline to load points, refer to mmdet3d.datasets.transforms.indoor_loading for more details
shift_height=True, # Whether to use shifted height
load_dim=6, # The dimension of the loaded points
use_dim=[0, 1, 2]), # Which dimensions of the points to be used
dict(type='PointSample', # Indoor point sampling, refer to mmdet3d.datasets.pipelines.indoor_sample for more details
dict(type='PointSample', # Indoor point sampling, refer to mmdet3d.datasets.transforms.indoor_sample for more details
num_points=40000), # Number of points to be sampled
dict(
type='DefaultFormatBundle3D', # Default format bundle to gather data in the pipeline, refer to mmdet3d.datasets.pipelines.formatting for more details
type='DefaultFormatBundle3D', # Default format bundle to gather data in the pipeline, refer to mmdet3d.datasets.transforms.formatting for more details
class_names=('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'showercurtrain', 'toilet',
'sink', 'bathtub', 'garbagebin')),
dict(type='Collect3D', # Final pipeline that decides which keys in the data should be passed to the detector, refer to mmdet3d.datasets.pipelines.formatting for more details
dict(type='Collect3D', # Final pipeline that decides which keys in the data should be passed to the detector, refer to mmdet3d.datasets.transforms.formatting for more details
keys=['points'])
]
eval_pipeline = [ # Pipeline used for model validation or visualization, refer to mmdet3d.datasets.pipelines for more details
eval_pipeline = [ # Pipeline used for model validation or visualization, refer to mmdet3d.datasets.transforms for more details
dict(
type='LoadPointsFromFile', # First pipeline to load points, refer to mmdet3d.datasets.pipelines.indoor_loading for more details
type='LoadPointsFromFile', # First pipeline to load points, refer to mmdet3d.datasets.transforms.indoor_loading for more details
shift_height=True, # Whether to use shifted height
load_dim=6, # The dimension of the loaded points
use_dim=[0, 1, 2]), # Which dimensions of the points to be used
dict(
type='DefaultFormatBundle3D', # Default format bundle to gather data in the pipeline, refer to mmdet3d.datasets.pipelines.formatting for more details
type='DefaultFormatBundle3D', # Default format bundle to gather data in the pipeline, refer to mmdet3d.datasets.transforms.formatting for more details
class_names=('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'showercurtrain', 'toilet',
'sink', 'bathtub', 'garbagebin'),
with_label=False),
dict(type='Collect3D', # Final pipeline that decides which keys in the data should be passed to the detector, refer to mmdet3d.datasets.pipelines.formatting for more details
dict(type='Collect3D', # Final pipeline that decides which keys in the data should be passed to the detector, refer to mmdet3d.datasets.transforms.formatting for more details
keys=['points'])
]
data = dict(
@@ -12,9 +12,9 @@ import torch.nn as nn
from mmengine.dataset import Compose
from mmengine.runner import load_checkpoint
from mmdet3d.core import Box3DMode, Det3DDataSample, SampleList
from mmdet3d.core.bbox import get_box_type
from mmdet3d.models import build_model
from mmdet3d.registry import MODELS
from mmdet3d.structures import Box3DMode, Det3DDataSample, get_box_type
from mmdet3d.structures.det3d_data_sample import SampleList
def convert_SyncBN(config):
@@ -55,7 +55,7 @@ def init_model(config, checkpoint=None, device='cuda:0'):
config.model.pretrained = None
convert_SyncBN(config.model)
config.model.train_cfg = None
model = build_model(config.model)
model = MODELS.build(config.model)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
if 'CLASSES' in checkpoint['meta']:
......
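The init_model change above swaps the old build_model helper for the registry-based MODELS.build. A condensed sketch of the resulting flow; the config and checkpoint paths are placeholders, not taken from this commit:

```python
from mmengine.config import Config
from mmengine.runner import load_checkpoint

from mmdet3d.registry import MODELS

# Placeholder paths; the real ones depend on the model being deployed.
cfg = Config.fromfile('configs/some_model/some_config.py')
cfg.model.train_cfg = None        # inference only, as in init_model()
model = MODELS.build(cfg.model)   # registry-based build replacing build_model()
load_checkpoint(model, 'checkpoints/some_checkpoint.pth', map_location='cpu')
model.cfg = cfg                   # keep the config on the model for later use
model.to('cuda:0').eval()
```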
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor import * # noqa: F401, F403
from .bbox import * # noqa: F401, F403
from .data_structures import * # noqa: F401, F403
from .evaluation import * # noqa: F401, F403
from .points import * # noqa: F401, F403
from .post_processing import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
from .visualization import * # noqa: F401, F403
from .voxel import * # noqa: F401, F403
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import AssignResult, BaseAssigner, Max3DIoUAssigner
# from .bbox_target import bbox_target
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coders import DeltaXYZWLHRBBoxCoder
from .iou_calculators import (AxisAlignedBboxOverlaps3D, BboxOverlaps3D,
BboxOverlapsNearest3D,
axis_aligned_bbox_overlaps_3d, bbox_overlaps_3d,
bbox_overlaps_nearest_3d)
from .samplers import (BaseSampler, CombinedSampler,
InstanceBalancedPosSampler, IoUBalancedNegSampler,
PseudoSampler, RandomSampler, SamplingResult)
from .structures import (BaseInstance3DBoxes, Box3DMode, CameraInstance3DBoxes,
Coord3DMode, DepthInstance3DBoxes,
LiDARInstance3DBoxes, get_box_type, limit_period,
mono_cam_box2vis, points_cam2img, points_img2cam,
xywhr2xyxyr)
from .transforms import bbox3d2result, bbox3d2roi, bbox3d_mapping_back
__all__ = [
'BaseSampler', 'AssignResult', 'BaseAssigner', 'Max3DIoUAssigner',
'PseudoSampler', 'RandomSampler', 'InstanceBalancedPosSampler',
'IoUBalancedNegSampler', 'CombinedSampler', 'SamplingResult',
'DeltaXYZWLHRBBoxCoder', 'BboxOverlapsNearest3D', 'BboxOverlaps3D',
'bbox_overlaps_nearest_3d', 'bbox_overlaps_3d',
'AxisAlignedBboxOverlaps3D', 'axis_aligned_bbox_overlaps_3d', 'Box3DMode',
'LiDARInstance3DBoxes', 'CameraInstance3DBoxes', 'bbox3d2roi',
'bbox3d2result', 'DepthInstance3DBoxes', 'BaseInstance3DBoxes',
'bbox3d_mapping_back', 'xywhr2xyxyr', 'limit_period', 'points_cam2img',
'points_img2cam', 'get_box_type', 'Coord3DMode', 'mono_cam_box2vis',
'build_assigner', 'build_bbox_coder', 'build_sampler'
]
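For readers unfamiliar with these exports, a small usage sketch for two of them follows; after this refactor the same names are re-exported from mmdet3d.structures, and the box values here are made up:

```python
import torch
from mmdet3d.structures import LiDARInstance3DBoxes, get_box_type
# (before this refactor the same names were exposed via mmdet3d.core.bbox)

# A single LiDAR box in (x, y, z, dx, dy, dz, yaw) form.
boxes = LiDARInstance3DBoxes(
    torch.tensor([[1.0, 2.0, -1.5, 4.0, 1.8, 1.6, 0.3]]))
print(boxes.volume)            # tensor([11.5200]) = 4.0 * 1.8 * 1.6
print(get_box_type('lidar'))   # (LiDARInstance3DBoxes, Box3DMode.LIDAR)
```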
# Copyright (c) OpenMMLab. All rights reserved.
from .iou3d_calculator import (AxisAlignedBboxOverlaps3D, BboxOverlaps3D,
BboxOverlapsNearest3D,
axis_aligned_bbox_overlaps_3d, bbox_overlaps_3d,
bbox_overlaps_nearest_3d)
__all__ = [
'BboxOverlapsNearest3D', 'BboxOverlaps3D', 'bbox_overlaps_nearest_3d',
'bbox_overlaps_3d', 'AxisAlignedBboxOverlaps3D',
'axis_aligned_bbox_overlaps_3d'
]
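A quick sanity-check example for the axis-aligned IoU calculator exported here; the import path matches the pre-refactor location shown in this file and may need adjusting on the new layout, and the boxes are in (x1, y1, z1, x2, y2, z2) form:

```python
import torch
from mmdet3d.core.bbox.iou_calculators import AxisAlignedBboxOverlaps3D

# Two unit cubes overlapping by half along x: intersection 0.5, union 1.5.
calculator = AxisAlignedBboxOverlaps3D()
b1 = torch.tensor([[0.0, 0.0, 0.0, 1.0, 1.0, 1.0]])
b2 = torch.tensor([[0.5, 0.0, 0.0, 1.5, 1.0, 1.0]])
print(calculator(b1, b2))  # tensor([[0.3333]])
```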
# Copyright (c) OpenMMLab. All rights reserved.
from .det3d_data_sample import Det3DDataSample
from .point_data import PointData
__all__ = ['Det3DDataSample', 'PointData']
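Det3DDataSample is the per-sample container used throughout the refactored code (see the inference.py hunk above). A minimal sketch of packing ground-truth instances into it, with toy values:

```python
import torch
from mmengine.structures import InstanceData
from mmdet3d.structures import Det3DDataSample

# Toy ground truth: two instances with class labels 0 and 2.
gt_instances_3d = InstanceData()
gt_instances_3d.labels_3d = torch.tensor([0, 2])

data_sample = Det3DDataSample()
data_sample.gt_instances_3d = gt_instances_3d
print(data_sample.gt_instances_3d.labels_3d)  # tensor([0, 2])
```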
# Copyright (c) OpenMMLab. All rights reserved.
from .indoor_eval import indoor_eval
from .instance_seg_eval import instance_seg_eval
from .kitti_utils import kitti_eval, kitti_eval_coco_style
from .lyft_eval import lyft_eval
from .seg_eval import seg_eval
__all__ = [
'kitti_eval_coco_style', 'kitti_eval', 'indoor_eval', 'lyft_eval',
'seg_eval', 'instance_seg_eval'
]
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.post_processing import (merge_aug_bboxes, merge_aug_masks,
merge_aug_proposals, merge_aug_scores,
multiclass_nms)
from .box3d_nms import (aligned_3d_nms, box3d_multiclass_nms, circle_nms,
nms_bev, nms_normal_bev)
from .merge_augs import merge_aug_bboxes_3d
__all__ = [
'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',
'merge_aug_scores', 'merge_aug_masks', 'box3d_multiclass_nms',
'aligned_3d_nms', 'merge_aug_bboxes_3d', 'circle_nms', 'nms_bev',
'nms_normal_bev'
]
# Copyright (c) OpenMMLab. All rights reserved.
from .array_converter import ArrayConverter, array_converter
from .gaussian import (draw_heatmap_gaussian, ellip_gaussian2D, gaussian_2d,
gaussian_radius, get_ellip_gaussian_2D)
from .misc import add_prefix
from .typing import (ConfigType, ForwardResults, InstanceList, MultiConfig,
OptConfigType, OptInstanceList, OptMultiConfig,
OptSampleList, OptSamplingResultList, SampleList,
SamplingResultList)
__all__ = [
'gaussian_2d', 'gaussian_radius', 'draw_heatmap_gaussian',
'ArrayConverter', 'array_converter', 'ellip_gaussian2D',
'get_ellip_gaussian_2D', 'ConfigType', 'OptConfigType', 'MultiConfig',
'OptMultiConfig', 'InstanceList', 'OptInstanceList', 'SampleList',
'OptSampleList', 'SamplingResultList', 'ForwardResults',
'OptSamplingResultList', 'add_prefix'
]
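The typing aliases exported here are meant to annotate config-style arguments. An illustrative module showing the intended use; DummyHead is made up and only the import path of the __init__ shown above is assumed:

```python
import torch.nn as nn

from mmdet3d.core.utils import ConfigType, OptMultiConfig


class DummyHead(nn.Module):
    """Made-up head; only demonstrates the typing aliases in a signature."""

    def __init__(self,
                 loss_cls: ConfigType = dict(type='mmdet.CrossEntropyLoss'),
                 init_cfg: OptMultiConfig = None) -> None:
        super().__init__()
        self.loss_cls_cfg = loss_cls
        self.init_cfg = init_cfg
```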
@@ -4,21 +4,8 @@ from .convert_utils import get_2d_boxes
from .dataset_wrappers import CBGSDataset
from .det3d_dataset import Det3DDataset
from .kitti_dataset import KittiDataset
from .kitti_mono_dataset import KittiMonoDataset
from .lyft_dataset import LyftDataset
from .nuscenes_dataset import NuScenesDataset
from .nuscenes_mono_dataset import NuScenesMonoDataset
# yapf: disable
from .pipelines import (AffineResize, BackgroundPointsFilter, GlobalAlignment,
GlobalRotScaleTrans, IndoorPatchPointSample,
IndoorPointSample, LoadAnnotations3D,
LoadPointsFromDict, LoadPointsFromFile,
LoadPointsFromMultiSweeps, NormalizePointsColor,
ObjectNameFilter, ObjectNoise, ObjectRangeFilter,
ObjectSample, PointSample, PointShuffle,
PointsRangeFilter, RandomDropPointsColor, RandomFlip3D,
RandomJitterPoints, RandomShiftScale,
VoxelBasedPointSampler)
# yapf: enable
from .s3dis_dataset import S3DISDataset, S3DISSegDataset
from .scannet_dataset import (ScanNetDataset, ScanNetInstanceSegDataset,
@@ -26,12 +13,23 @@ from .scannet_dataset import (ScanNetDataset, ScanNetInstanceSegDataset,
from .seg3d_dataset import Seg3DDataset
from .semantickitti_dataset import SemanticKITTIDataset
from .sunrgbd_dataset import SUNRGBDDataset
# yapf: disable
from .transforms import (AffineResize, BackgroundPointsFilter, GlobalAlignment,
GlobalRotScaleTrans, IndoorPatchPointSample,
IndoorPointSample, LoadAnnotations3D,
LoadPointsFromDict, LoadPointsFromFile,
LoadPointsFromMultiSweeps, NormalizePointsColor,
ObjectNameFilter, ObjectNoise, ObjectRangeFilter,
ObjectSample, PointSample, PointShuffle,
PointsRangeFilter, RandomDropPointsColor,
RandomFlip3D, RandomJitterPoints, RandomShiftScale,
VoxelBasedPointSampler)
from .utils import get_loading_pipeline
from .waymo_dataset import WaymoDataset
__all__ = [
'KittiDataset', 'KittiMonoDataset', 'DATASETS', 'CBGSDataset',
'build_dataset', 'NuScenesDataset', 'NuScenesMonoDataset', 'LyftDataset',
'KittiDataset', 'DATASETS', 'CBGSDataset',
'build_dataset', 'NuScenesDataset', 'LyftDataset',
'ObjectSample', 'RandomFlip3D', 'ObjectNoise', 'GlobalRotScaleTrans',
'PointShuffle', 'ObjectRangeFilter', 'PointsRangeFilter',
'LoadPointsFromFile', 'S3DISSegDataset', 'S3DISDataset',
@@ -18,9 +18,9 @@ PIPELINES = TRANSFORMS
def build_dataset(cfg, default_args=None):
from mmengine import ClassBalancedDataset, ConcatDataset, RepeatDataset
from mmdet3d.datasets.dataset_wrappers import CBGSDataset
from mmdet.datasets.dataset_wrappers import (ClassBalancedDataset,
ConcatDataset, RepeatDataset)
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif cfg['type'] == 'ConcatDataset':
@@ -7,7 +7,7 @@ from nuscenes.utils.geometry_utils import view_points
from pyquaternion import Quaternion
from shapely.geometry import MultiPoint, box
from mmdet3d.core.bbox import points_cam2img
from mmdet3d.structures import points_cam2img
nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone',
@@ -8,7 +8,7 @@ import numpy as np
from mmengine.dataset import BaseDataset
from mmdet3d.datasets import DATASETS
from ..core.bbox import get_box_type
from mmdet3d.structures import get_box_type
@DATASETS.register_module()
@@ -139,7 +139,10 @@ class Det3DDataset(BaseDataset):
img_filtered_annotations = {}
filter_mask = ann_info['gt_labels_3d'] > -1
for key in ann_info.keys():
img_filtered_annotations[key] = (ann_info[key][filter_mask])
if key != 'instances':
img_filtered_annotations[key] = (ann_info[key][filter_mask])
else:
img_filtered_annotations[key] = ann_info[key]
return img_filtered_annotations
def get_ann_info(self, index: int) -> dict:
@@ -180,7 +183,7 @@ class Det3DDataset(BaseDataset):
# add s or gt prefix for most keys after concat
# we only process 3d annotations here, the corresponding
# 2d annotation process is in the `LoadAnnotations3D`
# in `pipelines`
# in `transforms`
name_mapping = {
'bbox_label_3d': 'gt_labels_3d',
'bbox_3d': 'gt_bboxes_3d',
@@ -197,17 +200,16 @@ class Det3DDataset(BaseDataset):
keys = list(instances[0].keys())
ann_info = dict()
for ann_name in keys:
temp_anns = [item[ann_name] for item in instances]
# map the original dataset label to training label
if 'label' in ann_name:
temp_anns = [
self.label_mapping[item] for item in temp_anns
]
if ann_name in name_mapping:
temp_anns = [item[ann_name] for item in instances]
# map the original dataset label to training label
if 'label' in ann_name:
temp_anns = [
self.label_mapping[item] for item in temp_anns
]
temp_anns = np.array(temp_anns)
ann_name = name_mapping[ann_name]
ann_info[ann_name] = temp_anns
temp_anns = np.array(temp_anns)
ann_info[ann_name] = temp_anns
ann_info['instances'] = info['instances']
return ann_info
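To follow the rewritten parse_ann_info loop above, here is a self-contained sketch of what the remapping does for a toy instances list; the label_mapping values and box numbers are invented:

```python
import numpy as np

name_mapping = {'bbox_label_3d': 'gt_labels_3d', 'bbox_3d': 'gt_bboxes_3d'}
label_mapping = {0: 0, 1: 1, 2: -1}  # toy dataset-label -> training-label map

instances = [
    dict(bbox_3d=[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0], bbox_label_3d=1),
    dict(bbox_3d=[2.0, 1.0, 0.0, 1.5, 1.0, 1.0, 0.3], bbox_label_3d=2),
]

ann_info = dict()
for ann_name in instances[0].keys():
    if ann_name in name_mapping:             # only mapped keys are collected
        temp_anns = [item[ann_name] for item in instances]
        if 'label' in ann_name:              # map dataset labels to training labels
            temp_anns = [label_mapping[item] for item in temp_anns]
        ann_info[name_mapping[ann_name]] = np.array(temp_anns)

ann_info['instances'] = instances            # raw instances kept alongside
# ann_info now holds gt_bboxes_3d with shape (2, 7) and gt_labels_3d == [1, -1]
```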
@@ -213,7 +213,7 @@ class Kitti2DDataset(Det3DDataset):
Returns:
list[dict]: A list of dictionaries with the kitti 2D format.
"""
from mmdet3d.core.bbox.transforms import bbox2result_kitti2d
from mmdet3d.structures.ops.transforms import bbox2result_kitti2d
sample_idx = [info['image']['image_idx'] for info in self.data_infos]
result_files = bbox2result_kitti2d(outputs, self.CLASSES, sample_idx,
out)
@@ -231,7 +231,7 @@ class Kitti2DDataset(Det3DDataset):
tuple (str, dict): Average precision results in str format
and average precision results in dict format.
"""
from mmdet3d.core.evaluation import kitti_eval
from mmdet3d.evaluation import kitti_eval
eval_types = ['bbox'] if not eval_types else eval_types
assert eval_types in ('bbox', ['bbox'
]), 'KITTI data set only evaluate bbox'
@@ -4,7 +4,7 @@ from typing import Callable, List, Optional, Union
import numpy as np
from mmdet3d.datasets import DATASETS
from ..core.bbox import CameraInstance3DBoxes
from mmdet3d.structures import CameraInstance3DBoxes
from .det3d_dataset import Det3DDataset
@@ -119,7 +119,7 @@ class KittiDataset(Det3DDataset):
Returns:
dict: annotation information consists of the following keys:
- bboxes_3d (:obj:`LiDARInstance3DBoxes`):
- gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`):
3D ground truth bboxes.
- bbox_labels_3d (np.ndarray): Labels of ground truths.
- gt_bboxes (np.ndarray): 2D ground truth bboxes.
......
@@ -4,7 +4,7 @@ from typing import Dict, List
import numpy as np
from mmdet3d.registry import DATASETS
from ..core.bbox import LiDARInstance3DBoxes
from mmdet3d.structures import LiDARInstance3DBoxes
from .det3d_dataset import Det3DDataset
@@ -4,9 +4,9 @@ from typing import Dict, List
import numpy as np
from mmdet3d.core.bbox.structures.cam_box3d import CameraInstance3DBoxes
from mmdet3d.registry import DATASETS
from ..core.bbox import LiDARInstance3DBoxes
from mmdet3d.structures import LiDARInstance3DBoxes
from mmdet3d.structures.bbox_3d.cam_box3d import CameraInstance3DBoxes
from .det3d_dataset import Det3DDataset
......