Unverified commit d7067e44 authored by Wenwei Zhang, committed by GitHub

Bump version to v1.1.0rc2

Bump to v1.1.0rc2
parents 28fe73d2 fb0e57e5
...@@ -48,15 +48,14 @@ def test_getitem(): ...@@ -48,15 +48,14 @@ def test_getitem():
ann_file, ann_file,
data_prefix=data_prefix, data_prefix=data_prefix,
pipeline=pipeline, pipeline=pipeline,
metainfo=dict(CLASSES=classes), metainfo=dict(classes=classes),
modality=modality) modality=modality)
lyft_dataset.prepare_data(0) lyft_dataset.prepare_data(0)
input_dict = lyft_dataset.get_data_info(0) input_dict = lyft_dataset.get_data_info(0)
# assert the path should contain data_prefix and data_root # assert the path should contain data_prefix and data_root
assert input_dict['lidar_points'][ assert data_prefix['pts'] in input_dict['lidar_points']['lidar_path']
'lidar_path'] == 'tests/data/lyft/lidar/host-a017_lidar1_' \ assert data_root in input_dict['lidar_points']['lidar_path']
'1236118886901125926.bin'
ann_info = lyft_dataset.parse_ann_info(input_dict) ann_info = lyft_dataset.parse_ann_info(input_dict)
...@@ -68,4 +67,4 @@ def test_getitem(): ...@@ -68,4 +67,4 @@ def test_getitem():
assert 'gt_bboxes_3d' in ann_info assert 'gt_bboxes_3d' in ann_info
assert isinstance(ann_info['gt_bboxes_3d'], LiDARInstance3DBoxes) assert isinstance(ann_info['gt_bboxes_3d'], LiDARInstance3DBoxes)
assert len(lyft_dataset.metainfo['CLASSES']) == 9 assert len(lyft_dataset.metainfo['classes']) == 9
...@@ -51,17 +51,15 @@ def test_getitem(): ...@@ -51,17 +51,15 @@ def test_getitem():
ann_file=ann_file, ann_file=ann_file,
data_prefix=data_prefix, data_prefix=data_prefix,
pipeline=pipeline, pipeline=pipeline,
metainfo=dict(CLASSES=classes), metainfo=dict(classes=classes),
modality=modality) modality=modality)
nus_dataset.prepare_data(0) nus_dataset.prepare_data(0)
input_dict = nus_dataset.get_data_info(0) input_dict = nus_dataset.get_data_info(0)
# assert the path should contain data_prefix and data_root # assert the path should contain data_prefix and data_root
assert data_prefix['pts'] in input_dict['lidar_points']['lidar_path'] assert data_prefix['pts'] in input_dict['lidar_points']['lidar_path']
assert input_dict['lidar_points'][ assert data_root in input_dict['lidar_points']['lidar_path']
'lidar_path'] == 'tests/data/nuscenes/samples/LIDAR_TOP/' \
'n015-2018-08-02-17-16-37+0800__LIDAR_TOP__' \
'1533201470948018.pcd.bin'
for cam_id, img_info in input_dict['images'].items(): for cam_id, img_info in input_dict['images'].items():
if 'img_path' in img_info: if 'img_path' in img_info:
assert data_prefix['img'] in img_info['img_path'] assert data_prefix['img'] in img_info['img_path']
...@@ -77,7 +75,7 @@ def test_getitem(): ...@@ -77,7 +75,7 @@ def test_getitem():
assert 'gt_bboxes_3d' in ann_info assert 'gt_bboxes_3d' in ann_info
assert isinstance(ann_info['gt_bboxes_3d'], LiDARInstance3DBoxes) assert isinstance(ann_info['gt_bboxes_3d'], LiDARInstance3DBoxes)
assert len(nus_dataset.metainfo['CLASSES']) == 10 assert len(nus_dataset.metainfo['classes']) == 10
assert input_dict['token'] == 'fd8420396768425eabec9bdddf7e64b6' assert input_dict['token'] == 'fd8420396768425eabec9bdddf7e64b6'
assert input_dict['timestamp'] == 1533201470.448696 assert input_dict['timestamp'] == 1533201470.448696
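Note (illustrative sketch, not part of the diff): the hunks above reflect two API changes in this release candidate — dataset metainfo keys are now lowercase ('classes' instead of 'CLASSES'), and the lidar-path checks assert containment of data_root/data_prefix rather than an exact path. A minimal sketch of the updated usage follows; the ann_file name, data paths, and class names are placeholders.

from mmdet3d.datasets import NuScenesDataset
from mmdet3d.utils import register_all_modules

register_all_modules()
data_root = 'tests/data/nuscenes'                     # placeholder path
data_prefix = dict(pts='samples/LIDAR_TOP', img='samples')
nus_dataset = NuScenesDataset(
    data_root,
    'nus_info.pkl',                                   # placeholder ann_file
    data_prefix=data_prefix,
    metainfo=dict(classes=('car', 'pedestrian')),     # lowercase metainfo key
    modality=dict(use_lidar=True, use_camera=True))
input_dict = nus_dataset.get_data_info(0)
# paths only need to contain the prefix/root, not match an exact string
assert data_prefix['pts'] in input_dict['lidar_points']['lidar_path']
assert data_root in input_dict['lidar_points']['lidar_path']
assert len(nus_dataset.metainfo['classes']) == 2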
...@@ -3,8 +3,10 @@ import unittest ...@@ -3,8 +3,10 @@ import unittest
import numpy as np import numpy as np
import torch import torch
from mmengine.testing import assert_allclose
from mmdet3d.datasets import S3DISSegDataset from mmdet3d.datasets import S3DISDataset, S3DISSegDataset
from mmdet3d.structures import DepthInstance3DBoxes
from mmdet3d.utils import register_all_modules from mmdet3d.utils import register_all_modules
...@@ -55,8 +57,101 @@ def _generate_s3dis_seg_dataset_config(): ...@@ -55,8 +57,101 @@ def _generate_s3dis_seg_dataset_config():
pipeline, modality) pipeline, modality)
def _generate_s3dis_dataset_config():
data_root = 'tests/data/s3dis'
ann_file = 's3dis_infos.pkl'
classes = ('table', 'chair', 'sofa', 'bookcase', 'board')
modality = dict(use_lidar=True, use_camera=False)
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=True,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(
type='LoadAnnotations3D',
with_bbox_3d=True,
with_label_3d=True,
with_mask_3d=True,
with_seg_3d=True),
dict(type='PointSegClassMapping'),
dict(type='PointSample', num_points=5),
dict(
type='RandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=1.0,
flip_ratio_bev_vertical=1.0),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.087266, 0.087266],
scale_ratio_range=[1.0, 1.0]),
dict(type='NormalizePointsColor', color_mean=None),
dict(
type='Pack3DDetInputs',
keys=[
'points', 'pts_semantic_mask', 'gt_bboxes_3d', 'gt_labels_3d',
'pts_instance_mask'
])
]
data_prefix = dict(
pts='points',
pts_instance_mask='instance_mask',
pts_semantic_mask='semantic_mask')
return data_root, ann_file, classes, data_prefix, pipeline, modality
class TestS3DISDataset(unittest.TestCase): class TestS3DISDataset(unittest.TestCase):
def test_s3dis(self):
np.random.seed(0)
data_root, ann_file, classes, data_prefix, \
pipeline, modality = _generate_s3dis_dataset_config()
register_all_modules()
s3dis_dataset = S3DISDataset(
data_root,
ann_file,
data_prefix=data_prefix,
pipeline=pipeline,
metainfo=dict(classes=classes),
modality=modality)
s3dis_dataset.prepare_data(0)
input_dict = s3dis_dataset.get_data_info(0)
s3dis_dataset[0]
# assert the path should contain data_prefix and data_root
self.assertIn(data_prefix['pts'],
input_dict['lidar_points']['lidar_path'])
self.assertIn(data_root, input_dict['lidar_points']['lidar_path'])
ann_info = s3dis_dataset.parse_ann_info(input_dict)
# assert the keys in ann_info and the type
except_label = np.array([1, 1, 3, 1, 2, 0, 0, 0, 3])
self.assertEqual(ann_info['gt_labels_3d'].dtype, np.int64)
assert_allclose(ann_info['gt_labels_3d'], except_label)
self.assertIsInstance(ann_info['gt_bboxes_3d'], DepthInstance3DBoxes)
assert len(ann_info['gt_bboxes_3d']) == 9
assert torch.allclose(ann_info['gt_bboxes_3d'].tensor.sum(),
torch.tensor([63.0455]))
no_class_s3dis_dataset = S3DISDataset(
data_root, ann_file, metainfo=dict(classes=['table']))
input_dict = no_class_s3dis_dataset.get_data_info(0)
ann_info = no_class_s3dis_dataset.parse_ann_info(input_dict)
# assert the keys in ann_info and the type
self.assertIn('gt_labels_3d', ann_info)
# assert labels are mapped to -1 or 0
assert (ann_info['gt_labels_3d'] <= 0).all()
self.assertEqual(ann_info['gt_labels_3d'].dtype, np.int64)
# all instances have been filtered by classes
self.assertEqual(len(ann_info['gt_labels_3d']), 9)
self.assertEqual(len(no_class_s3dis_dataset.metainfo['classes']), 1)
def test_s3dis_seg(self): def test_s3dis_seg(self):
data_root, ann_file, classes, palette, scene_idxs, data_prefix, \ data_root, ann_file, classes, palette, scene_idxs, data_prefix, \
pipeline, modality, = _generate_s3dis_seg_dataset_config() pipeline, modality, = _generate_s3dis_seg_dataset_config()
...@@ -67,7 +162,7 @@ class TestS3DISDataset(unittest.TestCase): ...@@ -67,7 +162,7 @@ class TestS3DISDataset(unittest.TestCase):
s3dis_seg_dataset = S3DISSegDataset( s3dis_seg_dataset = S3DISSegDataset(
data_root, data_root,
ann_file, ann_file,
metainfo=dict(CLASSES=classes, PALETTE=palette), metainfo=dict(classes=classes, palette=palette),
data_prefix=data_prefix, data_prefix=data_prefix,
pipeline=pipeline, pipeline=pipeline,
modality=modality, modality=modality,
......
...@@ -39,7 +39,7 @@ def _generate_scannet_seg_dataset_config(): ...@@ -39,7 +39,7 @@ def _generate_scannet_seg_dataset_config():
[227, 119, 194], [227, 119, 194],
[82, 84, 163], [82, 84, 163],
] ]
scene_idxs = [0 for _ in range(20)] scene_idxs = [0]
modality = dict(use_lidar=True, use_camera=False) modality = dict(use_lidar=True, use_camera=False)
pipeline = [ pipeline = [
dict( dict(
...@@ -83,22 +83,39 @@ def _generate_scannet_dataset_config(): ...@@ -83,22 +83,39 @@ def _generate_scannet_dataset_config():
'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'bookshelf', 'picture', 'counter', 'desk', 'curtain',
'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',
'garbagebin') 'garbagebin')
# TODO add pipline
from mmcv.transforms.base import BaseTransform
from mmengine.registry import TRANSFORMS
if 'Identity' not in TRANSFORMS:
@TRANSFORMS.register_module()
class Identity(BaseTransform):
def transform(self, info):
if 'ann_info' in info:
info['gt_labels_3d'] = info['ann_info']['gt_labels_3d']
return info
modality = dict(use_lidar=True, use_camera=False) modality = dict(use_lidar=True, use_camera=False)
pipeline = [ pipeline = [
dict(type='Identity'), dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=True,
load_dim=6,
use_dim=[0, 1, 2]),
dict(
type='LoadAnnotations3D',
with_bbox_3d=True,
with_label_3d=True,
with_mask_3d=True,
with_seg_3d=True),
dict(type='GlobalAlignment', rotation_axis=2),
dict(type='PointSegClassMapping'),
dict(type='PointSample', num_points=5),
dict(
type='RandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=1.0,
flip_ratio_bev_vertical=1.0),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.087266, 0.087266],
scale_ratio_range=[1.0, 1.0],
shift_height=True),
dict(
type='Pack3DDetInputs',
keys=[
'points', 'pts_semantic_mask', 'gt_bboxes_3d', 'gt_labels_3d',
'pts_instance_mask'
])
] ]
data_prefix = dict( data_prefix = dict(
pts='points', pts='points',
...@@ -113,13 +130,13 @@ class TestScanNetDataset(unittest.TestCase): ...@@ -113,13 +130,13 @@ class TestScanNetDataset(unittest.TestCase):
np.random.seed(0) np.random.seed(0)
data_root, ann_file, classes, data_prefix, \ data_root, ann_file, classes, data_prefix, \
pipeline, modality, = _generate_scannet_dataset_config() pipeline, modality, = _generate_scannet_dataset_config()
register_all_modules()
scannet_dataset = ScanNetDataset( scannet_dataset = ScanNetDataset(
data_root, data_root,
ann_file, ann_file,
data_prefix=data_prefix, data_prefix=data_prefix,
pipeline=pipeline, pipeline=pipeline,
metainfo=dict(CLASSES=classes), metainfo=dict(classes=classes),
modality=modality) modality=modality)
scannet_dataset.prepare_data(0) scannet_dataset.prepare_data(0)
...@@ -146,7 +163,7 @@ class TestScanNetDataset(unittest.TestCase): ...@@ -146,7 +163,7 @@ class TestScanNetDataset(unittest.TestCase):
torch.tensor([107.7353])) torch.tensor([107.7353]))
no_class_scannet_dataset = ScanNetDataset( no_class_scannet_dataset = ScanNetDataset(
data_root, ann_file, metainfo=dict(CLASSES=['cabinet'])) data_root, ann_file, metainfo=dict(classes=['cabinet']))
input_dict = no_class_scannet_dataset.get_data_info(0) input_dict = no_class_scannet_dataset.get_data_info(0)
ann_info = no_class_scannet_dataset.parse_ann_info(input_dict) ann_info = no_class_scannet_dataset.parse_ann_info(input_dict)
...@@ -158,7 +175,7 @@ class TestScanNetDataset(unittest.TestCase): ...@@ -158,7 +175,7 @@ class TestScanNetDataset(unittest.TestCase):
self.assertEqual(ann_info['gt_labels_3d'].dtype, np.int64) self.assertEqual(ann_info['gt_labels_3d'].dtype, np.int64)
# all instances have been filtered by classes # all instances have been filtered by classes
self.assertEqual(len(ann_info['gt_labels_3d']), 27) self.assertEqual(len(ann_info['gt_labels_3d']), 27)
self.assertEqual(len(no_class_scannet_dataset.metainfo['CLASSES']), 1) self.assertEqual(len(no_class_scannet_dataset.metainfo['classes']), 1)
def test_scannet_seg(self): def test_scannet_seg(self):
data_root, ann_file, classes, palette, scene_idxs, data_prefix, \ data_root, ann_file, classes, palette, scene_idxs, data_prefix, \
...@@ -169,7 +186,7 @@ class TestScanNetDataset(unittest.TestCase): ...@@ -169,7 +186,7 @@ class TestScanNetDataset(unittest.TestCase):
scannet_seg_dataset = ScanNetSegDataset( scannet_seg_dataset = ScanNetSegDataset(
data_root, data_root,
ann_file, ann_file,
metainfo=dict(CLASSES=classes, PALETTE=palette), metainfo=dict(classes=classes, palette=palette),
data_prefix=data_prefix, data_prefix=data_prefix,
pipeline=pipeline, pipeline=pipeline,
modality=modality, modality=modality,
......
...@@ -72,7 +72,7 @@ class TestSemanticKITTIDataset(unittest.TestCase): ...@@ -72,7 +72,7 @@ class TestSemanticKITTIDataset(unittest.TestCase):
semantickitti_dataset = SemanticKITTIDataset( semantickitti_dataset = SemanticKITTIDataset(
data_root, data_root,
ann_file, ann_file,
metainfo=dict(CLASSES=classes, PALETTE=palette), metainfo=dict(classes=classes, palette=palette),
data_prefix=data_prefix, data_prefix=data_prefix,
pipeline=pipeline, pipeline=pipeline,
modality=modality) modality=modality)
......
...@@ -48,7 +48,7 @@ class TestScanNetDataset(unittest.TestCase): ...@@ -48,7 +48,7 @@ class TestScanNetDataset(unittest.TestCase):
ann_file, ann_file,
data_prefix=data_prefix, data_prefix=data_prefix,
pipeline=pipeline, pipeline=pipeline,
metainfo=dict(CLASSES=classes), metainfo=dict(classes=classes),
modality=modality) modality=modality)
scannet_dataset.prepare_data(0) scannet_dataset.prepare_data(0)
...@@ -81,7 +81,7 @@ class TestScanNetDataset(unittest.TestCase): ...@@ -81,7 +81,7 @@ class TestScanNetDataset(unittest.TestCase):
ann_file, ann_file,
data_prefix=data_prefix, data_prefix=data_prefix,
pipeline=pipeline, pipeline=pipeline,
metainfo=dict(CLASSES=classes), metainfo=dict(classes=classes),
modality=modality) modality=modality)
input_dict = bed_scannet_dataset.get_data_info(0) input_dict = bed_scannet_dataset.get_data_info(0)
...@@ -94,4 +94,4 @@ class TestScanNetDataset(unittest.TestCase): ...@@ -94,4 +94,4 @@ class TestScanNetDataset(unittest.TestCase):
assert ann_info['gt_labels_3d'].dtype == np.int64 assert ann_info['gt_labels_3d'].dtype == np.int64
# all instances have been filtered by classes # all instances have been filtered by classes
self.assertEqual(len(ann_info['gt_labels_3d']), 3) self.assertEqual(len(ann_info['gt_labels_3d']), 3)
self.assertEqual(len(bed_scannet_dataset.metainfo['CLASSES']), 1) self.assertEqual(len(bed_scannet_dataset.metainfo['classes']), 1)
...@@ -3,10 +3,10 @@ import unittest ...@@ -3,10 +3,10 @@ import unittest
import torch import torch
from mmengine.testing import assert_allclose from mmengine.testing import assert_allclose
from utils import create_data_info_after_loading
from mmdet3d.datasets.transforms.formating import Pack3DDetInputs from mmdet3d.datasets.transforms.formating import Pack3DDetInputs
from mmdet3d.structures import LiDARInstance3DBoxes from mmdet3d.structures import LiDARInstance3DBoxes
from mmdet3d.testing import create_data_info_after_loading
class TestPack3DDetInputs(unittest.TestCase): class TestPack3DDetInputs(unittest.TestCase):
......
...@@ -4,12 +4,12 @@ import unittest ...@@ -4,12 +4,12 @@ import unittest
import numpy as np import numpy as np
import torch import torch
from mmengine.testing import assert_allclose from mmengine.testing import assert_allclose
from utils import create_dummy_data_info
from mmdet3d.datasets.transforms import PointSegClassMapping from mmdet3d.datasets.transforms import PointSegClassMapping
from mmdet3d.datasets.transforms.loading import (LoadAnnotations3D, from mmdet3d.datasets.transforms.loading import (LoadAnnotations3D,
LoadPointsFromFile) LoadPointsFromFile)
from mmdet3d.structures import DepthPoints, LiDARPoints from mmdet3d.structures import DepthPoints, LiDARPoints
from mmdet3d.testing import create_dummy_data_info
class TestLoadPointsFromFile(unittest.TestCase): class TestLoadPointsFromFile(unittest.TestCase):
......
...@@ -5,10 +5,10 @@ import unittest ...@@ -5,10 +5,10 @@ import unittest
import numpy as np import numpy as np
import torch import torch
from mmengine.testing import assert_allclose from mmengine.testing import assert_allclose
from utils import create_data_info_after_loading
from mmdet3d.datasets import GlobalAlignment, RandomFlip3D from mmdet3d.datasets import GlobalAlignment, RandomFlip3D
from mmdet3d.datasets.transforms import GlobalRotScaleTrans from mmdet3d.datasets.transforms import GlobalRotScaleTrans
from mmdet3d.testing import create_data_info_after_loading
class TestGlobalRotScaleTrans(unittest.TestCase): class TestGlobalRotScaleTrans(unittest.TestCase):
......
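Note (illustrative sketch, not part of the diff): the transform tests above now pull their dummy-data helpers from the public mmdet3d.testing package instead of a local tests/utils module. The call signatures below are assumed for illustration.

from mmdet3d.testing import (create_data_info_after_loading,
                             create_dummy_data_info)

# create_dummy_data_info builds a fake raw data-info dict for the loading
# transforms; create_data_info_after_loading returns an info dict as it looks
# after the loading transforms have run (argument-free calls assumed here).
info = create_dummy_data_info()
loaded_info = create_data_info_after_loading()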
...@@ -49,7 +49,7 @@ class TestIndoorMetric(unittest.TestCase): ...@@ -49,7 +49,7 @@ class TestIndoorMetric(unittest.TestCase):
pred_dict['eval_ann_info'] = eval_ann_info pred_dict['eval_ann_info'] = eval_ann_info
indoor_metric.dataset_meta = { indoor_metric.dataset_meta = {
'CLASSES': ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'classes': ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
'window', 'bookshelf', 'picture', 'counter', 'desk', 'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'curtain', 'refrigerator', 'showercurtrain', 'toilet',
'sink', 'bathtub', 'garbagebin'), 'sink', 'bathtub', 'garbagebin'),
......
...@@ -67,7 +67,7 @@ class TestInstanceSegMetric(unittest.TestCase): ...@@ -67,7 +67,7 @@ class TestInstanceSegMetric(unittest.TestCase):
'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'curtain', 'refrigerator', 'showercurtrain', 'toilet',
'sink', 'bathtub', 'garbagebin') 'sink', 'bathtub', 'garbagebin')
dataset_meta = dict( dataset_meta = dict(
seg_valid_class_ids=seg_valid_class_ids, CLASSES=class_labels) seg_valid_class_ids=seg_valid_class_ids, classes=class_labels)
instance_seg_metric = InstanceSegMetric() instance_seg_metric = InstanceSegMetric()
instance_seg_metric.dataset_meta = dataset_meta instance_seg_metric.dataset_meta = dataset_meta
instance_seg_metric.process(data_batch, predictions) instance_seg_metric.process(data_batch, predictions)
......
...@@ -53,7 +53,7 @@ def test_multi_modal_kitti_metric(): ...@@ -53,7 +53,7 @@ def test_multi_modal_kitti_metric():
pytest.skip('test requires GPU and torch+cuda') pytest.skip('test requires GPU and torch+cuda')
kittimetric = KittiMetric( kittimetric = KittiMetric(
data_root + '/kitti_infos_train.pkl', metric=['mAP']) data_root + '/kitti_infos_train.pkl', metric=['mAP'])
kittimetric.dataset_meta = dict(CLASSES=['Pedestrian', 'Cyclist', 'Car']) kittimetric.dataset_meta = dict(classes=['Pedestrian', 'Cyclist', 'Car'])
data_batch, predictions = _init_multi_modal_evaluate_input() data_batch, predictions = _init_multi_modal_evaluate_input()
kittimetric.process(data_batch, predictions) kittimetric.process(data_batch, predictions)
ap_dict = kittimetric.compute_metrics(kittimetric.results) ap_dict = kittimetric.compute_metrics(kittimetric.results)
...@@ -76,7 +76,7 @@ def test_kitti_metric_mAP(): ...@@ -76,7 +76,7 @@ def test_kitti_metric_mAP():
pytest.skip('test requires GPU and torch+cuda') pytest.skip('test requires GPU and torch+cuda')
kittimetric = KittiMetric( kittimetric = KittiMetric(
data_root + '/kitti_infos_train.pkl', metric=['mAP']) data_root + '/kitti_infos_train.pkl', metric=['mAP'])
kittimetric.dataset_meta = dict(CLASSES=['Pedestrian', 'Cyclist', 'Car']) kittimetric.dataset_meta = dict(classes=['Pedestrian', 'Cyclist', 'Car'])
data_batch, predictions = _init_evaluate_input() data_batch, predictions = _init_evaluate_input()
kittimetric.process(data_batch, predictions) kittimetric.process(data_batch, predictions)
ap_dict = kittimetric.compute_metrics(kittimetric.results) ap_dict = kittimetric.compute_metrics(kittimetric.results)
......
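Note (illustrative sketch, not part of the diff): metric objects follow the same key rename — dataset_meta now uses the lowercase 'classes' key. A minimal sketch mirroring the hunk above, with the import path assumed.

from mmdet3d.evaluation import KittiMetric  # import path assumed

data_root = 'tests/data/kitti'
kittimetric = KittiMetric(
    data_root + '/kitti_infos_train.pkl', metric=['mAP'])
kittimetric.dataset_meta = dict(classes=['Pedestrian', 'Cyclist', 'Car'])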
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmdet3d import * # noqa
from mmdet3d.models.dense_heads import FCAF3DHead
from mmdet3d.testing import create_detector_inputs
class TestAnchor3DHead(TestCase):
def test_fcaf3d_head_loss(self):
"""Test anchor head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
try:
import MinkowskiEngine as ME
except ImportError:
pytest.skip('test requires MinkowskiEngine installation')
# build head
fcaf3d_head = FCAF3DHead(
in_channels=(64, 128, 256, 512),
out_channels=128,
voxel_size=1.,
pts_prune_threshold=1000,
pts_assign_threshold=27,
pts_center_threshold=18,
num_classes=18,
num_reg_outs=6,
test_cfg=dict(nms_pre=1000, iou_thr=.5, score_thr=.01),
center_loss=dict(type='mmdet.CrossEntropyLoss', use_sigmoid=True),
bbox_loss=dict(type='AxisAlignedIoULoss'),
cls_loss=dict(type='mmdet.FocalLoss'),
)
fcaf3d_head = fcaf3d_head.cuda()
# fake input of head
coordinates, features = [torch.randn(500, 3).cuda() * 100
], [torch.randn(500, 3).cuda()]
tensor_coordinates, tensor_features = ME.utils.sparse_collate(
coordinates, features)
x = ME.SparseTensor(
features=tensor_features, coordinates=tensor_coordinates)
# backbone
conv1 = ME.MinkowskiConvolution(
3, 64, kernel_size=3, stride=2, dimension=3).cuda()
conv2 = ME.MinkowskiConvolution(
64, 128, kernel_size=3, stride=2, dimension=3).cuda()
conv3 = ME.MinkowskiConvolution(
128, 256, kernel_size=3, stride=2, dimension=3).cuda()
conv4 = ME.MinkowskiConvolution(
256, 512, kernel_size=3, stride=2, dimension=3).cuda()
# backbone outputs of 4 levels
x1 = conv1(x)
x2 = conv2(x1)
x3 = conv3(x2)
x4 = conv4(x3)
x = (x1, x2, x3, x4)
# fake annotation
packed_inputs = create_detector_inputs(
with_points=False,
with_img=False,
num_gt_instance=3,
num_classes=1,
points_feat_dim=6,
gt_bboxes_dim=6)
data_samples = [
sample.cuda() for sample in packed_inputs['data_samples']
]
gt_losses = fcaf3d_head.loss(x, data_samples)
print(gt_losses)
self.assertGreaterEqual(gt_losses['cls_loss'], 0,
'cls loss should be non-negative')
self.assertGreaterEqual(gt_losses['bbox_loss'], 0,
'bbox loss should be non-negative')
self.assertGreaterEqual(gt_losses['center_loss'], 0,
'center loss should be non-negative')
...@@ -4,8 +4,8 @@ import torch ...@@ -4,8 +4,8 @@ import torch
from mmengine import DefaultScope from mmengine import DefaultScope
from mmdet3d.registry import MODELS from mmdet3d.registry import MODELS
from tests.utils.model_utils import (_create_detector_inputs, from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
_get_detector_cfg, _setup_seed) setup_seed)
class TestFreeAnchor(unittest.TestCase): class TestFreeAnchor(unittest.TestCase):
...@@ -15,8 +15,8 @@ class TestFreeAnchor(unittest.TestCase): ...@@ -15,8 +15,8 @@ class TestFreeAnchor(unittest.TestCase):
assert hasattr(mmdet3d.models.dense_heads, 'FreeAnchor3DHead') assert hasattr(mmdet3d.models.dense_heads, 'FreeAnchor3DHead')
DefaultScope.get_instance('test_freeanchor', scope_name='mmdet3d') DefaultScope.get_instance('test_freeanchor', scope_name='mmdet3d')
_setup_seed(0) setup_seed(0)
freeanchor_cfg = _get_detector_cfg( freeanchor_cfg = get_detector_cfg(
'free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor' 'free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor'
'_sbn-all_8xb4-2x_nus-3d.py') '_sbn-all_8xb4-2x_nus-3d.py')
# decrease channels to reduce cuda memory. # decrease channels to reduce cuda memory.
...@@ -29,14 +29,14 @@ class TestFreeAnchor(unittest.TestCase): ...@@ -29,14 +29,14 @@ class TestFreeAnchor(unittest.TestCase):
freeanchor_cfg.pts_bbox_head.in_channels = 1 freeanchor_cfg.pts_bbox_head.in_channels = 1
model = MODELS.build(freeanchor_cfg) model = MODELS.build(freeanchor_cfg)
num_gt_instance = 3 num_gt_instance = 3
packed_inputs = _create_detector_inputs( packed_inputs = create_detector_inputs(
num_gt_instance=num_gt_instance, gt_bboxes_dim=9) num_gt_instance=num_gt_instance, gt_bboxes_dim=9)
# TODO: Support aug_test # TODO: Support aug_test
# aug_data = [ # aug_data = [
# _create_detector_inputs( # create_detector_inputs(
# num_gt_instance=num_gt_instance, gt_bboxes_dim=9), # num_gt_instance=num_gt_instance, gt_bboxes_dim=9),
# _create_detector_inputs( # create_detector_inputs(
# num_gt_instance=num_gt_instance + 1, gt_bboxes_dim=9) # num_gt_instance=num_gt_instance + 1, gt_bboxes_dim=9)
# ] # ]
# # test_aug_test # # test_aug_test
......
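Note (illustrative sketch, not part of the diff): the detector tests above switch from the private helpers in tests/utils/model_utils.py (_create_detector_inputs, _get_detector_cfg, _setup_seed) to their public counterparts in mmdet3d.testing. A minimal sketch of the new pattern, reusing the FreeAnchor config path from the hunk above; the scope name is a placeholder.

from mmengine import DefaultScope

from mmdet3d.registry import MODELS
from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
                             setup_seed)

DefaultScope.get_instance('example_free_anchor', scope_name='mmdet3d')
setup_seed(0)
cfg = get_detector_cfg(
    'free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor'
    '_sbn-all_8xb4-2x_nus-3d.py')
model = MODELS.build(cfg)
packed_inputs = create_detector_inputs(num_gt_instance=3, gt_bboxes_dim=9)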
...@@ -4,8 +4,8 @@ import torch ...@@ -4,8 +4,8 @@ import torch
from mmengine import DefaultScope from mmengine import DefaultScope
from mmdet3d.registry import MODELS from mmdet3d.registry import MODELS
from tests.utils.model_utils import (_create_detector_inputs, from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
_get_detector_cfg, _setup_seed) setup_seed)
class TestSSN(unittest.TestCase): class TestSSN(unittest.TestCase):
...@@ -15,8 +15,8 @@ class TestSSN(unittest.TestCase): ...@@ -15,8 +15,8 @@ class TestSSN(unittest.TestCase):
assert hasattr(mmdet3d.models.dense_heads, 'ShapeAwareHead') assert hasattr(mmdet3d.models.dense_heads, 'ShapeAwareHead')
DefaultScope.get_instance('test_ssn', scope_name='mmdet3d') DefaultScope.get_instance('test_ssn', scope_name='mmdet3d')
_setup_seed(0) setup_seed(0)
ssn_cfg = _get_detector_cfg( ssn_cfg = get_detector_cfg(
'ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py') 'ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py')
ssn_cfg.pts_voxel_encoder.feat_channels = [1, 1] ssn_cfg.pts_voxel_encoder.feat_channels = [1, 1]
ssn_cfg.pts_middle_encoder.in_channels = 1 ssn_cfg.pts_middle_encoder.in_channels = 1
...@@ -28,14 +28,14 @@ class TestSSN(unittest.TestCase): ...@@ -28,14 +28,14 @@ class TestSSN(unittest.TestCase):
ssn_cfg.pts_bbox_head.feat_channels = 1 ssn_cfg.pts_bbox_head.feat_channels = 1
model = MODELS.build(ssn_cfg) model = MODELS.build(ssn_cfg)
num_gt_instance = 50 num_gt_instance = 50
packed_inputs = _create_detector_inputs( packed_inputs = create_detector_inputs(
num_gt_instance=num_gt_instance, gt_bboxes_dim=9) num_gt_instance=num_gt_instance, gt_bboxes_dim=9)
# TODO: Support aug_test # TODO: Support aug_test
# aug_data = [ # aug_data = [
# _create_detector_inputs( # create_detector_inputs(
# num_gt_instance=num_gt_instance, gt_bboxes_dim=9), # num_gt_instance=num_gt_instance, gt_bboxes_dim=9),
# _create_detector_inputs( # create_detector_inputs(
# num_gt_instance=num_gt_instance + 1, gt_bboxes_dim=9) # num_gt_instance=num_gt_instance + 1, gt_bboxes_dim=9)
# ] # ]
# test_aug_test # test_aug_test
......
...@@ -4,8 +4,8 @@ import torch ...@@ -4,8 +4,8 @@ import torch
from mmengine import DefaultScope from mmengine import DefaultScope
from mmdet3d.registry import MODELS from mmdet3d.registry import MODELS
from tests.utils.model_utils import (_create_detector_inputs, from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
_get_detector_cfg, _setup_seed) setup_seed)
class Test3DSSD(unittest.TestCase): class Test3DSSD(unittest.TestCase):
...@@ -15,11 +15,11 @@ class Test3DSSD(unittest.TestCase): ...@@ -15,11 +15,11 @@ class Test3DSSD(unittest.TestCase):
assert hasattr(mmdet3d.models, 'SSD3DNet') assert hasattr(mmdet3d.models, 'SSD3DNet')
DefaultScope.get_instance('test_ssd3d', scope_name='mmdet3d') DefaultScope.get_instance('test_ssd3d', scope_name='mmdet3d')
_setup_seed(0) setup_seed(0)
voxel_net_cfg = _get_detector_cfg('3dssd/3dssd_4xb4_kitti-3d-car.py') voxel_net_cfg = get_detector_cfg('3dssd/3dssd_4xb4_kitti-3d-car.py')
model = MODELS.build(voxel_net_cfg) model = MODELS.build(voxel_net_cfg)
num_gt_instance = 3 num_gt_instance = 3
packed_inputs = _create_detector_inputs( packed_inputs = create_detector_inputs(
num_gt_instance=num_gt_instance, num_classes=1) num_gt_instance=num_gt_instance, num_classes=1)
if torch.cuda.is_available(): if torch.cuda.is_available():
......
...@@ -4,8 +4,8 @@ import torch ...@@ -4,8 +4,8 @@ import torch
from mmengine import DefaultScope from mmengine import DefaultScope
from mmdet3d.registry import MODELS from mmdet3d.registry import MODELS
from tests.utils.model_utils import (_create_detector_inputs, from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
_get_detector_cfg, _setup_seed) setup_seed)
class TestCenterPoint(unittest.TestCase): class TestCenterPoint(unittest.TestCase):
...@@ -15,14 +15,14 @@ class TestCenterPoint(unittest.TestCase): ...@@ -15,14 +15,14 @@ class TestCenterPoint(unittest.TestCase):
assert hasattr(mmdet3d.models, 'CenterPoint') assert hasattr(mmdet3d.models, 'CenterPoint')
_setup_seed(0) setup_seed(0)
DefaultScope.get_instance('test_center_point', scope_name='mmdet3d') DefaultScope.get_instance('test_center_point', scope_name='mmdet3d')
centerpoint_net_cfg = _get_detector_cfg( centerpoint_net_cfg = get_detector_cfg(
'centerpoint/centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py' # noqa 'centerpoint/centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py' # noqa
) )
model = MODELS.build(centerpoint_net_cfg) model = MODELS.build(centerpoint_net_cfg)
num_gt_instance = 50 num_gt_instance = 50
packed_inputs = _create_detector_inputs( packed_inputs = create_detector_inputs(
with_img=True, num_gt_instance=num_gt_instance, points_feat_dim=5) with_img=True, num_gt_instance=num_gt_instance, points_feat_dim=5)
for sample_id in range(len(packed_inputs['data_samples'])): for sample_id in range(len(packed_inputs['data_samples'])):
......
import unittest
import torch
from mmengine import DefaultScope
from mmdet3d.registry import MODELS
from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
setup_seed)
class TestFCAF3d(unittest.TestCase):
def test_fcaf3d(self):
try:
import MinkowskiEngine # noqa: F401
except ImportError:
return
import mmdet3d.models
assert hasattr(mmdet3d.models, 'MinkSingleStage3DDetector')
DefaultScope.get_instance('test_fcaf3d', scope_name='mmdet3d')
setup_seed(0)
fcaf3d_net_cfg = get_detector_cfg(
'fcaf3d/fcaf3d_2xb8_scannet-3d-18class.py')
model = MODELS.build(fcaf3d_net_cfg)
num_gt_instance = 3
packed_inputs = create_detector_inputs(
num_gt_instance=num_gt_instance,
num_classes=1,
points_feat_dim=6,
gt_bboxes_dim=6)
if torch.cuda.is_available():
model = model.cuda()
with torch.no_grad():
data = model.data_preprocessor(packed_inputs, False)
torch.cuda.empty_cache()
results = model.forward(**data, mode='predict')
self.assertEqual(len(results), 1)
self.assertIn('bboxes_3d', results[0].pred_instances_3d)
self.assertIn('scores_3d', results[0].pred_instances_3d)
self.assertIn('labels_3d', results[0].pred_instances_3d)
losses = model.forward(**data, mode='loss')
self.assertGreater(losses['center_loss'], 0)
self.assertGreater(losses['bbox_loss'], 0)
self.assertGreater(losses['cls_loss'], 0)
...@@ -4,8 +4,8 @@ import torch ...@@ -4,8 +4,8 @@ import torch
from mmengine import DefaultScope from mmengine import DefaultScope
from mmdet3d.registry import MODELS from mmdet3d.registry import MODELS
from tests.utils.model_utils import (_create_detector_inputs, from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
_get_detector_cfg, _setup_seed) setup_seed)
class TestGroupfree3d(unittest.TestCase): class TestGroupfree3d(unittest.TestCase):
...@@ -15,12 +15,12 @@ class TestGroupfree3d(unittest.TestCase): ...@@ -15,12 +15,12 @@ class TestGroupfree3d(unittest.TestCase):
assert hasattr(mmdet3d.models, 'GroupFree3DNet') assert hasattr(mmdet3d.models, 'GroupFree3DNet')
DefaultScope.get_instance('test_groupfree3d', scope_name='mmdet3d') DefaultScope.get_instance('test_groupfree3d', scope_name='mmdet3d')
_setup_seed(0) setup_seed(0)
voxel_net_cfg = _get_detector_cfg( voxel_net_cfg = get_detector_cfg(
'groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py') 'groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py')
model = MODELS.build(voxel_net_cfg) model = MODELS.build(voxel_net_cfg)
num_gt_instance = 5 num_gt_instance = 5
packed_inputs = _create_detector_inputs( packed_inputs = create_detector_inputs(
num_gt_instance=num_gt_instance, num_gt_instance=num_gt_instance,
points_feat_dim=3, points_feat_dim=3,
with_pts_semantic_mask=True, with_pts_semantic_mask=True,
......
...@@ -4,8 +4,8 @@ import torch ...@@ -4,8 +4,8 @@ import torch
from mmengine import DefaultScope from mmengine import DefaultScope
from mmdet3d.registry import MODELS from mmdet3d.registry import MODELS
from tests.utils.model_utils import (_create_detector_inputs, from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
_get_detector_cfg, _setup_seed) setup_seed)
class TestH3D(unittest.TestCase): class TestH3D(unittest.TestCase):
...@@ -15,11 +15,11 @@ class TestH3D(unittest.TestCase): ...@@ -15,11 +15,11 @@ class TestH3D(unittest.TestCase):
assert hasattr(mmdet3d.models, 'H3DNet') assert hasattr(mmdet3d.models, 'H3DNet')
DefaultScope.get_instance('test_H3DNet', scope_name='mmdet3d') DefaultScope.get_instance('test_H3DNet', scope_name='mmdet3d')
_setup_seed(0) setup_seed(0)
voxel_net_cfg = _get_detector_cfg('h3dnet/h3dnet_8xb3_scannet-seg.py') voxel_net_cfg = get_detector_cfg('h3dnet/h3dnet_8xb3_scannet-seg.py')
model = MODELS.build(voxel_net_cfg) model = MODELS.build(voxel_net_cfg)
num_gt_instance = 5 num_gt_instance = 5
packed_inputs = _create_detector_inputs( packed_inputs = create_detector_inputs(
num_gt_instance=num_gt_instance, num_gt_instance=num_gt_instance,
points_feat_dim=4, points_feat_dim=4,
bboxes_3d_type='depth', bboxes_3d_type='depth',
......