Unverified Commit d7067e44 authored by Wenwei Zhang, committed by GitHub

Bump version to v1.1.0rc2

Bump to v1.1.0rc2
parents 28fe73d2 fb0e57e5
@@ -7,6 +7,9 @@ _base_ = [
 # If point cloud range is changed, the models should also change their point
 # cloud range accordingly
 point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
+# Using the calibration info to convert the LiDAR-coordinate point cloud range
+# to the ego-coordinate range could bring a small improvement on nuScenes.
+# point_cloud_range = [-51.2, -52, -5.0, 51.2, 50.4, 3.0]
 # For nuScenes we usually do 10-class detection
 class_names = [
     'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
@@ -126,7 +129,7 @@ train_dataloader = dict(
             data_root=data_root,
             ann_file='nuscenes_infos_train.pkl',
             pipeline=train_pipeline,
-            metainfo=dict(CLASSES=class_names),
+            metainfo=dict(classes=class_names),
             test_mode=False,
             data_prefix=data_prefix,
             use_valid_flag=True,
@@ -134,8 +137,8 @@ train_dataloader = dict(
             # and box_type_3d='Depth' in sunrgbd and scannet dataset.
             box_type_3d='LiDAR')))
 test_dataloader = dict(
-    dataset=dict(pipeline=test_pipeline, metainfo=dict(CLASSES=class_names)))
+    dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=class_names)))
 val_dataloader = dict(
-    dataset=dict(pipeline=test_pipeline, metainfo=dict(CLASSES=class_names)))
+    dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=class_names)))
 train_cfg = dict(val_interval=20)
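The commented-out range added above is simply the LiDAR-frame range shifted by the LiDAR-to-ego calibration offset from the nuScenes info files. A minimal, purely illustrative sketch of that conversion (the `lidar_range_to_ego` helper and the simplified `lidar2ego` matrix below are assumptions, not part of the shipped configs):

```python
import itertools

import numpy as np


def lidar_range_to_ego(point_cloud_range, lidar2ego):
    """Convert an axis-aligned [x1, y1, z1, x2, y2, z2] range from the LiDAR
    frame to the ego frame by transforming its eight corners."""
    x1, y1, z1, x2, y2, z2 = point_cloud_range
    corners = np.array(list(itertools.product([x1, x2], [y1, y2], [z1, z2])))
    corners = np.hstack([corners, np.ones((8, 1))])  # homogeneous coordinates
    ego_corners = (lidar2ego @ corners.T).T[:, :3]
    return ego_corners.min(axis=0).tolist() + ego_corners.max(axis=0).tolist()


# With the calibration reduced to a pure -0.8 m shift along y (an assumption
# for illustration), [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] becomes
# [-51.2, -52.0, -5.0, 51.2, 50.4, 3.0], i.e. the commented-out range above.
lidar2ego = np.eye(4)
lidar2ego[1, 3] = -0.8
print(lidar_range_to_ego([-51.2, -51.2, -5.0, 51.2, 51.2, 3.0], lidar2ego))
```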
@@ -4,6 +4,9 @@ _base_ = ['./centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py']
 # cloud range accordingly
 voxel_size = [0.075, 0.075, 0.2]
 point_cloud_range = [-54, -54, -5.0, 54, 54, 3.0]
+# Using the calibration info to convert the LiDAR-coordinate point cloud range
+# to the ego-coordinate range could bring a small improvement on nuScenes.
+# point_cloud_range = [-54, -54.8, -5.0, 54, 53.2, 3.0]
 # For nuScenes we usually do 10-class detection
 class_names = [
     'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
@@ -89,7 +92,6 @@ train_pipeline = [
     dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
     dict(type='ObjectNameFilter', classes=class_names),
     dict(type='PointShuffle'),
-    dict(type='DefaultFormatBundle3D', class_names=class_names),
     dict(
         type='Pack3DDetInputs',
         keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
@@ -122,8 +124,8 @@ test_pipeline = [
 train_dataloader = dict(
     dataset=dict(
         dataset=dict(
-            pipeline=train_pipeline, metainfo=dict(CLASSES=class_names))))
+            pipeline=train_pipeline, metainfo=dict(classes=class_names))))
 test_dataloader = dict(
-    dataset=dict(pipeline=test_pipeline, metainfo=dict(CLASSES=class_names)))
+    dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=class_names)))
 val_dataloader = dict(
-    dataset=dict(pipeline=test_pipeline, metainfo=dict(CLASSES=class_names)))
+    dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=class_names)))
@@ -2,6 +2,9 @@ _base_ = './centerpoint_voxel0075_second_secfpn_' \
               'head-dcn-circlenms_8xb4_cyclic-20e_nus-3d.py'
 point_cloud_range = [-54, -54, -5.0, 54, 54, 3.0]
+# Using the calibration info to convert the LiDAR-coordinate point cloud range
+# to the ego-coordinate range could bring a small improvement on nuScenes.
+# point_cloud_range = [-54, -54.8, -5.0, 54, 53.2, 3.0]
 file_client_args = dict(backend='disk')
 class_names = [
     'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
...
@@ -2,6 +2,9 @@ _base_ = './centerpoint_voxel0075_second_secfpn' \
               '_head-dcn_8xb4-cyclic-20e_nus-3d.py'
 point_cloud_range = [-54, -54, -5.0, 54, 54, 3.0]
+# Using the calibration info to convert the LiDAR-coordinate point cloud range
+# to the ego-coordinate range could bring a small improvement on nuScenes.
+# point_cloud_range = [-54, -54.8, -5.0, 54, 53.2, 3.0]
 file_client_args = dict(backend='disk')
 class_names = [
     'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
...
@@ -4,6 +4,9 @@ _base_ = './centerpoint_voxel0075_second_secfpn' \
 model = dict(test_cfg=dict(pts=dict(use_rotate_nms=True, max_num=500)))
 point_cloud_range = [-54, -54, -5.0, 54, 54, 3.0]
+# Using the calibration info to convert the LiDAR-coordinate point cloud range
+# to the ego-coordinate range could bring a small improvement on nuScenes.
+# point_cloud_range = [-54, -54.8, -5.0, 54, 53.2, 3.0]
 file_client_args = dict(backend='disk')
 class_names = [
     'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
...
@@ -7,6 +7,9 @@ _base_ = [
 # If point cloud range is changed, the models should also change their point
 # cloud range accordingly
 point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
+# Using the calibration info to convert the LiDAR-coordinate point cloud range
+# to the ego-coordinate range could bring a small improvement on nuScenes.
+# point_cloud_range = [-51.2, -52, -5.0, 51.2, 50.4, 3.0]
 # For nuScenes we usually do 10-class detection
 class_names = [
     'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
@@ -127,7 +130,7 @@ train_dataloader = dict(
             data_root=data_root,
             ann_file='nuscenes_infos_train.pkl',
             pipeline=train_pipeline,
-            metainfo=dict(CLASSES=class_names),
+            metainfo=dict(classes=class_names),
             test_mode=False,
             data_prefix=data_prefix,
             use_valid_flag=True,
@@ -135,8 +138,8 @@ train_dataloader = dict(
             # and box_type_3d='Depth' in sunrgbd and scannet dataset.
             box_type_3d='LiDAR')))
 test_dataloader = dict(
-    dataset=dict(pipeline=test_pipeline, metainfo=dict(CLASSES=class_names)))
+    dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=class_names)))
 val_dataloader = dict(
-    dataset=dict(pipeline=test_pipeline, metainfo=dict(CLASSES=class_names)))
+    dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=class_names)))
 train_cfg = dict(val_interval=20)
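All of the dataloader changes above are the same rename: the class list moves from the uppercase `CLASSES` metainfo key to the lowercase `classes` key used by the v1.1 datasets. A quick way to check the merged value on one of the configs touched here (a sketch, assuming MMEngine is installed and the snippet is run from the repository root):

```python
from mmengine.config import Config

cfg = Config.fromfile(
    'configs/centerpoint/'
    'centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py')
# After this change the class names live under metainfo['classes'].
print(cfg.test_dataloader.dataset.metainfo['classes'])
```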
@@ -16,80 +16,80 @@ Collections:
       Version: v0.6.0
 Models:
-  - Name: centerpoint_01voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus
+  - Name: centerpoint_voxel01_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d
     In Collection: CenterPoint
     Config: configs/centerpoint/centerpoint_voxel01_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py
-    Metadata:
+    metadata:
-      Training Memory (GB): 4.9
+      Training Memory (GB): 5.2
     Results:
       - Task: 3D Object Detection
         Dataset: nuScenes
         Metrics:
-          mAP: 56.19
+          mAP: 56.11
-          NDS: 64.43
+          NDS: 64.61
-    Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/centerpoint/centerpoint_01voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus/centerpoint_01voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus_20201001_135205-5db91e00.pth
+    Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_01voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus/centerpoint_01voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus_20220810_030004-9061688e.pth
-  - Name: centerpoint_01voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus
+  - Name: centerpoint_voxel01_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d
     In Collection: CenterPoint
     Config: configs/centerpoint/centerpoint_voxel01_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py
     Metadata:
-      Training Memory (GB): 5.2
+      Training Memory (GB): 5.5
     Results:
       - Task: 3D Object Detection
         Dataset: nuScenes
         Metrics:
-          mAP: 56.34
+          mAP: 56.10
-          NDS: 64.81
+          NDS: 64.69
-    Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/centerpoint/centerpoint_01voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus/centerpoint_01voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus_20201004_075317-26d8176c.pth
+    Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_01voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus/centerpoint_01voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus_20220810_052355-a6928835.pth
-  - Name: centerpoint_0075voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus
+  - Name: centerpoint_voxel0075_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d
     In Collection: CenterPoint
     Config: configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py
     Metadata:
-      Training Memory (GB): 7.8
+      Training Memory (GB): 8.2
     Results:
       - Task: 3D Object Detection
         Dataset: nuScenes
         Metrics:
-          mAP: 57.34
+          mAP: 56.54
-          NDS: 65.23
+          NDS: 65.17
-    Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/centerpoint/centerpoint_0075voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus/centerpoint_0075voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus_20200925_230905-358fbe3b.pth
+    Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_0075voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus/centerpoint_0075voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus_20220810_011659-04cb3a3b.pth
-  - Name: centerpoint_0075voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus
+  - Name: centerpoint_voxel0075_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d
     In Collection: CenterPoint
     Config: configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py
     Metadata:
-      Training Memory (GB): 8.5
+      Training Memory (GB): 8.7
     Results:
       - Task: 3D Object Detection
         Dataset: nuScenes
         Metrics:
-          mAP: 57.27
+          mAP: 56.92
-          NDS: 65.58
+          NDS: 65.27
-    Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/centerpoint/centerpoint_0075voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus/centerpoint_0075voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus_20200930_201619-67c8496f.pth
+    Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_0075voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus/centerpoint_0075voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus_20220810_025930-657f67e0.pth
-  - Name: centerpoint_02pillar_second_secfpn_circlenms_4x8_cyclic_20e_nus
+  - Name: centerpoint_pillar02_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d
     In Collection: CenterPoint
     Config: configs/centerpoint/centerpoint_pillar02_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py
     Metadata:
-      Training Memory (GB): 4.4
+      Training Memory (GB): 4.6
     Results:
       - Task: 3D Object Detection
         Dataset: nuScenes
         Metrics:
-          mAP: 49.07
+          mAP: 48.70
-          NDS: 59.66
+          NDS: 59.62
-    Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/centerpoint/centerpoint_01voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus/centerpoint_02pillar_second_secfpn_circlenms_4x8_cyclic_20e_nus_20201004_170716-a134a233.pth
+    Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_02pillar_second_secfpn_circlenms_4x8_cyclic_20e_nus/centerpoint_02pillar_second_secfpn_circlenms_4x8_cyclic_20e_nus_20220811_031844-191a3822.pth
-  - Name: centerpoint_02pillar_second_secfpn_dcn_4x8_cyclic_20e_nus
+  - Name: centerpoint_pillar02_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d
    In Collection: CenterPoint
     Config: configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py
     Metadata:
-      Training Memory (GB): 4.6
+      Training Memory (GB): 4.9
     Results:
       - Task: 3D Object Detection
         Dataset: nuScenes
         Metrics:
-          mAP: 48.8
+          mAP: 48.38
-          NDS: 59.67
+          NDS: 59.79
-    Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/centerpoint/centerpoint_02pillar_second_secfpn_dcn_4x8_cyclic_20e_nus/centerpoint_02pillar_second_secfpn_dcn_4x8_cyclic_20e_nus_20200930_103722-3bb135f2.pth
+    Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_02pillar_second_secfpn_dcn_4x8_cyclic_20e_nus/centerpoint_02pillar_second_secfpn_dcn_4x8_cyclic_20e_nus_20220811_045458-808e69ad.pth
@@ -22,15 +22,15 @@ We implement DGCNN and provide the results and checkpoints on S3DIS dataset.
 ### S3DIS
 | Method | Split | Lr schd | Mem (GB) | Inf time (fps) | mIoU (Val set) | Download |
 | :----: | :---: | :-----: | :------: | :------------: | :------------: | :------: |
-| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg.py) | Area_1 | cosine 100e | 13.1 | | 68.33 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area1/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210731_000734-39658f14.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area1/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210731_000734.log.json) |
+| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg_test-area1.py) | Area_1 | cosine 100e | 13.1 | | 68.33 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area1/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210731_000734-39658f14.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area1/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210731_000734.log.json) |
-| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg.py) | Area_2 | cosine 100e | 13.1 | | 40.68 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area2/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210731_144648-aea9ecb6.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area2/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210731_144648.log.json) |
+| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg_test-area2.py) | Area_2 | cosine 100e | 13.1 | | 40.68 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area2/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210731_144648-aea9ecb6.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area2/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210731_144648.log.json) |
-| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg.py) | Area_3 | cosine 100e | 13.1 | | 69.38 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area3/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210801_154629-2ff50ee0.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area3/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210801_154629.log.json) |
+| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg_test-area3.py) | Area_3 | cosine 100e | 13.1 | | 69.38 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area3/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210801_154629-2ff50ee0.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area3/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210801_154629.log.json) |
-| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg.py) | Area_4 | cosine 100e | 13.1 | | 50.07 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area4/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210802_073551-dffab9cd.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area4/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210802_073551.log.json) |
+| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg_test-area4.py) | Area_4 | cosine 100e | 13.1 | | 50.07 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area4/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210802_073551-dffab9cd.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area4/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210802_073551.log.json) |
-| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg.py) | Area_5 | cosine 100e | 13.1 | | 50.59 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area5/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210730_235824-f277e0c5.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area5/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210730_235824.log.json) |
+| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py) | Area_5 | cosine 100e | 13.1 | | 50.59 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area5/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210730_235824-f277e0c5.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area5/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210730_235824.log.json) |
-| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg.py) | Area_6 | cosine 100e | 13.1 | | 77.94 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area6/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210802_154317-e3511b32.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area6/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210802_154317.log.json) |
+| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg_test-area6.py) | Area_6 | cosine 100e | 13.1 | | 77.94 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area6/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210802_154317-e3511b32.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area6/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210802_154317.log.json) |
-| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg.py) | 6-fold | | | | 59.43 | |
+| DGCNN | 6-fold | | | | 59.43 | |
 **Notes:**
...
_base_ = './dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py'
# data settings
train_area = [2, 3, 4, 5, 6]
test_area = 1
train_dataloader = dict(
batch_size=32,
dataset=dict(
ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area],
scene_idxs=[
f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area
]))
test_dataloader = dict(
dataset=dict(
ann_files=f's3dis_infos_Area_{test_area}.pkl',
scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'))
val_dataloader = test_dataloader
_base_ = './dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py'
# data settings
train_area = [1, 3, 4, 5, 6]
test_area = 2
train_dataloader = dict(
batch_size=32,
dataset=dict(
ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area],
scene_idxs=[
f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area
]))
test_dataloader = dict(
dataset=dict(
ann_files=f's3dis_infos_Area_{test_area}.pkl',
scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'))
val_dataloader = test_dataloader
_base_ = './dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py'
# data settings
train_area = [1, 2, 4, 5, 6]
test_area = 3
train_dataloader = dict(
batch_size=32,
dataset=dict(
ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area],
scene_idxs=[
f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area
]))
test_dataloader = dict(
dataset=dict(
ann_files=f's3dis_infos_Area_{test_area}.pkl',
scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'))
val_dataloader = test_dataloader
_base_ = './dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py'
# data settings
train_area = [1, 2, 3, 5, 6]
test_area = 4
train_dataloader = dict(
batch_size=32,
dataset=dict(
ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area],
scene_idxs=[
f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area
]))
test_dataloader = dict(
dataset=dict(
ann_files=f's3dis_infos_Area_{test_area}.pkl',
scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'))
val_dataloader = test_dataloader
@@ -16,6 +16,6 @@ model = dict(
         use_normalized_coord=True,
         batch_size=24))
-default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=2), )
+default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=2))
 train_dataloader = dict(batch_size=32)
-val_cfg = dict(interval=2)
+train_cfg = dict(val_interval=2)
_base_ = './dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py'
# data settings
train_area = [1, 2, 3, 4, 5]
test_area = 6
train_dataloader = dict(
batch_size=32,
dataset=dict(
ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area],
scene_idxs=[
f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area
]))
test_dataloader = dict(
dataset=dict(
ann_files=f's3dis_infos_Area_{test_area}.pkl',
scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'))
val_dataloader = test_dataloader
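All six per-area configs above follow the same pattern: they inherit from the area-5 config and override only `train_area`/`test_area` and the derived annotation and scene-index paths. A quick sanity check of the merged result with MMEngine (a sketch, assuming it is run from the repository root with MMEngine installed):

```python
from mmengine.config import Config

# _base_ inheritance merges the per-area overrides into the area-5 base
# config; the model, schedule and pipelines are shared across all six splits.
cfg = Config.fromfile(
    'configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area1.py')
print(cfg.train_dataloader.dataset.ann_files)  # info files for Areas 2-6
print(cfg.test_dataloader.dataset.ann_files)   # s3dis_infos_Area_1.pkl
```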
@@ -10,15 +10,80 @@ Collections:
     README: configs/dgcnn/README.md
 Models:
-  - Name: dgcnn_4xb32-cosine-100e_s3dis-seg.py
+  - Name: dgcnn_4xb32-cosine-100e_s3dis-seg_test-area1.py
     In Collection: DGCNN
-    Config: configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg.py
+    Config: configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area1.py
     Metadata:
       Training Data: S3DIS
       Training Memory (GB): 13.3
     Results:
       - Task: 3D Semantic Segmentation
-        Dataset: S3DIS
+        Dataset: S3DIS Area1
+        Metrics:
+          mIoU: 68.33
+    Weights: https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area1/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210731_000734-39658f14.pth
+  - Name: dgcnn_4xb32-cosine-100e_s3dis-seg_test-area2.py
+    In Collection: DGCNN
+    Config: configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area2.py
+    Metadata:
+      Training Data: S3DIS
+      Training Memory (GB): 13.3
+    Results:
+      - Task: 3D Semantic Segmentation
+        Dataset: S3DIS Area2
+        Metrics:
+          mIoU: 40.68
+    Weights: https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area2/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210731_144648-aea9ecb6.pth
+  - Name: dgcnn_4xb32-cosine-100e_s3dis-seg_test-area3.py
+    In Collection: DGCNN
+    Config: configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area3.py
+    Metadata:
+      Training Data: S3DIS
+      Training Memory (GB): 13.3
+    Results:
+      - Task: 3D Semantic Segmentation
+        Dataset: S3DIS Area3
+        Metrics:
+          mIoU: 69.38
+    Weights: https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area3/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210801_154629-2ff50ee0.pth
+  - Name: dgcnn_4xb32-cosine-100e_s3dis-seg_test-area4.py
+    In Collection: DGCNN
+    Config: configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area4.py
+    Metadata:
+      Training Data: S3DIS
+      Training Memory (GB): 13.3
+    Results:
+      - Task: 3D Semantic Segmentation
+        Dataset: S3DIS Area4
+        Metrics:
+          mIoU: 50.07
+    Weights: https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area4/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210802_073551-dffab9cd.pth
+  - Name: dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py
+    In Collection: DGCNN
+    Config: configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py
+    Metadata:
+      Training Data: S3DIS
+      Training Memory (GB): 13.3
+    Results:
+      - Task: 3D Semantic Segmentation
+        Dataset: S3DIS Area5
         Metrics:
           mIoU: 50.59
     Weights: https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area5/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210730_235824-f277e0c5.pth
+  - Name: dgcnn_4xb32-cosine-100e_s3dis-seg_test-area6.py
+    In Collection: DGCNN
+    Config: configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area6.py
+    Metadata:
+      Training Data: S3DIS
+      Training Memory (GB): 13.3
+    Results:
+      - Task: 3D Semantic Segmentation
+        Dataset: S3DIS Area6
+        Metrics:
+          mIoU: 77.94
+    Weights: https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area6/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210802_154317-e3511b32.pth
# FCAF3D: Fully Convolutional Anchor-Free 3D Object Detection
> [FCAF3D: Fully Convolutional Anchor-Free 3D Object Detection](https://arxiv.org/abs/2112.00322)
<!-- [ALGORITHM] -->
## Abstract
Recently, promising applications in robotics and augmented reality have attracted considerable attention to 3D object detection from point clouds. In this paper, we present FCAF3D --- a first-in-class fully convolutional anchor-free indoor 3D object detection method. It is a simple yet effective method that uses a voxel representation of a point cloud and processes voxels with sparse convolutions. FCAF3D can handle large-scale scenes with minimal runtime through a single fully convolutional feed-forward pass. Existing 3D object detection methods make prior assumptions on the geometry of objects, and we argue that it limits their generalization ability. To eliminate prior assumptions, we propose a novel parametrization of oriented bounding boxes that allows obtaining better results in a purely data-driven way. The proposed method achieves state-of-the-art 3D object detection results in terms of mAP@0.5 on ScanNet V2 (+4.5), SUN RGB-D (+3.5), and S3DIS (+20.5) datasets.
<div align="center">
<img src="https://user-images.githubusercontent.com/6030962/182842796-98c10576-d39c-4c2b-a15a-a04c9870919c.png" width="800"/>
</div>
## Introduction
We implement FCAF3D and provide the results and checkpoints on the ScanNet, SUN RGB-D and S3DIS datasets.
## Results and models
### ScanNet
| Backbone | Mem (GB) | Inf time (fps) | AP@0.25 | AP@0.5 | Download |
| :------------------------------------------------: | :------: | :------------: | :----------: | :----------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
| [MinkResNet34](./fcaf3d_8x2_scannet-3d-18class.py) | 10.5 | 8.0 | 69.7(70.7\*) | 55.2(56.0\*) | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/fcaf3d/fcaf3d_8x2_scannet-3d-18class/fcaf3d_8x2_scannet-3d-18class_20220805_084956.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/fcaf3d/fcaf3d_8x2_scannet-3d-18class/fcaf3d_8x2_scannet-3d-18class_20220805_084956.log.json) |
### SUN RGB-D
| Backbone | Mem (GB) | Inf time (fps) | AP@0.25 | AP@0.5 | Download |
| :------------------------------------------------: | :------: | :------------: | :----------: | :----------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
| [MinkResNet34](./fcaf3d_8x2_sunrgbd-3d-10class.py) | 6.3 | 15.6 | 63.8(63.8\*) | 47.3(48.2\*) | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/fcaf3d/fcaf3d_8x2_sunrgbd-3d-10class/fcaf3d_8x2_sunrgbd-3d-10class_20220805_165017.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/fcaf3d/fcaf3d_8x2_sunrgbd-3d-10class/fcaf3d_8x2_sunrgbd-3d-10class_20220805_165017.log.json) |
### S3DIS
| Backbone | Mem (GB) | Inf time (fps) | AP@0.25 | AP@0.5 | Download |
| :----------------------------------------------: | :------: | :------------: | :----------: | :----------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
| [MinkResNet34](./fcaf3d_2xb8_s3dis-3d-5class.py) | 23.5 | 4.2 | 67.4(64.9\*) | 45.7(43.8\*) | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/fcaf3d/fcaf3d_8x2_s3dis-3d-5class/fcaf3d_8x2_s3dis-3d-5class_20220805_121957.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/fcaf3d/fcaf3d_8x2_s3dis-3d-5class/fcaf3d_8x2_s3dis-3d-5class_20220805_121957.log.json) |
**Note**
- We report the results across 5 train runs followed by 5 test runs. * means the results reported in the paper.
- Inference time is given for a single NVIDIA GeForce GTX 1080 Ti GPU. All models are trained on 2 GPUs.
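
The released checkpoints in the tables above can also be used directly for single-sample inference. A rough sketch with the high-level Python API (the point cloud path is a placeholder, and the exact API calls are an assumption about this release rather than something stated in this README):

```python
from mmdet3d.apis import inference_detector, init_model

config = 'configs/fcaf3d/fcaf3d_8x2_scannet-3d-18class.py'
checkpoint = (
    'https://download.openmmlab.com/mmdetection3d/v1.0.0_models/fcaf3d/'
    'fcaf3d_8x2_scannet-3d-18class/'
    'fcaf3d_8x2_scannet-3d-18class_20220805_084956.pth')

# Build the model from the config, load the released ScanNet weights and run
# it on a single preprocessed point cloud (.bin); adjust paths as needed.
model = init_model(config, checkpoint, device='cuda:0')
result = inference_detector(model, 'path/to/scannet_points.bin')
print(result)
```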
## Citation
```latex
@inproceedings{rukhovich2022fcaf3d,
title={FCAF3D: Fully Convolutional Anchor-Free 3D Object Detection},
    author={Danila Rukhovich and Anna Vorontsova and Anton Konushin},
booktitle={European conference on computer vision},
year={2022}
}
```
_base_ = [
'../_base_/models/fcaf3d.py', '../_base_/default_runtime.py',
'../_base_/datasets/s3dis-3d.py'
]
model = dict(bbox_head=dict(num_classes=5))
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.001, weight_decay=0.0001),
clip_grad=dict(max_norm=10, norm_type=2))
# learning rate
param_scheduler = dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
custom_hooks = [dict(type='EmptyCacheHook', after_iter=True)]
# training schedule for 1x
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=12)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
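The schedule above steps the learning rate down by `gamma` after epochs 8 and 11 of the 12-epoch run. For reference, a tiny illustrative sketch of the effective per-epoch values under these settings (using the base learning rate 0.001 from the optimizer above):

```python
# Illustrative only: per-epoch learning rate under MultiStepLR with the
# values used above (base lr 0.001, gamma 0.1, milestones [8, 11]).
base_lr, gamma, milestones = 0.001, 0.1, [8, 11]
for epoch in range(12):
    lr = base_lr * gamma ** sum(epoch >= m for m in milestones)
    print(f'epoch {epoch:2d}: lr = {lr:.5f}')
```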
_base_ = [
'../_base_/models/fcaf3d.py', '../_base_/default_runtime.py',
'../_base_/datasets/scannet-3d.py'
]
n_points = 100000
train_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=True,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(type='LoadAnnotations3D'),
dict(type='GlobalAlignment', rotation_axis=2),
dict(type='PointSample', num_points=n_points),
dict(
type='RandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=0.5,
flip_ratio_bev_vertical=0.5),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.087266, 0.087266],
scale_ratio_range=[.9, 1.1],
translation_std=[.1, .1, .1],
shift_height=False),
dict(type='NormalizePointsColor', color_mean=None),
dict(
type='Pack3DDetInputs',
keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
test_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=True,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(type='GlobalAlignment', rotation_axis=2),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(
type='RandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=0.5,
flip_ratio_bev_vertical=0.5),
dict(type='PointSample', num_points=n_points),
dict(type='NormalizePointsColor', color_mean=None),
]),
dict(type='Pack3DDetInputs', keys=['points'])
]
train_dataloader = dict(
dataset=dict(
type='RepeatDataset',
times=10,
dataset=dict(pipeline=train_pipeline, filter_empty_gt=True)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.001, weight_decay=0.0001),
clip_grad=dict(max_norm=10, norm_type=2))
# learning rate
param_scheduler = dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
custom_hooks = [dict(type='EmptyCacheHook', after_iter=True)]
# training schedule for 1x
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=12)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
_base_ = [
'../_base_/models/fcaf3d.py', '../_base_/default_runtime.py',
'../_base_/datasets/sunrgbd-3d.py'
]
n_points = 100000
model = dict(
bbox_head=dict(
num_classes=10,
num_reg_outs=8,
bbox_loss=dict(type='RotatedIoU3DLoss')))
train_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(type='LoadAnnotations3D'),
dict(type='PointSample', num_points=n_points),
dict(type='RandomFlip3D', sync_2d=False, flip_ratio_bev_horizontal=0.5),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.523599, 0.523599],
scale_ratio_range=[0.85, 1.15],
translation_std=[.1, .1, .1],
shift_height=False),
dict(
type='Pack3DDetInputs',
keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
test_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(
type='RandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=0.5,
flip_ratio_bev_vertical=0.5),
dict(type='PointSample', num_points=n_points)
]),
dict(type='Pack3DDetInputs', keys=['points'])
]
train_dataloader = dict(
batch_size=8,
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(pipeline=train_pipeline, filter_empty_gt=True)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.001, weight_decay=0.0001),
clip_grad=dict(max_norm=10, norm_type=2))
# learning rate
param_scheduler = dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
custom_hooks = [dict(type='EmptyCacheHook', after_iter=True)]
# training schedule for 1x
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=12)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')