_base_ = [
'../_base_/datasets/waymoD3-mv-mono3d-3class.py',
'../_base_/models/pgd.py', '../_base_/schedules/mmdet-schedule-1x.py',
'../_base_/default_runtime.py'
]
# model settings
model = dict(
backbone=dict(
type='mmdet.ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'),
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, False, True, True)),
neck=dict(num_outs=3),
bbox_head=dict(
num_classes=3,
bbox_code_size=7,
pred_attrs=False,
pred_velo=False,
pred_bbox2d=True,
use_onlyreg_proj=True,
strides=(8, 16, 32),
regress_ranges=((-1, 128), (128, 256), (256, 1e8)),
group_reg_dims=(2, 1, 3, 1, 16,
4), # offset, depth, size, rot, kpts, bbox2d
reg_branch=(
(256, ), # offset
(256, ), # depth
(256, ), # size
(256, ), # rot
(256, ), # kpts
(256, ) # bbox2d
),
centerness_branch=(256, ),
loss_cls=dict(
type='mmdet.FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(
type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
loss_dir=dict(
type='mmdet.CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_centerness=dict(
type='mmdet.CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
use_depth_classifier=True,
depth_branch=(256, ),
depth_range=(0, 50),
depth_unit=10,
division='uniform',
depth_bins=6,
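        # Illustrative note (an assumption about PGD's 'uniform' division,
        # not part of the original config): with depth_range=(0, 50) and
        # depth_unit=10, the 6 bins correspond to depths
        # [0, 10, 20, 30, 40, 50].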
pred_keypoints=True,
weight_dim=1,
loss_depth=dict(
type='UncertainSmoothL1Loss', alpha=1.0, beta=3.0,
loss_weight=1.0),
loss_bbox2d=dict(
type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.0),
loss_consistency=dict(type='mmdet.GIoULoss', loss_weight=0.0),
bbox_coder=dict(
type='PGDBBoxCoder',
base_depths=((41.01, 18.44), ),
base_dims=(
(0.91, 1.74, 0.84), # Pedestrian
(1.81, 1.77, 0.84), # Cyclist
(4.73, 1.77, 2.08)), # Car
code_size=7)),
# set weight 1.0 for the offset, size and rot dims, 0.2 for the depth dim,
# 0.2 for the 16-dim keypoint offsets and 1.0 for the 4-dim 2D distance targets
train_cfg=dict(code_weight=[
1.0, 1.0, 0.2, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0
]),
test_cfg=dict(nms_pre=100, nms_thr=0.05, score_thr=0.001, max_per_img=20))
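# Illustrative sanity check (not part of the original config): code_weight
# above has 2 + 1 + 3 + 1 + 16 + 4 = 27 entries, matching group_reg_dims.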
# optimizer
optim_wrapper = dict(
optimizer=dict(
type='SGD',
lr=0.008,
),
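    # the paramwise_cfg below gives bias parameters a doubled learning rate
    # and removes their weight decay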
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),
clip_grad=dict(max_norm=35, norm_type=2))
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=24)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
auto_scale_lr = dict(enable=False, base_batch_size=48)
# PointRCNN: 3D Object Proposal Generation and Detection from Point Cloud
> [PointRCNN: 3D Object Proposal Generation and Detection from Point Cloud](https://arxiv.org/abs/1812.04244)
<!-- [ALGORITHM] -->
## Abstract
In this paper, we propose PointRCNN for 3D object detection from raw point cloud. The whole framework is composed of two stages: stage-1 for the bottom-up 3D proposal generation and stage-2 for refining proposals in the canonical coordinates to obtain the final detection results. Instead of generating proposals from RGB image or projecting point cloud to bird's view or voxels as previous methods do, our stage-1 sub-network directly generates a small number of high-quality 3D proposals from point cloud in a bottom-up manner via segmenting the point cloud of the whole scene into foreground points and background. The stage-2 sub-network transforms the pooled points of each proposal to canonical coordinates to learn better local spatial features, which is combined with global semantic features of each point learned in stage-1 for accurate box refinement and confidence prediction. Extensive experiments on the 3D detection benchmark of KITTI dataset show that our proposed architecture outperforms state-of-the-art methods with remarkable margins by using only point cloud as input.
<div align=center>
<img src="https://user-images.githubusercontent.com/79644370/144959105-271038a2-4ae1-4cdb-b6a8-68c14daf83b0.png" width="800"/>
</div>
## Introduction
We implement PointRCNN and provide its results and checkpoints on the KITTI dataset.
## Results and models
### KITTI
| Backbone | Class | Lr schd | Mem (GB) | Inf time (fps) | mAP | Download |
| :------------------------------------------------: | :-----: | :--------: | :------: | :------------: | :---: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
| [PointNet++](./point-rcnn_8xb2_kitti-3d-3class.py) | 3 Class | cyclic 40e | 4.6 | | 70.83 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/point_rcnn/point_rcnn_2x8_kitti-3d-3classes_20211208_151344.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/point_rcnn/point_rcnn_2x8_kitti-3d-3classes_20211208_151344.log.json) |
Note: mAP denotes the AP11 result averaged over the 3 classes under the moderate setting.
Detailed performance on KITTI 3D detection, evaluated by the AP11 metric, is as follows:
| | Easy | Moderate | Hard |
| ---------- | :---: | :------: | :---: |
| Car | 89.13 | 78.72 | 78.24 |
| Pedestrian | 65.81 | 59.57 | 52.75 |
| Cyclist | 93.51 | 74.19 | 70.73 |
## Citation
```latex
@inproceedings{Shi_2019_CVPR,
title = {PointRCNN: 3D Object Proposal Generation and Detection from Point Cloud},
author = {Shi, Shaoshuai and Wang, Xiaogang and Li, Hongsheng},
booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2019}
}
```
Collections:
- Name: PointRCNN
Metadata:
Training Data: KITTI
Training Techniques:
- AdamW
Training Resources: 8x Titan XP GPUs
Architecture:
- PointNet++
Paper:
URL: https://arxiv.org/abs/1812.04244
Title: 'PointRCNN: 3D Object Proposal Generation and Detection from Point Cloud'
README: configs/point_rcnn/README.md
Code:
URL: https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0.dev0/mmdet3d/models/detectors/point_rcnn.py#L8
Version: v1.0.0
Models:
- Name: point-rcnn_8xb2_kitti-3d-3class
In Collection: PointRCNN
Config: configs/point_rcnn/point-rcnn_8xb2_kitti-3d-3class.py
Metadata:
Training Memory (GB): 4.6
Results:
- Task: 3D Object Detection
Dataset: KITTI
Metrics:
mAP: 70.83
Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/point_rcnn/point_rcnn_2x8_kitti-3d-3classes_20211208_151344.pth
_base_ = [
'../_base_/datasets/kitti-3d-car.py', '../_base_/models/point_rcnn.py',
'../_base_/default_runtime.py', '../_base_/schedules/cyclic-40e.py'
]
# dataset settings
dataset_type = 'KittiDataset'
data_root = 'data/kitti/'
class_names = ['Pedestrian', 'Cyclist', 'Car']
metainfo = dict(classes=class_names)
point_cloud_range = [0, -40, -3, 70.4, 40, 1]
input_modality = dict(use_lidar=True, use_camera=False)
backend_args = None
db_sampler = dict(
data_root=data_root,
info_path=data_root + 'kitti_dbinfos_train.pkl',
rate=1.0,
prepare=dict(
filter_by_difficulty=[-1],
filter_by_min_points=dict(Car=5, Pedestrian=5, Cyclist=5)),
sample_groups=dict(Car=20, Pedestrian=15, Cyclist=15),
classes=class_names,
points_loader=dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
backend_args=backend_args),
backend_args=backend_args)
train_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
backend_args=backend_args),
dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectSample', db_sampler=db_sampler),
dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
dict(
type='ObjectNoise',
num_try=100,
translation_std=[1.0, 1.0, 0.5],
global_rot_range=[0.0, 0.0],
rot_range=[-0.78539816, 0.78539816]),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.78539816, 0.78539816],
scale_ratio_range=[0.95, 1.05]),
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(type='PointSample', num_points=16384, sample_range=40.0),
dict(type='PointShuffle'),
dict(
type='Pack3DDetInputs',
keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
test_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
backend_args=backend_args),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(type='RandomFlip3D'),
dict(
type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(type='PointSample', num_points=16384, sample_range=40.0)
]),
dict(type='Pack3DDetInputs', keys=['points'])
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
dataset=dict(
type='RepeatDataset',
times=2,
dataset=dict(pipeline=train_pipeline, metainfo=metainfo)))
test_dataloader = dict(dataset=dict(pipeline=test_pipeline, metainfo=metainfo))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline, metainfo=metainfo))
lr = 0.001 # max learning rate
optim_wrapper = dict(optimizer=dict(lr=lr, betas=(0.95, 0.85)))
train_cfg = dict(by_epoch=True, max_epochs=80, val_interval=2)
# Default setting for scaling LR automatically
# - `enable`: whether to automatically scale the LR (disabled by default).
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
param_scheduler = [
# learning rate scheduler
# During the first 35 epochs, the learning rate increases from lr to lr * 10;
# during the next 45 epochs, it decreases from lr * 10 to lr * 1e-4.
dict(
type='CosineAnnealingLR',
T_max=35,
eta_min=lr * 10,
begin=0,
end=35,
by_epoch=True,
convert_to_iter_based=True),
dict(
type='CosineAnnealingLR',
T_max=45,
eta_min=lr * 1e-4,
begin=35,
end=80,
by_epoch=True,
convert_to_iter_based=True),
# momentum scheduler
# During the first 35 epochs, momentum decreases from 0.95 to 0.85 / 0.95;
# during the next 45 epochs, it increases from 0.85 / 0.95 to 1.
dict(
type='CosineAnnealingMomentum',
T_max=35,
eta_min=0.85 / 0.95,
begin=0,
end=35,
by_epoch=True,
convert_to_iter_based=True),
dict(
type='CosineAnnealingMomentum',
T_max=45,
eta_min=1,
begin=35,
end=80,
by_epoch=True,
convert_to_iter_based=True)
]
# PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space
> [PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space](https://arxiv.org/abs/1706.02413)
<!-- [ALGORITHM] -->
## Abstract
Few prior works study deep learning on point sets. PointNet by Qi et al. is a pioneer in this direction. However, by design PointNet does not capture local structures induced by the metric space points live in, limiting its ability to recognize fine-grained patterns and generalizability to complex scenes. In this work, we introduce a hierarchical neural network that applies PointNet recursively on a nested partitioning of the input point set. By exploiting metric space distances, our network is able to learn local features with increasing contextual scales. With further observation that point sets are usually sampled with varying densities, which results in greatly decreased performance for networks trained on uniform densities, we propose novel set learning layers to adaptively combine features from multiple scales. Experiments show that our network called PointNet++ is able to learn deep point set features efficiently and robustly. In particular, results significantly better than state-of-the-art have been obtained on challenging benchmarks of 3D point clouds.
<div align=center>
<img src="https://user-images.githubusercontent.com/79644370/143885530-ae53ed38-8132-4bb7-85a7-d2577de7de3f.png" width="800"/>
</div>
## Introduction
We implement PointNet++ and provide the results and checkpoints on the ScanNet and S3DIS datasets.
**Notice**: The original PointNet++ paper used a step learning rate schedule. We found that a cosine schedule achieves much better results, so we adopt it in our implementation. We also use a larger `weight_decay` factor because we find it consistently improves performance.
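For reference, a minimal mmengine-style sketch of such a setup (the exact values in `_base_/schedules/seg-cosine-200e.py` may differ; these numbers are illustrative assumptions):

```python
# Adam with an enlarged weight decay, plus a single cosine LR schedule.
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='Adam', lr=1e-3, weight_decay=1e-2))
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        T_max=200,     # anneal over the full 200-epoch run
        eta_min=1e-5,  # final learning rate
        begin=0,
        end=200,
        by_epoch=True)
]
```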
## Results and models
### ScanNet
| Method | Input | Lr schd | Mem (GB) | Inf time (fps) | mIoU (Val set) | mIoU (Test set) | Download |
| :---------------------------------------------------------------------------: | :-------: | :---------: | :------: | :------------: | :------------: | :-------------: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| [PointNet++ (SSG)](./pointnet2_ssg_2xb16-cosine-200e_scannet-seg-xyz-only.py) | XYZ | cosine 200e | 1.9 | | 53.91 | | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_xyz-only_16x2_cosine_200e_scannet_seg-3d-20class/pointnet2_ssg_xyz-only_16x2_cosine_200e_scannet_seg-3d-20class_20210514_143628-4e341a48.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_xyz-only_16x2_cosine_200e_scannet_seg-3d-20class/pointnet2_ssg_xyz-only_16x2_cosine_200e_scannet_seg-3d-20class_20210514_143628.log.json) |
| [PointNet++ (SSG)](./pointnet2_ssg_2xb16-cosine-200e_scannet-seg.py) | XYZ+Color | cosine 200e | 1.9 | | 54.44 | | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class/pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class_20210514_143644-ee73704a.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class/pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class_20210514_143644.log.json) |
| [PointNet++ (MSG)](./pointnet2_msg_2xb16-cosine-250e_scannet-seg-xyz-only.py) | XYZ | cosine 250e | 2.4 | | 54.26 | | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_xyz-only_16x2_cosine_250e_scannet_seg-3d-20class/pointnet2_msg_xyz-only_16x2_cosine_250e_scannet_seg-3d-20class_20210514_143838-b4a3cf89.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_xyz-only_16x2_cosine_250e_scannet_seg-3d-20class/pointnet2_msg_xyz-only_16x2_cosine_250e_scannet_seg-3d-20class_20210514_143838.log.json) |
| [PointNet++ (MSG)](./pointnet2_msg_2xb16-cosine-250e_scannet-seg.py) | XYZ+Color | cosine 250e | 2.4 | | 55.05 | | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_16x2_cosine_250e_scannet_seg-3d-20class/pointnet2_msg_16x2_cosine_250e_scannet_seg-3d-20class_20210514_144009-24477ab1.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_16x2_cosine_250e_scannet_seg-3d-20class/pointnet2_msg_16x2_cosine_250e_scannet_seg-3d-20class_20210514_144009.log.json) |
**Notes:**
- The original PointNet++ paper conducted experiments on the ScanNet V1 dataset, while later point cloud segmentation papers often use ScanNet V2. Following common practice, we report results on the ScanNet V2 dataset.
- Since the ScanNet dataset doesn't provide ground-truth labels for the test set, test-set performance can only be evaluated by submitting to the online benchmark [website](http://kaldir.vc.in.tum.de/scannet_benchmark/). However, users are only allowed to submit once every two weeks, so we currently report val-set mIoU; test-set performance may be added in the future.
- To generate the submission file for the ScanNet online benchmark, you need to modify the ScanNet dataset's [config](https://github.com/open-mmlab/mmdetection3d/blob/main/configs/_base_/datasets/scannet-seg.py#L126). Change `ann_file=data_root + 'scannet_infos_val.pkl'` to `ann_file=data_root + 'scannet_infos_test.pkl'`, and then simply run:
```shell
python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} --format-only --options 'txt_prefix=exps/pointnet2_scannet_results'
```
This will save the prediction results as `txt` files in `exps/pointnet2_scannet_results/`. Then, go to this folder and zip all files into `pn2_scannet.zip`. Now you can submit it to the online benchmark and wait for the test set result. More instructions can be found at their official [website](http://kaldir.vc.in.tum.de/scannet_benchmark/documentation#submission-policy).
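The zipping step can be scripted, for example as in the minimal sketch below (the paths follow the command above; the flat archive layout is an assumption about the benchmark's expected format):

```python
# Package the per-scene txt predictions into pn2_scannet.zip.
import zipfile
from pathlib import Path

result_dir = Path('exps/pointnet2_scannet_results')
with zipfile.ZipFile('pn2_scannet.zip', 'w') as zf:
    for txt_file in sorted(result_dir.glob('*.txt')):
        # store files at the archive root, without the directory prefix
        zf.write(txt_file, arcname=txt_file.name)
```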
### S3DIS
| Method | Split | Lr schd | Mem (GB) | Inf time (fps) | mIoU (Val set) | Download |
| :---------------------------------------------------------------: | :----: | :--------: | :------: | :------------: | :------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
| [PointNet++ (SSG)](./pointnet2_ssg_2xb16-cosine-50e_s3dis-seg.py) | Area_5 | cosine 50e | 3.6 | | 56.93 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_16x2_cosine_50e_s3dis_seg-3d-13class/pointnet2_ssg_16x2_cosine_50e_s3dis_seg-3d-13class_20210514_144205-995d0119.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_16x2_cosine_50e_s3dis_seg-3d-13class/pointnet2_ssg_16x2_cosine_50e_s3dis_seg-3d-13class_20210514_144205.log.json) |
| [PointNet++ (MSG)](./pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py) | Area_5 | cosine 80e | 3.6 | | 58.04 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class/pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class_20210514_144307-b2059817.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class/pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class_20210514_144307.log.json) |
**Notes:**
- We use XYZ+Color+Normalized_XYZ as input in all the experiments on the S3DIS dataset.
- The `Area_5` split means training the model on Area_1, 2, 3, 4 and 6, and testing on Area_5.
## Indeterminism
PointNet++ testing adopts sliding-patch inference, which involves random point sampling. The test script uses fixed random seeds, while the seeds used for validation during training are not fixed, so the test results may differ slightly from the results reported above.
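If fully repeatable numbers are required, the relevant seeds can be pinned manually before inference; a generic sketch (not the exact mechanism used by `tools/test.py`):

```python
# Fix all RNGs involved in sliding-patch point sampling.
import random

import numpy as np
import torch


def set_seed(seed: int) -> None:
    random.seed(seed)                 # Python RNG
    np.random.seed(seed)              # NumPy RNG
    torch.manual_seed(seed)           # PyTorch CPU RNG
    torch.cuda.manual_seed_all(seed)  # PyTorch GPU RNGs


set_seed(0)
```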
## Citation
```latex
@inproceedings{qi2017pointnet++,
title={PointNet++: deep hierarchical feature learning on point sets in a metric space},
author={Qi, Charles R and Yi, Li and Su, Hao and Guibas, Leonidas J},
booktitle={Proceedings of the 31st International Conference on Neural Information Processing Systems},
pages={5105--5114},
year={2017}
}
```
Collections:
- Name: PointNet++
Metadata:
Training Techniques:
- Adam
Training Resources: 2x Titan XP GPUs
Architecture:
- PointNet++
Paper:
URL: https://arxiv.org/abs/1706.02413
Title: 'PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space'
README: configs/pointnet2/README.md
Code:
URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/backbones/pointnet2_sa_ssg.py#L12
Version: v0.14.0
Models:
- Name: pointnet2_ssg_2xb16-cosine-200e_scannet-seg-xyz-only
In Collection: PointNet++
Config: configs/pointnet2/pointnet2_ssg_2xb16-cosine-200e_scannet-seg-xyz-only.py
Metadata:
Training Data: ScanNet
Training Memory (GB): 1.9
Results:
- Task: 3D Semantic Segmentation
Dataset: ScanNet
Metrics:
mIoU: 53.91
Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_xyz-only_16x2_cosine_200e_scannet_seg-3d-20class/pointnet2_ssg_xyz-only_16x2_cosine_200e_scannet_seg-3d-20class_20210514_143628-4e341a48.pth
- Name: pointnet2_ssg_2xb16-cosine-200e_scannet-seg
In Collection: PointNet++
Config: configs/pointnet2/pointnet2_ssg_2xb16-cosine-200e_scannet-seg.py
Metadata:
Training Data: ScanNet
Training Memory (GB): 1.9
Results:
- Task: 3D Semantic Segmentation
Dataset: ScanNet
Metrics:
mIoU: 54.44
Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class/pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class_20210514_143644-ee73704a.pth
- Name: pointnet2_msg_2xb16-cosine-250e_scannet-seg-xyz-only
In Collection: PointNet++
Config: configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg-xyz-only.py
Metadata:
Training Data: ScanNet
Training Memory (GB): 2.4
Results:
- Task: 3D Semantic Segmentation
Dataset: ScanNet
Metrics:
mIoU: 54.26
Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_xyz-only_16x2_cosine_250e_scannet_seg-3d-20class/pointnet2_msg_xyz-only_16x2_cosine_250e_scannet_seg-3d-20class_20210514_143838-b4a3cf89.pth
- Name: pointnet2_msg_2xb16-cosine-250e_scannet-seg
In Collection: PointNet++
Config: configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py
Metadata:
Training Data: ScanNet
Training Memory (GB): 2.4
Results:
- Task: 3D Semantic Segmentation
Dataset: ScanNet
Metrics:
mIoU: 55.05
Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_16x2_cosine_250e_scannet_seg-3d-20class/pointnet2_msg_16x2_cosine_250e_scannet_seg-3d-20class_20210514_144009-24477ab1.pth
- Name: pointnet2_ssg_2xb16-cosine-50e_s3dis-seg
Alias: pointnet2-ssg_s3dis-seg
In Collection: PointNet++
Config: configs/pointnet2/pointnet2_ssg_2xb16-cosine-50e_s3dis-seg.py
Metadata:
Training Data: S3DIS
Training Memory (GB): 3.6
Results:
- Task: 3D Semantic Segmentation
Dataset: S3DIS
Metrics:
mIoU: 56.93
Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_16x2_cosine_50e_s3dis_seg-3d-13class/pointnet2_ssg_16x2_cosine_50e_s3dis_seg-3d-13class_20210514_144205-995d0119.pth
- Name: pointnet2_msg_2xb16-cosine-80e_s3dis-seg
In Collection: PointNet++
Config: configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py
Metadata:
Training Data: S3DIS
Training Memory (GB): 3.6
Results:
- Task: 3D Semantic Segmentation
Dataset: S3DIS
Metrics:
mIoU: 58.04
Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class/pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class_20210514_144307-b2059817.pth
_base_ = [
'../_base_/datasets/scannet-seg.py', '../_base_/models/pointnet2_msg.py',
'../_base_/schedules/seg-cosine-200e.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
backbone=dict(in_channels=3), # only [xyz]
decode_head=dict(
num_classes=20,
ignore_index=20,
# `class_weight` is generated in data pre-processing, saved in
# `data/scannet/seg_info/train_label_weight.npy`
        # you can copy-paste the values here, or pass the file path as
        # `class_weight='data/scannet/seg_info/train_label_weight.npy'`
loss_decode=dict(class_weight=[
2.389689, 2.7215734, 4.5944676, 4.8543367, 4.096086, 4.907941,
4.690836, 4.512031, 4.623311, 4.9242644, 5.358117, 5.360071,
5.019636, 4.967126, 5.3502126, 5.4023647, 5.4027233, 5.4169416,
5.3954206, 4.6971426
])),
test_cfg=dict(
num_points=8192,
block_size=1.5,
sample_rate=0.5,
use_normalized_coord=False,
batch_size=24))
# dataset settings
# in this setting, we only use xyz as network input
# so we need to re-write all the data pipeline
class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink',
'bathtub', 'otherfurniture')
num_points = 8192
backend_args = None
train_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=False,
load_dim=6,
use_dim=[0, 1, 2], # only load xyz coordinates
backend_args=backend_args),
dict(
type='LoadAnnotations3D',
with_bbox_3d=False,
with_label_3d=False,
with_mask_3d=False,
with_seg_3d=True,
backend_args=backend_args),
dict(type='PointSegClassMapping'),
dict(
type='IndoorPatchPointSample',
num_points=num_points,
block_size=1.5,
ignore_index=len(class_names),
use_normalized_coord=False,
enlarge_size=0.2,
min_unique_num=None),
dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask'])
]
test_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=False,
load_dim=6,
use_dim=[0, 1, 2],
backend_args=backend_args),
dict(
type='LoadAnnotations3D',
with_bbox_3d=False,
with_label_3d=False,
with_mask_3d=False,
with_seg_3d=True,
backend_args=backend_args),
dict(
        # a wrapper needed to call the test function successfully;
        # we don't actually perform test-time augmentation
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(
type='RandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=0.0,
flip_ratio_bev_vertical=0.0),
]),
dict(type='Pack3DDetInputs', keys=['points'])
]
train_dataloader = dict(batch_size=16, dataset=dict(pipeline=train_pipeline))
test_dataloader = dict(dataset=dict(pipeline=test_pipeline))
val_dataloader = test_dataloader
# runtime settings
default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=5))
# PointNet2-MSG needs longer training time than PointNet2-SSG
train_cfg = dict(by_epoch=True, max_epochs=250, val_interval=5)
_base_ = [
'../_base_/datasets/scannet-seg.py', '../_base_/models/pointnet2_msg.py',
'../_base_/schedules/seg-cosine-200e.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
decode_head=dict(
num_classes=20,
ignore_index=20,
# `class_weight` is generated in data pre-processing, saved in
# `data/scannet/seg_info/train_label_weight.npy`
        # you can copy-paste the values here, or pass the file path as
        # `class_weight='data/scannet/seg_info/train_label_weight.npy'`
loss_decode=dict(class_weight=[
2.389689, 2.7215734, 4.5944676, 4.8543367, 4.096086, 4.907941,
4.690836, 4.512031, 4.623311, 4.9242644, 5.358117, 5.360071,
5.019636, 4.967126, 5.3502126, 5.4023647, 5.4027233, 5.4169416,
5.3954206, 4.6971426
])),
test_cfg=dict(
num_points=8192,
block_size=1.5,
sample_rate=0.5,
use_normalized_coord=False,
batch_size=24))
# data settings
train_dataloader = dict(batch_size=16)
# runtime settings
default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=5))
# PointNet2-MSG needs longer training time than PointNet2-SSG
train_cfg = dict(by_epoch=True, max_epochs=250, val_interval=5)
_base_ = [
'../_base_/datasets/s3dis-seg.py', '../_base_/models/pointnet2_msg.py',
'../_base_/schedules/seg-cosine-50e.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
backbone=dict(in_channels=9), # [xyz, rgb, normalized_xyz]
decode_head=dict(
num_classes=13, ignore_index=13,
loss_decode=dict(class_weight=None)), # S3DIS doesn't use class_weight
test_cfg=dict(
num_points=4096,
block_size=1.0,
sample_rate=0.5,
use_normalized_coord=True,
batch_size=24))
# data settings
train_dataloader = dict(batch_size=16)
# runtime settings
default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=2))
# PointNet2-MSG needs longer training time than PointNet2-SSG
train_cfg = dict(by_epoch=True, max_epochs=80, val_interval=2)
_base_ = [
'../_base_/datasets/scannet-seg.py', '../_base_/models/pointnet2_ssg.py',
'../_base_/schedules/seg-cosine-200e.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
backbone=dict(in_channels=3), # only [xyz]
decode_head=dict(
num_classes=20,
ignore_index=20,
# `class_weight` is generated in data pre-processing, saved in
# `data/scannet/seg_info/train_label_weight.npy`
        # you can copy-paste the values here, or pass the file path as
        # `class_weight='data/scannet/seg_info/train_label_weight.npy'`
loss_decode=dict(class_weight=[
2.389689, 2.7215734, 4.5944676, 4.8543367, 4.096086, 4.907941,
4.690836, 4.512031, 4.623311, 4.9242644, 5.358117, 5.360071,
5.019636, 4.967126, 5.3502126, 5.4023647, 5.4027233, 5.4169416,
5.3954206, 4.6971426
])),
test_cfg=dict(
num_points=8192,
block_size=1.5,
sample_rate=0.5,
use_normalized_coord=False,
batch_size=24))
# dataset settings
# in this setting, we only use xyz as network input
# so we need to re-write all the data pipeline
class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink',
'bathtub', 'otherfurniture')
num_points = 8192
backend_args = None
train_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=False,
load_dim=6,
use_dim=[0, 1, 2], # only load xyz coordinates
backend_args=backend_args),
dict(
type='LoadAnnotations3D',
with_bbox_3d=False,
with_label_3d=False,
with_mask_3d=False,
with_seg_3d=True,
backend_args=backend_args),
dict(type='PointSegClassMapping'),
dict(
type='IndoorPatchPointSample',
num_points=num_points,
block_size=1.5,
ignore_index=len(class_names),
use_normalized_coord=False,
enlarge_size=0.2,
min_unique_num=None),
dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask'])
]
test_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=False,
load_dim=6,
use_dim=[0, 1, 2],
backend_args=backend_args),
dict(
type='LoadAnnotations3D',
with_bbox_3d=False,
with_label_3d=False,
with_mask_3d=False,
with_seg_3d=True,
backend_args=backend_args),
dict(
        # a wrapper needed to call the test function successfully;
        # we don't actually perform test-time augmentation
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(
type='RandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=0.0,
flip_ratio_bev_vertical=0.0),
]),
dict(type='Pack3DDetInputs', keys=['points'])
]
train_dataloader = dict(batch_size=16, dataset=dict(pipeline=train_pipeline))
test_dataloader = dict(dataset=dict(pipeline=test_pipeline))
val_dataloader = test_dataloader
# runtime settings
default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=5))
train_cfg = dict(val_interval=5)
_base_ = [
'../_base_/datasets/scannet-seg.py', '../_base_/models/pointnet2_ssg.py',
'../_base_/schedules/seg-cosine-200e.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
decode_head=dict(
num_classes=20,
ignore_index=20,
# `class_weight` is generated in data pre-processing, saved in
# `data/scannet/seg_info/train_label_weight.npy`
        # you can copy-paste the values here, or pass the file path as
        # `class_weight='data/scannet/seg_info/train_label_weight.npy'`
loss_decode=dict(class_weight=[
2.389689, 2.7215734, 4.5944676, 4.8543367, 4.096086, 4.907941,
4.690836, 4.512031, 4.623311, 4.9242644, 5.358117, 5.360071,
5.019636, 4.967126, 5.3502126, 5.4023647, 5.4027233, 5.4169416,
5.3954206, 4.6971426
])),
test_cfg=dict(
num_points=8192,
block_size=1.5,
sample_rate=0.5,
use_normalized_coord=False,
batch_size=24))
# data settings
train_dataloader = dict(batch_size=16)
# runtime settings
default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=5))
train_cfg = dict(val_interval=5)
_base_ = [
'../_base_/datasets/s3dis-seg.py', '../_base_/models/pointnet2_ssg.py',
'../_base_/schedules/seg-cosine-50e.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
backbone=dict(in_channels=9), # [xyz, rgb, normalized_xyz]
decode_head=dict(
num_classes=13, ignore_index=13,
loss_decode=dict(class_weight=None)), # S3DIS doesn't use class_weight
test_cfg=dict(
num_points=4096,
block_size=1.0,
sample_rate=0.5,
use_normalized_coord=True,
batch_size=24))
# data settings
train_dataloader = dict(batch_size=16)
# runtime settings
default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=2))
train_cfg = dict(val_interval=2)
# PointPillars: Fast Encoders for Object Detection from Point Clouds
> [PointPillars: Fast Encoders for Object Detection from Point Clouds](https://arxiv.org/abs/1812.05784)
<!-- [ALGORITHM] -->
## Abstract
Object detection in point clouds is an important aspect of many robotics applications such as autonomous driving. In this paper we consider the problem of encoding a point cloud into a format appropriate for a downstream detection pipeline. Recent literature suggests two types of encoders; fixed encoders tend to be fast but sacrifice accuracy, while encoders that are learned from data are more accurate, but slower. In this work we propose PointPillars, a novel encoder which utilizes PointNets to learn a representation of point clouds organized in vertical columns (pillars). While the encoded features can be used with any standard 2D convolutional detection architecture, we further propose a lean downstream network. Extensive experimentation shows that PointPillars outperforms previous encoders with respect to both speed and accuracy by a large margin. Despite only using lidar, our full detection pipeline significantly outperforms the state of the art, even among fusion methods, with respect to both the 3D and bird's eye view KITTI benchmarks. This detection performance is achieved while running at 62 Hz: a 2 - 4 fold runtime improvement. A faster version of our method matches the state of the art at 105 Hz. These benchmarks suggest that PointPillars is an appropriate encoding for object detection in point clouds.
<div align=center>
<img src="https://user-images.githubusercontent.com/79644370/143885905-aab6ffcf-7727-495e-90ca-edb8dd5e324b.png" width="800"/>
</div>
## Introduction
We implement PointPillars and provide the results and checkpoints on the KITTI, nuScenes, Lyft and Waymo datasets.
## Results and models
### KITTI
| Backbone | Class | Lr schd | Mem (GB) | Inf time (fps) | AP | Download |
| :-------------------------------------------------------------: | :-----: | :---------: | :------: | :------------: | :---: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
| [SECFPN](./pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py) | Car | cyclic 160e | 5.4 | | 77.6 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606.log.json) |
| [SECFPN](./pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py) | 3 Class | cyclic 160e | 5.5 | | 64.07 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class_20220301_150306-37dc2420.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class_20220301_150306.log.json) |
### nuScenes
| Backbone | Lr schd | Mem (GB) | Inf time (fps) | mAP | NDS | Download |
| :---------------------------------------------------------------------: | :-----: | :------: | :------------: | :---: | :---: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
| [SECFPN](./pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py) | 2x | 16.4 | | 34.33 | 49.1 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d_20210826_225857-f19d00a3.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d_20210826_225857.log.json) |
| [SECFPN (FP16)](./pointpillars_hv_secfpn_sbn-all_8xb2-amp-2x_nus-3d.py) | 2x | 8.37 | | 35.19 | 50.27 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_pointpillars_secfpn_sbn-all_fp16_2x8_2x_nus-3d/hv_pointpillars_secfpn_sbn-all_fp16_2x8_2x_nus-3d_20201020_222626-c3f0483e.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_pointpillars_secfpn_sbn-all_fp16_2x8_2x_nus-3d/hv_pointpillars_secfpn_sbn-all_fp16_2x8_2x_nus-3d_20201020_222626.log.json) |
| [FPN](./pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py) | 2x | 16.3 | | 39.7 | 53.2 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_20210826_104936-fca299c1.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_20210826_104936.log.json) |
| [FPN (FP16)](./pointpillars_hv_fpn_sbn-all_8xb2-amp-2x_nus-3d.py) | 2x | 8.40 | | 39.26 | 53.26 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d_20201021_120719-269f9dd6.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d_20201021_120719.log.json) |
### Lyft
| Backbone | Lr schd | Mem (GB) | Inf time (fps) | Private Score | Public Score | Download |
| :-----------------------------------------------------------: | :-----: | :------: | :------------: | :-----------: | :----------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
| [SECFPN](./pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d.py) | 2x | 12.2 | | 13.8 | 14.1 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d_20210829_100455-82b81c39.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d_20210829_100455.log.json) |
| [FPN](./pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py) | 2x | 9.2 | | 14.8 | 15.0 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d_20210822_095429-0b3d6196.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d_20210822_095429.log.json) |
### Waymo
| Backbone | Load Interval | Class | Lr schd | Mem (GB) | Inf time (fps) | mAP@L1 | mAPH@L1 | mAP@L2 | **mAPH@L2** | Download |
| :----------------------------------------------------------------------: | :-----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :---------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
| [SECFPN](./pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-car.py) | 5 | Car | 2x | 7.76 | | 70.2 | 69.6 | 62.6 | 62.1 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-car/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-car_20200901_204315-302fc3e7.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-car/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-car_20200901_204315.log.json) |
| [SECFPN](./pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py) | 5 | 3 Class | 2x | 8.12 | | 64.7 | 57.6 | 58.4 | 52.1 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class_20200831_204144-d1a706b1.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class_20200831_204144.log.json) |
| above @ Car | | | 2x | 8.12 | | 68.5 | 67.9 | 60.1 | 59.6 | |
| above @ Pedestrian | | | 2x | 8.12 | | 67.8 | 50.6 | 59.6 | 44.3 | |
| above @ Cyclist | | | 2x | 8.12 | | 57.7 | 54.4 | 55.5 | 52.4 | |
| [SECFPN](./pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-car.py) | 1 | Car | 2x | 7.76 | | 72.1 | 71.5 | 63.6 | 63.1 | [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymo-3d-car/hv_pointpillars_secfpn_sbn_2x16_2x_waymo-3d-car.log.json) |
| [SECFPN](./pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-3class.py) | 1 | 3 Class | 2x | 8.12 | | 68.8 | 63.3 | 62.6 | 57.6 | [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymo-3d-3class/hv_pointpillars_secfpn_sbn_2x16_2x_waymo-3d-3class.log.json) |
| above @ Car | | | 2x | 8.12 | | 71.6 | 71.0 | 63.1 | 62.5 | |
| above @ Pedestrian | | | 2x | 8.12 | | 70.6 | 56.7 | 62.9 | 50.2 | |
| above @ Cyclist | | | 2x | 8.12 | | 64.4 | 62.3 | 61.9 | 59.9 | |
#### Note:
- **Metric**: For models trained with 3 classes, the average APH@L2 (mAPH@L2) over all categories is reported and used to rank the model. For models trained with only 1 class, the APH@L2 is reported and used to rank the model.
- **Data Split**: Here we provide several baselines for the Waymo dataset; D5 means we divide the dataset into 5 folds and use only one fold for efficient experiments. Using the complete dataset boosts performance considerably, especially for cyclist and pedestrian detection, where an improvement of more than 5 mAP or mAPH can be expected.
- **Implementation Details**: We basically follow the implementation in the [paper](https://arxiv.org/pdf/1912.04838.pdf) in terms of the network architecture (with a stride of 1 for the first convolutional block). Different voxelization, data augmentation and hyperparameter settings make these baselines outperform those in the paper by about 7 mAP for car and 4 mAP for pedestrian, using only a subset of the whole dataset. All of these results are achieved without bells and whistles such as ensembling, multi-scale training and test augmentation.
- **License Agreement**: To comply with the [license agreement of the Waymo dataset](https://waymo.com/open/terms/), the models pre-trained on the Waymo dataset are not released. We still release the training logs as a reference to ease future research.
- `FP16` means mixed-precision (FP16) training is adopted. With mixed-precision training, we can train PointPillars on the nuScenes dataset with 8 Titan XP GPUs and a batch size of 2 per GPU, a setting that causes an OOM error without it. The loss scale for PointPillars on nuScenes is specifically tuned to keep the loss from becoming NaN: we find 32 more stable than 512, though a loss scale of 32 still occasionally yields NaN. A config sketch is given after this list.
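A minimal sketch of such a setting, following the `AmpOptimWrapper` usage shown in the FP16 configs below (the nested optimizer values are illustrative assumptions):

```python
# Mixed-precision training with the hand-tuned loss scale of 32.
optim_wrapper = dict(
    type='AmpOptimWrapper',
    loss_scale=32.,
    optimizer=dict(type='AdamW', lr=0.001, weight_decay=0.01))
```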
## Citation
```latex
@inproceedings{lang2019pointpillars,
title={Pointpillars: Fast encoders for object detection from point clouds},
author={Lang, Alex H and Vora, Sourabh and Caesar, Holger and Zhou, Lubing and Yang, Jiong and Beijbom, Oscar},
booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
pages={12697--12705},
year={2019}
}
```
Collections:
- Name: PointPillars
Metadata:
Training Techniques:
- AdamW
Architecture:
- Feature Pyramid Network
Paper:
URL: https://arxiv.org/abs/1812.05784
Title: 'PointPillars: Fast Encoders for Object Detection from Point Clouds'
README: configs/pointpillars/README.md
Code:
URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/voxel_encoders/pillar_encoder.py#L13
Version: v0.6.0
Models:
- Name: pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car
In Collection: PointPillars
Config: configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py
Metadata:
Training Data: KITTI
Training Memory (GB): 5.4
Training Resources: 8x V100 GPUs
Results:
- Task: 3D Object Detection
Dataset: KITTI
Metrics:
AP: 77.6
Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth
- Name: pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class
Alias: pointpillars_kitti-3class
In Collection: PointPillars
Config: configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py
Metadata:
Training Data: KITTI
Training Memory (GB): 5.5
Training Resources: 8x V100 GPUs
Results:
- Task: 3D Object Detection
Dataset: KITTI
Metrics:
AP: 64.07
Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class_20220301_150306-37dc2420.pth
- Name: pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d
In Collection: PointPillars
Config: configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py
Metadata:
Training Data: nuScenes
Training Memory (GB): 16.4
Training Resources: 8x V100 GPUs
Results:
- Task: 3D Object Detection
Dataset: nuScenes
Metrics:
mAP: 34.33
NDS: 49.1
Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d_20210826_225857-f19d00a3.pth
- Name: pointpillars_hv_secfpn_sbn-all_8xb4-amp-2x_nus-3d
In Collection: PointPillars
Config: configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb4-amp-2x_nus-3d.py
Metadata:
Training Techniques:
- AdamW
- Mixed Precision Training
Training Resources: 8x TITAN Xp
Architecture:
- Hard Voxelization
Training Data: nuScenes
Training Memory (GB): 8.37
Results:
- Task: 3D Object Detection
Dataset: nuScenes
Metrics:
mAP: 35.19
NDS: 50.27
Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_pointpillars_secfpn_sbn-all_fp16_2x8_2x_nus-3d/hv_pointpillars_secfpn_sbn-all_fp16_2x8_2x_nus-3d_20201020_222626-c3f0483e.pth
Code:
Version: v0.7.0
- Name: pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d
In Collection: PointPillars
Config: configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py
Metadata:
Training Data: nuScenes
Training Memory (GB): 16.3
Training Resources: 8x V100 GPUs
Results:
- Task: 3D Object Detection
Dataset: nuScenes
Metrics:
mAP: 39.71
NDS: 53.15
Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_20210826_104936-fca299c1.pth
- Name: pointpillars_hv_fpn_sbn-all_8xb4-amp-2x_nus-3d
In Collection: PointPillars
Config: configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-amp-2x_nus-3d.py
Metadata:
Training Techniques:
- AdamW
- Mixed Precision Training
Training Resources: 8x TITAN Xp
Architecture:
- Hard Voxelization
Training Data: nuScenes
Training Memory (GB): 8.40
Results:
- Task: 3D Object Detection
Dataset: nuScenes
Metrics:
mAP: 39.26
NDS: 53.26
Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d_20201021_120719-269f9dd6.pth
Code:
Version: v0.7.0
- Name: pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d
In Collection: PointPillars
Config: configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d.py
Metadata:
Training Data: Lyft
Training Memory (GB): 12.2
Training Resources: 8x V100 GPUs
Results:
- Task: 3D Object Detection
Dataset: Lyft
Metrics:
Private Score: 13.8
Public Score: 14.1
Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d_20210829_100455-82b81c39.pth
- Name: pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d
In Collection: PointPillars
Config: configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py
Metadata:
Training Data: Lyft
Training Memory (GB): 9.2
Training Resources: 8x V100 GPUs
Results:
- Task: 3D Object Detection
Dataset: Lyft
Metrics:
Private Score: 14.0
Public Score: 15.0
Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d_20210822_095429-0b3d6196.pth
- Name: pointpillars_hv_secfpn_sbn_2x16_2x_waymoD5-3d-car
In Collection: PointPillars
Config: configs/pointpillars/pointpillars_hv_secfpn_sbn_2x16_2x_waymoD5-3d-car.py
Metadata:
Training Data: Waymo
Training Memory (GB): 7.76
Training Resources: 8x GeForce GTX 1080 Ti
Results:
- Task: 3D Object Detection
Dataset: Waymo
Metrics:
mAP@L1: 70.2
mAPH@L1: 69.6
mAP@L2: 62.6
mAPH@L2: 62.1
Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-car/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-car_20200901_204315-302fc3e7.pth
- Name: pointpillars_hv_secfpn_sbn_2x16_2x_waymoD5-3d-3class
Alias: pointpillars_waymod5-3class
In Collection: PointPillars
Config: configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py
Metadata:
Training Data: Waymo
Training Memory (GB): 8.12
Training Resources: 8x GeForce GTX 1080 Ti
Results:
- Task: 3D Object Detection
Dataset: Waymo
Metrics:
mAP@L1: 64.7
mAPH@L1: 57.6
mAP@L2: 58.4
mAPH@L2: 52.1
Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class_20200831_204144-d1a706b1.pth
- Name: pointpillars_hv_secfpn_sbn_2x16_2x_waymo-3d-car
In Collection: PointPillars
Config: configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-car.py
Metadata:
Training Data: Waymo
Training Memory (GB): 7.76
Training Resources: 8x GeForce GTX 1080 Ti
Results:
- Task: 3D Object Detection
Dataset: Waymo
Metrics:
mAP@L1: 72.1
mAPH@L1: 71.5
mAP@L2: 63.6
mAPH@L2: 63.1
- Name: pointpillars_hv_secfpn_sbn_2x16_2x_waymo-3d-3class
In Collection: PointPillars
Config: configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-3class.py
Metadata:
Training Data: Waymo
Training Memory (GB): 8.12
Training Resources: 8x GeForce GTX 1080 Ti
Results:
- Task: 3D Object Detection
Dataset: Waymo
Metrics:
mAP@L1: 68.8
mAPH@L1: 63.3
mAP@L2: 62.6
mAPH@L2: 57.6
_base_ = [
'../_base_/models/pointpillars_hv_fpn_range100_lyft.py',
'../_base_/datasets/lyft-3d-range100.py',
'../_base_/schedules/schedule-2x.py', '../_base_/default_runtime.py'
]
# Default setting for scaling LR automatically
# - `enable`: whether to automatically scale the LR (disabled by default).
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
_base_ = [
'../_base_/models/pointpillars_hv_fpn_lyft.py',
'../_base_/datasets/lyft-3d.py', '../_base_/schedules/schedule-2x.py',
'../_base_/default_runtime.py'
]
# Default setting for scaling LR automatically
# - `enable`: whether to automatically scale the LR (disabled by default).
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
_base_ = './pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py'
train_dataloader = dict(batch_size=2, num_workers=2)
# schedule settings
optim_wrapper = dict(type='AmpOptimWrapper', loss_scale=4096.)
_base_ = [
'../_base_/models/pointpillars_hv_fpn_nus.py',
'../_base_/datasets/nus-3d.py', '../_base_/schedules/schedule-2x.py',
'../_base_/default_runtime.py'
]
# For the nuScenes dataset, we usually evaluate the model at the end of
# training. Since the models are trained for 24 epochs by default, we set
# the evaluation interval to 24. Please change the interval accordingly if
# you do not use the default schedule.
train_cfg = dict(val_interval=24)
_base_ = [
'../_base_/models/pointpillars_hv_secfpn_kitti.py',
'../_base_/datasets/kitti-3d-3class.py',
'../_base_/schedules/cyclic-40e.py', '../_base_/default_runtime.py'
]
point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1]
# dataset settings
data_root = 'data/kitti/'
class_names = ['Pedestrian', 'Cyclist', 'Car']
metainfo = dict(classes=class_names)
backend_args = None
# PointPillars adopts different sampling strategies across classes
db_sampler = dict(
data_root=data_root,
info_path=data_root + 'kitti_dbinfos_train.pkl',
rate=1.0,
prepare=dict(
filter_by_difficulty=[-1],
filter_by_min_points=dict(Car=5, Pedestrian=5, Cyclist=5)),
classes=class_names,
sample_groups=dict(Car=15, Pedestrian=15, Cyclist=15),
points_loader=dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
backend_args=backend_args),
backend_args=backend_args)
# PointPillars uses different augmentation hyperparameters
train_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
backend_args=backend_args),
dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
dict(type='ObjectSample', db_sampler=db_sampler, use_ground_plane=True),
dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.78539816, 0.78539816],
scale_ratio_range=[0.95, 1.05]),
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
dict(type='PointShuffle'),
dict(
type='Pack3DDetInputs',
keys=['points', 'gt_labels_3d', 'gt_bboxes_3d'])
]
test_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
backend_args=backend_args),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(type='RandomFlip3D'),
dict(
type='PointsRangeFilter', point_cloud_range=point_cloud_range)
]),
dict(type='Pack3DDetInputs', keys=['points'])
]
train_dataloader = dict(
dataset=dict(dataset=dict(pipeline=train_pipeline, metainfo=metainfo)))
test_dataloader = dict(dataset=dict(pipeline=test_pipeline, metainfo=metainfo))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline, metainfo=metainfo))
# In practice PointPillars also uses a different schedule
# optimizer
lr = 0.001
epoch_num = 80
optim_wrapper = dict(
optimizer=dict(lr=lr), clip_grad=dict(max_norm=35, norm_type=2))
param_scheduler = [
dict(
type='CosineAnnealingLR',
T_max=epoch_num * 0.4,
eta_min=lr * 10,
begin=0,
end=epoch_num * 0.4,
by_epoch=True,
convert_to_iter_based=True),
dict(
type='CosineAnnealingLR',
T_max=epoch_num * 0.6,
eta_min=lr * 1e-4,
begin=epoch_num * 0.4,
end=epoch_num * 1,
by_epoch=True,
convert_to_iter_based=True),
dict(
type='CosineAnnealingMomentum',
T_max=epoch_num * 0.4,
eta_min=0.85 / 0.95,
begin=0,
end=epoch_num * 0.4,
by_epoch=True,
convert_to_iter_based=True),
dict(
type='CosineAnnealingMomentum',
T_max=epoch_num * 0.6,
eta_min=1,
begin=epoch_num * 0.4,
end=epoch_num * 1,
convert_to_iter_based=True)
]
# max_norm=35 was slightly better than 10 for PointPillars in earlier
# development of the codebase, so we keep the setting, though we did not
# specifically tune this parameter.
# PointPillars usually needs a longer schedule than SECOND, so we simply
# double the training schedule. Note that since we use RepeatDataset with a
# repeat factor of 2, we actually train for 160 epochs.
train_cfg = dict(by_epoch=True, max_epochs=epoch_num, val_interval=2)
val_cfg = dict()
test_cfg = dict()
_base_ = './pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py'
# dataset settings
dataset_type = 'KittiDataset'
data_root = 'data/kitti/'
class_names = ['Car']
metainfo = dict(classes=class_names)
backend_args = None
point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1]
# model settings
model = dict(
bbox_head=dict(
type='Anchor3DHead',
num_classes=1,
anchor_generator=dict(
_delete_=True,
type='AlignedAnchor3DRangeGenerator',
ranges=[[0, -39.68, -1.78, 69.12, 39.68, -1.78]],
sizes=[[3.9, 1.6, 1.56]],
rotations=[0, 1.57],
reshape_out=True)),
# model training and testing settings
train_cfg=dict(
_delete_=True,
assigner=dict(
type='Max3DIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.6,
neg_iou_thr=0.45,
min_pos_iou=0.45,
ignore_iof_thr=-1),
allowed_border=0,
pos_weight=-1,
debug=False))
db_sampler = dict(
data_root=data_root,
info_path=data_root + 'kitti_dbinfos_train.pkl',
rate=1.0,
prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)),
classes=class_names,
sample_groups=dict(Car=15),
points_loader=dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
backend_args=backend_args),
backend_args=backend_args)
train_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
backend_args=backend_args),
dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
dict(type='ObjectSample', db_sampler=db_sampler, use_ground_plane=True),
dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.78539816, 0.78539816],
scale_ratio_range=[0.95, 1.05]),
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
dict(type='PointShuffle'),
dict(
type='Pack3DDetInputs',
keys=['points', 'gt_labels_3d', 'gt_bboxes_3d'])
]
test_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
backend_args=backend_args),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(type='RandomFlip3D'),
dict(
type='PointsRangeFilter', point_cloud_range=point_cloud_range)
]),
dict(type='Pack3DDetInputs', keys=['points'])
]
train_dataloader = dict(
dataset=dict(dataset=dict(pipeline=train_pipeline, metainfo=metainfo)))
test_dataloader = dict(dataset=dict(pipeline=test_pipeline, metainfo=metainfo))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline, metainfo=metainfo))