"vscode:/vscode.git/clone" did not exist on "b3254eaf0cddf5cf8ee93dd42a4147e0853debc0"
Unverified Commit d7067e44 authored by Wenwei Zhang, committed by GitHub

Bump version to v1.1.0rc2

Bump to v1.1.0rc2
parents 28fe73d2 fb0e57e5
@@ -26,6 +26,7 @@ workflows:
tools/.* lint_only false
configs/.* lint_only false
.circleci/.* lint_only false
+projects/.* lint_only false
base-revision: dev-1.x
# this is the path of the configuration we should trigger once
# path filtering and pipeline parameter value updates are
......
@@ -73,11 +73,11 @@ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION dv_mvx-f
$CHECKPOINT_DIR/configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py/latest.pth --eval map \
2>&1|tee $CHECKPOINT_DIR/configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py/FULL_LOG.txt &
-echo 'configs/parta2/PartA2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py' &
-mkdir -p $CHECKPOINT_DIR/configs/parta2/PartA2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py
-GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class configs/parta2/PartA2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py \
-$CHECKPOINT_DIR/configs/parta2/PartA2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py/latest.pth --eval map \
-2>&1|tee $CHECKPOINT_DIR/configs/parta2/PartA2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py/FULL_LOG.txt &
+echo 'configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py' &
+mkdir -p $CHECKPOINT_DIR/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py
+GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py \
+$CHECKPOINT_DIR/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py/latest.pth --eval map \
+2>&1|tee $CHECKPOINT_DIR/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py/FULL_LOG.txt &
echo 'configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py' &
mkdir -p $CHECKPOINT_DIR/configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py
......
@@ -73,11 +73,11 @@ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION dv_mvx-
$CHECKPOINT_DIR/configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py --cfg-options checkpoint_config.max_keep_ckpts=1 \
2>&1|tee $CHECKPOINT_DIR/configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py/FULL_LOG.txt &
-echo 'configs/parta2/PartA2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py' &
-mkdir -p $CHECKPOINT_DIR/configs/parta2/PartA2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py
-GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class configs/parta2/PartA2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py \
-$CHECKPOINT_DIR/configs/parta2/PartA2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py --cfg-options checkpoint_config.max_keep_ckpts=1 \
-2>&1|tee $CHECKPOINT_DIR/configs/parta2/PartA2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py/FULL_LOG.txt &
+echo 'configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py' &
+mkdir -p $CHECKPOINT_DIR/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py
+GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py \
+$CHECKPOINT_DIR/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py --cfg-options checkpoint_config.max_keep_ckpts=1 \
+2>&1|tee $CHECKPOINT_DIR/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py/FULL_LOG.txt &
echo 'configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py' &
mkdir -p $CHECKPOINT_DIR/configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py
......
name: lint
on: [push, pull_request]
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.7
uses: actions/setup-python@v2
with:
python-version: 3.7
- name: Install pre-commit hook
run: |
pip install pre-commit
pre-commit install
- name: Linting
run: pre-commit run --all-files
- name: Check docstring coverage
run: |
pip install interrogate
interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 90 mmdet3d
name: merge_stage_test
on:
push:
paths-ignore:
- 'README.md'
- 'README_zh-CN.md'
- 'docs/**'
- 'demo/**'
- '.dev_scripts/**'
- '.circleci/**'
branches:
- dev-1.x
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build_cpu_py:
runs-on: ubuntu-18.04
strategy:
matrix:
python-version: [3.7, 3.8, 3.9]
torch: [1.8.1]
include:
- torch: 1.8.1
torchvision: 0.9.1
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Upgrade pip
run: pip install pip --upgrade
- name: Install PyTorch
run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html
- name: Install MMEngine
run: pip install git+https://github.com/open-mmlab/mmengine.git@main
- name: Install MMCV
run: |
pip install -U openmim
mim install 'mmcv >= 2.0.0rc1'
- name: Install MMDet
run: pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
- name: Install other dependencies
run: pip install -r requirements/tests.txt
- name: Build and install
run: rm -rf .eggs && pip install -e .
- name: Run unittests and generate coverage report
run: |
coverage run --branch --source mmdet3d -m pytest tests/
coverage xml
coverage report -m
build_cpu_pt:
runs-on: ubuntu-18.04
strategy:
matrix:
python-version: [3.7]
torch: [1.6.0, 1.7.1, 1.8.1, 1.9.1, 1.10.1, 1.11.0, 1.12.1]
include:
- torch: 1.6.0
torchvision: 0.7.0
- torch: 1.7.1
torchvision: 0.8.2
- torch: 1.8.1
torchvision: 0.9.1
- torch: 1.9.1
torchvision: 0.10.1
- torch: 1.10.1
torchvision: 0.11.2
- torch: 1.11.0
torchvision: 0.12.0
- torch: 1.12.1
torchvision: 0.13.1
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Upgrade pip
run: pip install pip --upgrade
- name: Install PyTorch
run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html
- name: Install MMEngine
run: pip install git+https://github.com/open-mmlab/mmengine.git@main
- name: Install MMCV
run: |
pip install -U openmim
mim install 'mmcv >= 2.0.0rc1'
- name: Install MMDet
run: pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
- name: Install other dependencies
run: pip install -r requirements/tests.txt
- name: Build and install
run: rm -rf .eggs && pip install -e .
- name: Run unittests and generate coverage report
run: |
coverage run --branch --source mmdet3d -m pytest tests/
coverage xml
coverage report -m
# Only upload coverage report for python3.7 && pytorch1.8.1 cpu
- name: Upload coverage to Codecov
if: ${{matrix.torch == '1.8.1' && matrix.python-version == '3.7'}}
uses: codecov/codecov-action@v1.0.14
with:
file: ./coverage.xml
flags: unittests
env_vars: OS,PYTHON
name: codecov-umbrella
fail_ci_if_error: false
build_cu102:
runs-on: ubuntu-18.04
container:
image: pytorch/pytorch:1.8.1-cuda10.2-cudnn7-devel
strategy:
matrix:
python-version: [3.7]
include:
- torch: 1.8.1
cuda: 10.2
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Upgrade pip
run: pip install pip --upgrade
- name: Fetch GPG keys
run: |
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
- name: Install Python-dev
run: apt-get update && apt-get install -y python${{matrix.python-version}}-dev
if: ${{matrix.python-version != 3.9}}
- name: Install system dependencies
run: |
apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6
- name: Install PyTorch
run: python -m pip install torch==1.8.1+cu102 torchvision==0.9.1+cu102 -f https://download.pytorch.org/whl/torch_stable.html
- name: Install mmdet3d dependencies
run: |
pip install git+https://github.com/open-mmlab/mmengine.git@main
pip install -U openmim
mim install 'mmcv >= 2.0.0rc1'
pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
pip install -r requirements/tests.txt
- name: Build and install
run: |
python setup.py check -m -s
TORCH_CUDA_ARCH_LIST=7.0 pip install -e .
build_windows:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [windows-2022]
python: [3.7]
platform: [cpu, cu111]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
- name: Upgrade pip
run: pip install pip --upgrade
- name: Install lmdb
run: pip install lmdb
- name: Install PyTorch
run: pip install torch==1.8.1+${{matrix.platform}} torchvision==0.9.1+${{matrix.platform}} -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
- name: Install mmdet3d dependencies
run: |
pip install git+https://github.com/open-mmlab/mmengine.git@main
pip install -U openmim
mim install 'mmcv >= 2.0.0rc1'
pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
pip install -r requirements/tests.txt
- name: Build and install
run: |
pip install -e .
- name: Run unittests and generate coverage report
run: |
pytest tests/
name: pr_stage_test
on:
pull_request:
paths-ignore:
- 'README.md'
- 'README_zh-CN.md'
- 'docs/**'
- 'demo/**'
- '.dev_scripts/**'
- '.circleci/**'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build_cpu:
runs-on: ubuntu-18.04
strategy:
matrix:
python-version: [3.7]
include:
- torch: 1.8.1
torchvision: 0.9.1
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Upgrade pip
run: pip install pip --upgrade
- name: Install PyTorch
run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html
- name: Install MMEngine
run: pip install git+https://github.com/open-mmlab/mmengine.git@main
- name: Install MMCV
run: |
pip install -U openmim
mim install 'mmcv >= 2.0.0rc1'
- name: Install MMDet
run: pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
- name: Install other dependencies
run: pip install -r requirements/tests.txt
- name: Build and install
run: rm -rf .eggs && pip install -e .
- name: Run unittests and generate coverage report
run: |
coverage run --branch --source mmdet3d -m pytest tests/
coverage xml
coverage report -m
# Upload coverage report for python3.7 && pytorch1.8.1 cpu
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v1.0.14
with:
file: ./coverage.xml
flags: unittests
env_vars: OS,PYTHON
name: codecov-umbrella
fail_ci_if_error: false
build_cu102:
runs-on: ubuntu-18.04
container:
image: pytorch/pytorch:1.8.1-cuda10.2-cudnn7-devel
strategy:
matrix:
python-version: [3.8]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Upgrade pip
run: pip install pip --upgrade
- name: Fetch GPG keys
run: |
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
- name: Install Python-dev
run: apt-get update && apt-get install -y python${{matrix.python-version}}-dev
if: ${{matrix.python-version != 3.9}}
- name: Install system dependencies
run: |
apt-get update
apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev
- name: Install PyTorch
run: python -m pip install torch==1.8.1+cu102 torchvision==0.9.1+cu102 -f https://download.pytorch.org/whl/torch_stable.html
- name: Install mmdet3d dependencies
run: |
pip install git+https://github.com/open-mmlab/mmengine.git@main
pip install -U openmim
mim install 'mmcv >= 2.0.0rc1'
pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
pip install -r requirements/tests.txt
- name: Build and install
run: |
python setup.py check -m -s
TORCH_CUDA_ARCH_LIST=7.0 pip install -e .
build_windows:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [windows-2022]
python: [3.7]
platform: [cpu, cu111]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
- name: Upgrade pip
run: python -m pip install pip --upgrade
- name: Install lmdb
run: pip install lmdb
- name: Install PyTorch
run: pip install torch==1.8.1+${{matrix.platform}} torchvision==0.9.1+${{matrix.platform}} -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
- name: Install mmdet3d dependencies
run: |
pip install git+https://github.com/open-mmlab/mmengine.git@main
pip install -U openmim
mim install 'mmcv >= 2.0.0rc1'
pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
pip install -r requirements/tests.txt
- name: Build and install
run: |
pip install -e .
- name: Run unittests and generate coverage report
run: |
pytest tests/
name: test-mim
on:
push:
paths:
- 'model-index.yml'
- 'configs/**'
pull_request:
paths:
- 'model-index.yml'
- 'configs/**'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build_cpu:
runs-on: ubuntu-18.04
strategy:
matrix:
python-version: [3.7]
torch: [1.8.0]
include:
- torch: 1.8.0
torch_version: torch1.8
torchvision: 0.9.0
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Upgrade pip
run: pip install pip --upgrade
- name: Install PyTorch
run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html
- name: Install openmim
run: pip install openmim
- name: Build and install
run: rm -rf .eggs && mim install -e .
- name: test commands of mim
run: mim search mmdet3d
@@ -26,7 +26,7 @@
**News**:
-**v1.1.0rc1** was released in 11/10/2022
+**v1.1.0rc2** was released in 2/12/2022
The compatibilities of models are broken due to the unification and simplification of coordinate systems after v1.0.0rc0. For now, most models are benchmarked with similar performance, though few models are still being benchmarked. In the following release, we will update all the model checkpoints and benchmarks. See more details in the [Changelog](docs/en/notes/changelog.md) and [Changelog-v1.0.x](docs/en/notes/changelog_v1.0.x.md).
@@ -60,7 +60,7 @@ a part of the OpenMMLab project developed by [MMLab](http://mmlab.ie.cuhk.edu.hk
- **High efficiency**
-It trains faster than other codebases. The main results are as below. Details can be found in [benchmark.md](./docs/en/benchmarks.md). We compare the number of samples trained per second (the higher, the better). The models that are not supported by other codebases are marked by `×`.
+It trains faster than other codebases. The main results are as below. Details can be found in [benchmark.md](./docs/en/notes/benchmarks.md). We compare the number of samples trained per second (the higher, the better). The models that are not supported by other codebases are marked by `×`.
| Methods | MMDetection3D | [OpenPCDet](https://github.com/open-mmlab/OpenPCDet) | [votenet](https://github.com/facebookresearch/votenet) | [Det3D](https://github.com/poodarchu/Det3D) |
| :-----------------: | :-----------: | :--------------------------------------------------: | :----------------------------------------------------: | :-----------------------------------------: |
@@ -78,7 +78,7 @@ This project is released under the [Apache 2.0 license](LICENSE).
## Changelog
-**1.1.0rc1** was released in 11/10/2022.
+**1.1.0rc2** was released in 2/12/2022.
Please refer to [changelog.md](docs/en/notes/changelog.md) for details and release history.
@@ -159,12 +159,14 @@ Results and models are available in the [model zoo](docs/en/model_zoo.md).
<li><a href="configs/point_rcnn">PointRCNN (CVPR'2019)</a></li>
<li><a href="configs/parta2">Part-A2 (TPAMI'2020)</a></li>
<li><a href="configs/centerpoint">CenterPoint (CVPR'2021)</a></li>
+<li><a href="configs/pv_rcnn">PV-RCNN (CVPR'2020)</a></li>
</ul>
<li><b>Indoor</b></li>
<ul>
<li><a href="configs/votenet">VoteNet (ICCV'2019)</a></li>
<li><a href="configs/h3dnet">H3DNet (ECCV'2020)</a></li>
<li><a href="configs/groupfree3d">Group-Free-3D (ICCV'2021)</a></li>
+<li><a href="configs/fcaf3d">FCAF3D (ECCV'2022)</a></li>
</ul>
</td>
<td>
@@ -202,31 +204,33 @@ Results and models are available in the [model zoo](docs/en/model_zoo.md).
</tbody>
</table>
-| | ResNet | ResNeXt | SENet | PointNet++ | DGCNN | HRNet | RegNetX | Res2Net | DLA |
-| ------------- | :----: | :-----: | :---: | :--------: | :---: | :---: | :-----: | :-----: | :-: |
-| SECOND | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ✓ | ☐ | ✗ |
-| PointPillars | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ✓ | ☐ | ✗ |
-| FreeAnchor | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ✓ | ☐ | ✗ |
-| VoteNet | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
-| H3DNet | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
-| 3DSSD | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
-| Part-A2 | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ |
-| MVXNet | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ |
-| CenterPoint | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ |
-| SSN | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ✓ | ☐ | ✗ |
-| ImVoteNet | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
-| FCOS3D | ✓ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ |
-| PointNet++ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
-| Group-Free-3D | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
-| ImVoxelNet | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
-| PAConv | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
-| DGCNN | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ |
-| SMOKE | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ |
-| PGD | ✓ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ |
-| MonoFlex | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ |
-| SA-SSD | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ |
-**Note:** All the about **300+ models, methods of 40+ papers** in 2D detection supported by [MMDetection](https://github.com/open-mmlab/mmdetection/blob/master/docs/en/model_zoo.md) can be trained or used in this codebase.
+| | ResNet | ResNeXt | SENet | PointNet++ | DGCNN | HRNet | RegNetX | Res2Net | DLA | MinkResNet |
+| ------------- | :----: | :-----: | :---: | :--------: | :---: | :---: | :-----: | :-----: | :-: | :--------: |
+| SECOND | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ✓ | ☐ | ✗ | ✗ |
+| PointPillars | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ✓ | ☐ | ✗ | ✗ |
+| FreeAnchor | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ✓ | ☐ | ✗ | ✗ |
+| VoteNet | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| H3DNet | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| 3DSSD | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| Part-A2 | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ | ✗ |
+| MVXNet | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ | ✗ |
+| CenterPoint | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ | ✗ |
+| SSN | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ✓ | ☐ | ✗ | ✗ |
+| ImVoteNet | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| FCOS3D | ✓ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ | ✗ |
+| PointNet++ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| Group-Free-3D | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| ImVoxelNet | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| PAConv | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| DGCNN | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| SMOKE | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ |
+| PGD | ✓ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ | ✗ |
+| MonoFlex | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ |
+| SA-SSD | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ | ✗ |
+| FCAF3D | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ |
+| PV-RCNN | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ | ✗ |
+**Note:** All the about **300+ models, methods of 40+ papers** in 2D detection supported by [MMDetection](https://github.com/open-mmlab/mmdetection/blob/3.x/docs/en/model_zoo.md) can be trained or used in this codebase.
## Installation
@@ -234,7 +238,7 @@ Please refer to [getting_started.md](docs/en/getting_started.md) for installatio
## Get Started
-Please see [getting_started.md](docs/en/getting_started.md) for the basic usage of MMDetection3D. We provide guidance for quick run [with existing dataset](docs/en/user_guides/1_exist_data_model.md) and [with customized dataset](docs/en/user_guides/2_new_data_model.md) for beginners. There are also tutorials for [learning configuration systems](docs/en/user_guides/config.md), [adding new dataset](docs/en/advanced_guides/customize_dataset.md), [designing data pipeline](docs/en/user_guides/data_pipeline.md), [customizing models](docs/en/advanced_guides/customize_models.md), [customizing runtime settings](docs/en/advanced_guides/customize_runtime.md) and [Waymo dataset](docs/en/advanced_guides/datasets/waymo_det.md).
+Please see [getting_started.md](docs/en/getting_started.md) for the basic usage of MMDetection3D. We provide guidance for quick run [with existing dataset](docs/en/user_guides/train_test.md) and [with new dataset](docs/en/user_guides/2_new_data_model.md) for beginners. There are also tutorials for [learning configuration systems](docs/en/user_guides/config.md), [customizing dataset](docs/en/advanced_guides/customize_dataset.md), [designing data pipeline](docs/en/user_guides/data_pipeline.md), [customizing models](docs/en/advanced_guides/customize_models.md), [customizing runtime settings](docs/en/advanced_guides/customize_runtime.md) and [Waymo dataset](docs/en/advanced_guides/datasets/waymo_det.md).
Please refer to [FAQ](docs/en/notes/faq.md) for frequently asked questions. When updating the version of MMDetection3D, please also check the [compatibility doc](docs/en/notes/compatibility.md) to be aware of the BC-breaking updates introduced in each version.
@@ -264,11 +268,13 @@ We wish that the toolbox and benchmark could serve the growing research communit
- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab foundational library for training deep learning models.
- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision.
+- [MMEval](https://github.com/open-mmlab/mmeval): A unified evaluation library for multiple machine learning libraries.
- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages.
- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark.
- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark.
- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark.
+- [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO series toolbox and benchmark.
- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark.
- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox.
- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark.
......
@@ -19,18 +19,18 @@
<div>&nbsp;</div>
</div>
-[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdetection3d.readthedocs.io/en/1.1/)
+[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdetection3d.readthedocs.io/zh_CN/1.1/)
[![badge](https://github.com/open-mmlab/mmdetection3d/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdetection3d/actions)
[![codecov](https://codecov.io/gh/open-mmlab/mmdetection3d/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdetection3d)
[![license](https://img.shields.io/github/license/open-mmlab/mmdetection3d.svg)](https://github.com/open-mmlab/mmdetection3d/blob/master/LICENSE)
**News**:
-**v1.1.0rc1** was released on 2022.10.11.
+**v1.1.0rc2** was released on 2022.12.2.
-Due to the unification and simplification of coordinate systems, model compatibility is affected. For now, most models have been aligned to similar accuracy, though a few are still being benchmarked. In the next release, we will update all model checkpoints and benchmarks. See the [Changelog](docs/en/changelog.md) and the [v1.0.x Changelog](docs/en/notes/changelog_v1.0.x.md) for more details.
+Due to the unification and simplification of coordinate systems, model compatibility is affected. For now, most models have been aligned to similar accuracy, though a few are still being benchmarked. In the next release, we will update all model checkpoints and benchmarks. See the [Changelog](docs/zh_cn/notes/changelog.md) and the [v1.0.x Changelog](docs/zh_cn/notes/changelog_v1.0.x.md) for more details.
Documentation: https://mmdetection3d.readthedocs.io/
## Introduction
@@ -38,7 +38,7 @@
The master branch works with PyTorch 1.6+.
MMDetection3D is an open-source, PyTorch-based toolbox for object detection and a next-generation platform for general 3D detection. It is part of the OpenMMLab project jointly developed by the Multimedia Lab of the Chinese University of Hong Kong and SenseTime.
![demo image](resources/mmdet3d_outdoor_demo.gif)
@@ -50,17 +50,16 @@ MMDetection3D is an open-source, PyTorch-based object detection toolbox and a next-generation
- **Support for indoor/outdoor datasets**
Supports indoor/outdoor 3D detection datasets, including ScanNet, SUNRGB-D, Waymo, nuScenes, Lyft, and KITTI.
For the nuScenes dataset, we also support the [nuImages dataset](https://github.com/open-mmlab/mmdetection3d/tree/1.1/configs/nuimages).
- **Natural integration with 2D detectors**
-All the **300+ models and methods from 40+ papers** supported by [MMDetection](https://github.com/open-mmlab/mmdetection/blob/master/docs/zh_cn/model_zoo.md), together with the related modules, can be trained or used in this codebase.
+All the **300+ models and methods from 40+ papers** supported by [MMDetection](https://github.com/open-mmlab/mmdetection/blob/3.x/docs/zh_cn/model_zoo.md), together with the related modules, can be trained or used in this codebase.
- **High efficiency**
-It trains faster than other codebases. The main comparison results are shown in the table below. More details can be found in [benchmarks.md](./docs/zh_cn/benchmarks.md). We compare the number of samples trained per second (the higher, the better). Models not supported by other codebases are marked with `×`.
+It trains faster than other codebases. The main comparison results are shown in the table below. More details can be found in [benchmarks.md](./docs/zh_cn/notes/benchmarks.md). We compare the number of samples trained per second (the higher, the better). Models not supported by other codebases are marked with `×`.
| Methods | MMDetection3D | [OpenPCDet](https://github.com/open-mmlab/OpenPCDet) | [votenet](https://github.com/facebookresearch/votenet) | [Det3D](https://github.com/poodarchu/Det3D) |
| :-----------------: | :-----------: | :--------------------------------------------------: | :----------------------------------------------------: | :-----------------------------------------: |
@@ -70,7 +69,7 @@ MMDetection3D is an open-source, PyTorch-based object detection toolbox and a next-generation
| SECOND | 40 | 30 | × | × |
| Part-A2 | 17 | 14 | × | × |
Like [MMDetection](https://github.com/open-mmlab/mmdetection) and [MMCV](https://github.com/open-mmlab/mmcv), MMDetection3D can also be used as a library to support various projects.
## License
@@ -78,9 +77,9 @@ MMDetection3D is an open-source, PyTorch-based object detection toolbox and a next-generation
## Changelog
-We released version **1.1.0rc1** on 2022.10.11.
+We released version **1.1.0rc2** on 2022.12.2.
-Please refer to [changelog.md](docs/en/notes/changelog.md) for more details and release history.
+Please refer to [changelog.md](docs/zh_cn/notes/changelog.md) for more details and release history.
## Benchmark and Model Zoo
@@ -165,6 +164,7 @@ MMDetection3D is an open-source, PyTorch-based object detection toolbox and a next-generation
<li><a href="configs/votenet">VoteNet (ICCV'2019)</a></li>
<li><a href="configs/h3dnet">H3DNet (ECCV'2020)</a></li>
<li><a href="configs/groupfree3d">Group-Free-3D (ICCV'2021)</a></li>
+<li><a href="configs/fcaf3d">FCAF3D (ECCV'2022)</a></li>
</ul>
</td>
<td>
@@ -202,31 +202,32 @@ MMDetection3D is an open-source, PyTorch-based object detection toolbox and a next-generation
</tbody>
</table>
-| | ResNet | ResNeXt | SENet | PointNet++ | DGCNN | HRNet | RegNetX | Res2Net | DLA |
-| ------------- | :----: | :-----: | :---: | :--------: | :---: | :---: | :-----: | :-----: | :-: |
-| SECOND | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ✓ | ☐ | ✗ |
-| PointPillars | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ✓ | ☐ | ✗ |
-| FreeAnchor | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ✓ | ☐ | ✗ |
-| VoteNet | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
-| H3DNet | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
-| 3DSSD | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
-| Part-A2 | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ |
-| MVXNet | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ |
-| CenterPoint | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ |
-| SSN | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ✓ | ☐ | ✗ |
-| ImVoteNet | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
-| FCOS3D | ✓ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ |
-| PointNet++ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
-| Group-Free-3D | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
-| ImVoxelNet | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
-| PAConv | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
-| DGCNN | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ |
-| SMOKE | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ |
-| PGD | ✓ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ |
-| MonoFlex | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ |
-| SA-SSD | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ |
-**Note:** All the **300+ models and methods from 40+ papers** for 2D detection supported by [MMDetection](https://github.com/open-mmlab/mmdetection/blob/master/docs/zh_cn/model_zoo.md) can be trained or used in MMDetection3D.
+| | ResNet | ResNeXt | SENet | PointNet++ | DGCNN | HRNet | RegNetX | Res2Net | DLA | MinkResNet |
+| ------------- | :----: | :-----: | :---: | :--------: | :---: | :---: | :-----: | :-----: | :-: | :--------: |
+| SECOND | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ✓ | ☐ | ✗ | ✗ |
+| PointPillars | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ✓ | ☐ | ✗ | ✗ |
+| FreeAnchor | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ✓ | ☐ | ✗ | ✗ |
+| VoteNet | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| H3DNet | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| 3DSSD | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| Part-A2 | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ | ✗ |
+| MVXNet | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ | ✗ |
+| CenterPoint | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ | ✗ |
+| SSN | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ✓ | ☐ | ✗ | ✗ |
+| ImVoteNet | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| FCOS3D | ✓ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ | ✗ |
+| PointNet++ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| Group-Free-3D | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| ImVoxelNet | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| PAConv | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| DGCNN | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
+| SMOKE | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ |
+| PGD | ✓ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ | ✗ |
+| MonoFlex | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ |
+| SA-SSD | ☐ | ☐ | ☐ | ✗ | ✗ | ☐ | ☐ | ☐ | ✗ | ✗ |
+| FCAF3D | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ |
+**Note:** All the **300+ models and methods from 40+ papers** for 2D detection supported by [MMDetection](https://github.com/open-mmlab/mmdetection/blob/3.x/docs/zh_cn/model_zoo.md) can be trained or used in MMDetection3D.
## Installation
@@ -234,7 +235,7 @@ MMDetection3D is an open-source, PyTorch-based object detection toolbox and a next-generation
## Get Started
-Please refer to [getting_started.md](docs/zh_cn/getting_started.md) for the basic usage of MMDetection3D. We provide beginner guides for [existing datasets](docs/zh_cn/user_guides/1_exist_data_model.md) and [new datasets](docs/zh_cn/user_guides/2_new_data_model.md). There are also advanced tutorials covering [learning about configs](docs/zh_cn/user_guides/config.md), [adding dataset support](docs/zh_cn/advanced_guides/customize_dataset.md), [designing new data pipelines](docs/zh_cn/user_guides/data_pipeline.md), [customizing models](docs/zh_cn/advanced_guides/customize_models.md), [customizing runtime settings](docs/zh_cn/advanced_guides/customize_runtime.md), and the [Waymo dataset](docs/zh_cn/advanced_guides/datasets/waymo.md).
+Please refer to [getting_started.md](docs/zh_cn/getting_started.md) for the basic usage of MMDetection3D. We provide beginner guides for [existing datasets](docs/zh_cn/user_guides/train_test.md) and [new datasets](docs/zh_cn/user_guides/2_new_data_model.md). There are also advanced tutorials covering [learning about configs](docs/zh_cn/user_guides/config.md), [customizing datasets](docs/zh_cn/advanced_guides/customize_dataset.md), [designing new data pipelines](docs/zh_cn/user_guides/data_pipeline.md), [customizing models](docs/zh_cn/advanced_guides/customize_models.md), [customizing runtime settings](docs/zh_cn/advanced_guides/customize_runtime.md), and the [Waymo dataset](docs/zh_cn/advanced_guides/datasets/waymo_det.md).
Please refer to the [FAQ](docs/zh_cn/notes/faq.md) for frequently asked questions. When upgrading MMDetection3D, please also check the [compatibility doc](docs/zh_cn/notes/compatibility.md) to be aware of the backward-incompatible updates introduced in each version.
@@ -263,11 +264,13 @@ MMDetection3D is a codebase jointly developed by researchers from different universities and companies
- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab foundational library for training deep learning models
- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision
+- [MMEval](https://github.com/open-mmlab/mmeval): A unified and open cross-framework evaluation library
- [MIM](https://github.com/open-mmlab/mim): MIM is the unified entry point for OpenMMLab projects, algorithms, and models
- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox
- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab object detection toolbox
- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection
- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark
+- [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO series toolbox and benchmark
- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox
- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab toolbox for end-to-end text detection, recognition, and understanding
- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox
......
@@ -4,7 +4,7 @@ data_root = 'data/kitti/'
class_names = ['Pedestrian', 'Cyclist', 'Car']
point_cloud_range = [0, -40, -3, 70.4, 40, 1]
input_modality = dict(use_lidar=True, use_camera=False)
-metainfo = dict(CLASSES=class_names)
+metainfo = dict(classes=class_names)
db_sampler = dict(
data_root=data_root,
......
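The hunks above and below all apply the same `metainfo` key rename, from uppercase `CLASSES` to lowercase `classes`. A minimal sketch of the updated convention, using the KITTI class list from this diff:

```python
# KITTI-style dataset config fragment (dev-1.x convention)
class_names = ['Pedestrian', 'Cyclist', 'Car']

# v1.1.0rc1 and earlier used an uppercase key:
# metainfo = dict(CLASSES=class_names)

# from this commit on, the key is lowercase:
metainfo = dict(classes=class_names)
```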
@@ -4,7 +4,7 @@ data_root = 'data/kitti/'
class_names = ['Car']
point_cloud_range = [0, -40, -3, 70.4, 40, 1]
input_modality = dict(use_lidar=True, use_camera=False)
-metainfo = dict(CLASSES=class_names)
+metainfo = dict(classes=class_names)
db_sampler = dict(
data_root=data_root,
......
@@ -2,7 +2,7 @@ dataset_type = 'KittiDataset'
data_root = 'data/kitti/'
class_names = ['Pedestrian', 'Cyclist', 'Car']
input_modality = dict(use_lidar=False, use_camera=True)
-metainfo = dict(CLASSES=class_names)
+metainfo = dict(classes=class_names)
file_client_args = dict(backend='disk')
# Uncomment the following if use ceph or other file clients.
@@ -52,6 +52,7 @@ train_dataloader = dict(
data_prefix=dict(img='training/image_2'),
pipeline=train_pipeline,
modality=input_modality,
+load_type='fov_image_based',
test_mode=False,
metainfo=metainfo,
# we use box_type_3d='Camera' in monocular 3d
@@ -70,6 +71,7 @@ val_dataloader = dict(
ann_file='kitti_infos_val.pkl',
pipeline=test_pipeline,
modality=input_modality,
+load_type='fov_image_based',
metainfo=metainfo,
test_mode=True,
box_type_3d='Camera'))
......
@@ -39,8 +39,9 @@ train_pipeline = [
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
dict(type='PointShuffle'),
-dict(type='DefaultFormatBundle3D', class_names=class_names),
-dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
+dict(
+    type='Pack3DDetInputs',
+    keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
test_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=5, use_dim=5),
@@ -59,23 +60,15 @@ test_pipeline = [
dict(type='RandomFlip3D'),
dict(
type='PointsRangeFilter', point_cloud_range=point_cloud_range),
-dict(
-    type='DefaultFormatBundle3D',
-    class_names=class_names,
-    with_label=False),
-dict(type='Collect3D', keys=['points'])
-])
+]),
+dict(type='Pack3DDetInputs', keys=['points'])
]
# construct a pipeline for data and gt loading in show function
# please keep its loading function consistent with test_pipeline (e.g. client)
eval_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=5, use_dim=5),
dict(type='LoadPointsFromMultiSweeps', sweeps_num=10),
-dict(
-    type='DefaultFormatBundle3D',
-    class_names=class_names,
-    with_label=False),
-dict(type='Collect3D', keys=['points'])
+dict(type='Pack3DDetInputs', keys=['points'])
]
data = dict(
......
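The pipeline changes above follow one pattern: the old two-step formatting, `DefaultFormatBundle3D` followed by `Collect3D`, is collapsed into a single `Pack3DDetInputs` transform. A minimal sketch of an updated evaluation pipeline, taken directly from the hunk above:

```python
# dev-1.x style: a single packing transform replaces the old
# DefaultFormatBundle3D + Collect3D pair at the end of each pipeline.
eval_pipeline = [
    dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=5, use_dim=5),
    dict(type='LoadPointsFromMultiSweeps', sweeps_num=10),
    dict(type='Pack3DDetInputs', keys=['points'])
]
```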
@@ -78,7 +78,7 @@ train_dataloader = dict(
data_root=data_root,
ann_file='lyft_infos_train.pkl',
pipeline=train_pipeline,
-metainfo=dict(CLASSES=class_names),
+metainfo=dict(classes=class_names),
modality=input_modality,
data_prefix=data_prefix,
test_mode=False,
@@ -94,7 +94,7 @@ test_dataloader = dict(
data_root=data_root,
ann_file='lyft_infos_val.pkl',
pipeline=test_pipeline,
-metainfo=dict(CLASSES=class_names),
+metainfo=dict(classes=class_names),
modality=input_modality,
data_prefix=data_prefix,
test_mode=True,
@@ -110,7 +110,7 @@ val_dataloader = dict(
data_root=data_root,
ann_file='lyft_infos_val.pkl',
pipeline=test_pipeline,
-metainfo=dict(CLASSES=class_names),
+metainfo=dict(classes=class_names),
modality=input_modality,
test_mode=True,
data_prefix=data_prefix,
......
@@ -4,8 +4,6 @@ class_names = [
'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle',
'motorcycle', 'pedestrian', 'traffic_cone', 'barrier'
]
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
file_client_args = dict(backend='disk')
# Uncomment the following if use ceph or other file clients.
@@ -23,10 +21,7 @@ train_pipeline = [
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
-dict(type='Normalize', **img_norm_cfg),
-dict(type='Pad', size_divisor=32),
-dict(type='DefaultFormatBundle'),
-dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+dict(type='PackDetInputs'),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
@@ -37,11 +32,11 @@ test_pipeline = [
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
-dict(type='Normalize', **img_norm_cfg),
-dict(type='Pad', size_divisor=32),
-dict(type='ImageToTensor', keys=['img']),
-dict(type='Collect', keys=['img']),
-])
+]),
+dict(
+    type='PackDetInputs',
+    meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
+               'scale_factor')),
]
data = dict(
samples_per_gpu=2,
......
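The 2D (nuImages) pipelines change in the same spirit. A minimal sketch of the resulting test pipeline, under the assumption that in MMDetection 3.x the dropped `Normalize` and `Pad` steps are handled by the model's data preprocessor rather than the pipeline, while `ImageToTensor` and `Collect` are folded into `PackDetInputs`:

```python
# Sketch of a nuImages-style 2D test pipeline after this commit. Normalization
# and padding are assumed to live in the model's data_preprocessor, not here.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='Resize', keep_ratio=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor')),
]
```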
# If point cloud range is changed, the models should also change their point # If point cloud range is changed, the models should also change their point
# cloud range accordingly # cloud range accordingly
point_cloud_range = [-50, -50, -5, 50, 50, 3]
+# Using calibration info to convert the Lidar-coordinate point cloud range to
+# the ego-coordinate point cloud range could bring a small improvement on nuScenes.
+# point_cloud_range = [-50, -50.8, -5, 50, 49.2, 3]
# For nuScenes we usually do 10-class detection
class_names = [
'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle',
'motorcycle', 'pedestrian', 'traffic_cone', 'barrier'
]
-metainfo = dict(CLASSES=class_names)
+metainfo = dict(classes=class_names)
dataset_type = 'NuScenesDataset'
data_root = 'data/nuscenes/'
# Input modality for nuScenes dataset, this is consistent with the submission
......
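The new comment suggests shifting the Lidar-coordinate range into the ego frame via the calibration info. A hypothetical numpy sketch of such a shift; the translation vector here is an assumption chosen only to reproduce the commented range, not taken from real nuScenes calibration:

```python
import numpy as np

# Shift an [x_min, y_min, z_min, x_max, y_max, z_max] range from the Lidar
# frame into the ego frame with a lidar-to-ego translation.
lidar_range = np.array([-50.0, -50.0, -5.0, 50.0, 50.0, 3.0])
lidar2ego_translation = np.array([0.0, 0.8, 0.0])  # illustrative value only

ego_range = lidar_range.copy()
ego_range[:3] -= lidar2ego_translation  # shift the min corner
ego_range[3:] -= lidar2ego_translation  # shift the max corner
print(ego_range)  # -> [-50.  -50.8  -5.   50.   49.2   3. ]
```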
@@ -4,7 +4,7 @@ class_names = [
'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle',
'motorcycle', 'pedestrian', 'traffic_cone', 'barrier'
]
-metainfo = dict(CLASSES=class_names)
+metainfo = dict(classes=class_names)
# Input modality for nuScenes dataset, this is consistent with the submission
# format which requires the information in input_modality.
input_modality = dict(use_lidar=False, use_camera=True)
@@ -65,7 +65,7 @@ train_dataloader = dict(
CAM_BACK_RIGHT='samples/CAM_BACK_RIGHT',
CAM_BACK_LEFT='samples/CAM_BACK_LEFT'),
ann_file='nuscenes_infos_train.pkl',
-task='mono_det',
+load_type='mv_image_based',
pipeline=train_pipeline,
metainfo=metainfo,
modality=input_modality,
@@ -92,7 +92,7 @@ val_dataloader = dict(
CAM_BACK_RIGHT='samples/CAM_BACK_RIGHT',
CAM_BACK_LEFT='samples/CAM_BACK_LEFT'),
ann_file='nuscenes_infos_val.pkl',
-task='mono_det',
+load_type='mv_image_based',
pipeline=test_pipeline,
modality=input_modality,
metainfo=metainfo,
......
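The former `task='mono_det'` argument is replaced by `load_type`: the nuScenes monocular config above uses `'mv_image_based'`, while the KITTI monocular config earlier in this commit uses `'fov_image_based'`. A minimal sketch of the renamed argument in a dataset config, with surrounding keys trimmed for brevity:

```python
class_names = [
    'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle',
    'motorcycle', 'pedestrian', 'traffic_cone', 'barrier'
]

train_dataset = dict(
    type='NuScenesDataset',
    data_root='data/nuscenes/',
    ann_file='nuscenes_infos_train.pkl',
    load_type='mv_image_based',  # was: task='mono_det'
    metainfo=dict(classes=class_names))
```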
# dataset settings
dataset_type = 'S3DISDataset'
data_root = 'data/s3dis/'
metainfo = dict(classes=('table', 'chair', 'sofa', 'bookcase', 'board'))
train_area = [1, 2, 3, 4, 6]
test_area = 5
train_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=True,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
dict(type='PointSample', num_points=100000),
dict(
type='RandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=0.5,
flip_ratio_bev_vertical=0.5),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.087266, 0.087266],
scale_ratio_range=[0.9, 1.1],
translation_std=[.1, .1, .1],
shift_height=False),
dict(type='NormalizePointsColor', color_mean=None),
dict(
type='Pack3DDetInputs',
keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
test_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=True,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(
type='RandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=0.5,
flip_ratio_bev_vertical=0.5),
dict(type='PointSample', num_points=100000),
dict(type='NormalizePointsColor', color_mean=None),
]),
dict(type='Pack3DDetInputs', keys=['points'])
]
train_dataloader = dict(
batch_size=8,
num_workers=4,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type='RepeatDataset',
times=13,
dataset=dict(
type='ConcatDataset',
datasets=[
dict(
type=dataset_type,
data_root=data_root,
ann_file=f's3dis_infos_Area_{i}.pkl',
pipeline=train_pipeline,
filter_empty_gt=True,
metainfo=metainfo,
box_type_3d='Depth') for i in train_area
])))
val_dataloader = dict(
batch_size=1,
num_workers=1,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file=f's3dis_infos_Area_{test_area}.pkl',
pipeline=test_pipeline,
metainfo=metainfo,
test_mode=True,
box_type_3d='Depth'))
test_dataloader = dict(
batch_size=1,
num_workers=1,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file=f's3dis_infos_Area_{test_area}.pkl',
pipeline=test_pipeline,
metainfo=metainfo,
test_mode=True,
box_type_3d='Depth'))
val_evaluator = dict(type='IndoorMetric')
test_evaluator = val_evaluator
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer')
# For S3DIS seg we usually do 13-class segmentation
class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door',
               'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter')
-metainfo = dict(CLASSES=class_names)
+metainfo = dict(classes=class_names)
dataset_type = 'S3DISSegDataset'
data_root = 'data/s3dis/'
input_modality = dict(use_lidar=True, use_camera=False)
......