"git@developer.sourcefind.cn:OpenDAS/ollama.git" did not exist on "c8b1f2369e9ff31a443436eb378dc620595b29ab"
Unverified commit a8f3ec5f, authored by VVsssssk and committed by GitHub

[CI] Add circle ci (#1647)

* add circle ci

* delete github ci

* fix ci

* fix ut

* fix markdown version

* rm
Parent commit: 420dcf4b
version: 2.1
# this allows you to use CircleCI's dynamic configuration feature
setup: true
# the path-filtering orb is required to continue a pipeline based on
# the path of an updated fileset
orbs:
  path-filtering: circleci/path-filtering@0.1.2
workflows:
  # the always-run workflow is always triggered, regardless of the pipeline parameters.
  always-run:
    jobs:
      # the path-filtering/filter job determines which pipeline
      # parameters to update.
      - path-filtering/filter:
          name: check-updated-files
          # 3-column, whitespace-delimited mapping. One mapping per
          # line:
          # <regex path-to-test> <parameter-to-set> <value-of-pipeline-parameter>
          mapping: |
            mmdet3d/.* lint_only false
            requirements/.* lint_only false
            tests/.* lint_only false
            tools/.* lint_only false
            configs/.* lint_only false
            .circleci/.* lint_only false
          base-revision: dev-1.x
          # this is the path of the configuration we should trigger once
          # path filtering and pipeline parameter value updates are
          # complete. In this case, we are using the parent dynamic
          # configuration itself.
          config-path: .circleci/test.yml
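The `mapping` block above is what turns changed paths into pipeline parameters: every file changed since `base-revision` is tested against the regexes, and a match sets the named parameter before `.circleci/test.yml` is triggered. As a rough illustration only (this is not the orb's code, and `pipeline_parameters` is a made-up helper name), the logic amounts to:

```python
# Illustrative sketch of the path-filtering behaviour, not the orb's implementation.
import re

# (regex over a changed path, pipeline parameter, value to set on a match)
MAPPING = [
    (r"mmdet3d/.*", "lint_only", False),
    (r"requirements/.*", "lint_only", False),
    (r"tests/.*", "lint_only", False),
    (r"tools/.*", "lint_only", False),
    (r"configs/.*", "lint_only", False),
    (r"\.circleci/.*", "lint_only", False),
]

def pipeline_parameters(changed_files):
    """Start from the default declared in .circleci/test.yml and flip any
    parameter whose regex matches at least one changed file."""
    params = {"lint_only": True}  # default value from test.yml
    for path in changed_files:
        for pattern, parameter, value in MAPPING:
            if re.fullmatch(pattern, path):
                params[parameter] = value
    return params

# Documentation-only changes keep lint_only at its default ...
assert pipeline_parameters(["docs/index.rst"]) == {"lint_only": True}
# ... while any source/test/config change flips it to false.
assert pipeline_parameters(["mmdet3d/version.py"]) == {"lint_only": False}
```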
ARG PYTORCH="1.8.1"
ARG CUDA="10.2"
ARG CUDNN="7"
FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel
# To fix GPG key error when running apt-get update
RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
RUN apt-get update && apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx
version: 2.1
# the default pipeline parameters, which will be updated according to
# the results of the path-filtering orb
parameters:
  lint_only:
    type: boolean
    default: true
jobs:
  lint:
    docker:
      - image: cimg/python:3.7.4
    steps:
      - checkout
      - run:
          name: Install pre-commit hook
          command: |
            pip install pre-commit
            pre-commit install
      - run:
          name: Linting
          command: pre-commit run --all-files
      - run:
          name: Check docstring coverage
          command: |
            pip install interrogate
            interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-magic --ignore-regex "__repr__" --fail-under 90 mmdet3d
  build_cpu:
    parameters:
      # The python version must match available image tags in
      # https://circleci.com/developer/images/image/cimg/python
      python:
        type: string
      torch:
        type: string
      torchvision:
        type: string
      mmcv:
        type: string
    docker:
      - image: cimg/python:<< parameters.python >>
    resource_class: large
    steps:
      - checkout
      - run:
          name: Install Libraries
          command: |
            sudo apt-get update
            sudo apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx libjpeg-dev zlib1g-dev libtinfo-dev libncurses5
      - run:
          name: Configure Python & pip
          command: |
            python -m pip install --upgrade pip
            python -m pip install wheel
      - run:
          name: Install PyTorch
          command: |
            python -V
            python -m pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html
      - run:
          name: Install mmdet3d dependencies
          command: |
            python -m pip install git+ssh://git@github.com/open-mmlab/mmengine.git@main
            python -m pip install << parameters.mmcv >>
            python -m pip install git+ssh://git@github.com/open-mmlab/mmdetection.git@dev-3.x
            python -m pip install -r requirements.txt
      - run:
          name: Build and install
          command: |
            python -m pip install -e .
      - run:
          name: Run unittests
          command: |
            python -m coverage run --branch --source mmdet3d -m pytest tests/
            python -m coverage xml
            python -m coverage report -m
  build_cuda:
    parameters:
      torch:
        type: string
      cuda:
        type: enum
        enum: ["10.1", "10.2", "11.1"]
      cudnn:
        type: integer
        default: 7
      mmcv:
        type: string
    machine:
      image: ubuntu-2004-cuda-11.4:202110-01
      # docker_layer_caching: true
    resource_class: gpu.nvidia.small
    steps:
      - checkout
      - run:
          # Cloning repos in VM since Docker doesn't have access to the private key
          name: Clone Repos
          command: |
            git clone -b main --depth 1 ssh://git@github.com/open-mmlab/mmengine.git /home/circleci/mmengine
            git clone -b dev-3.x --depth 1 ssh://git@github.com/open-mmlab/mmdetection.git /home/circleci/mmdetection
      - run:
          name: Build Docker image
          command: |
            docker build .circleci/docker -t mmdet3d:gpu --build-arg PYTORCH=<< parameters.torch >> --build-arg CUDA=<< parameters.cuda >> --build-arg CUDNN=<< parameters.cudnn >>
            docker run --gpus all -t -d -v /home/circleci/project:/mmdetection3d -v /home/circleci/mmengine:/mmengine -v /home/circleci/mmdetection:/mmdetection -w /mmdetection3d --name mmdet3d mmdet3d:gpu
      - run:
          name: Install mmdet3d dependencies
          command: |
            docker exec mmdet3d pip install -e /mmengine
            docker exec mmdet3d pip install << parameters.mmcv >>
            docker exec mmdet3d pip install -e /mmdetection
            docker exec mmdet3d pip install -r requirements.txt
      - run:
          name: Build and install
          command: |
            docker exec mmdet3d pip install -e .
      - run:
          name: Run unittests
          command: |
            docker exec mmdet3d python -m pytest tests/
workflows:
  pr_stage_lint:
    when: << pipeline.parameters.lint_only >>
    jobs:
      - lint:
          name: lint
          filters:
            branches:
              ignore:
                - dev-1.x
  pr_stage_test:
    when:
      not:
        << pipeline.parameters.lint_only >>
    jobs:
      - lint:
          name: lint
          filters:
            branches:
              ignore:
                - dev-1.x
      - build_cpu:
          name: minimum_version_cpu
          torch: 1.6.0
          torchvision: 0.7.0
          python: 3.6.9 # The lowest python 3.6.x version available on CircleCI images
          mmcv: https://download.openmmlab.com/mmcv/dev-2.x/cpu/torch1.6.0/mmcv_full-2.0.0rc0-cp36-cp36m-manylinux1_x86_64.whl
          requires:
            - lint
      - build_cpu:
          name: maximum_version_cpu
          torch: 1.9.0
          torchvision: 0.10.0
          python: 3.9.0
          mmcv: https://download.openmmlab.com/mmcv/dev-2.x/cpu/torch1.9.0/mmcv_full-2.0.0rc0-cp39-cp39-manylinux1_x86_64.whl
          requires:
            - minimum_version_cpu
      - hold:
          type: approval
          requires:
            - maximum_version_cpu
      - build_cuda:
          name: mainstream_version_gpu
          torch: 1.8.1
          # Use double quotation mark to explicitly specify its type
          # as string instead of number
          cuda: "10.2"
          mmcv: https://download.openmmlab.com/mmcv/dev-2.x/cu102/torch1.8.0/mmcv_full-2.0.0rc0-cp37-cp37m-manylinux1_x86_64.whl
          requires:
            - hold
  merge_stage_test:
    when:
      not:
        << pipeline.parameters.lint_only >>
    jobs:
      - build_cuda:
          name: minimum_version_gpu
          torch: 1.6.0
          # Use double quotation mark to explicitly specify its type
          # as string instead of number
          mmcv: https://download.openmmlab.com/mmcv/dev-2.x/cu101/torch1.6.0/mmcv_full-2.0.0rc0-cp37-cp37m-manylinux1_x86_64.whl
          cuda: "10.1"
          filters:
            branches:
              only:
                - dev-1.x
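For reference, the gating above works in two layers: the `when:` clauses select workflows from the `lint_only` pipeline parameter set by the path-filtering step, and the per-job branch filters then decide which jobs inside those workflows actually run. A small illustrative sketch (our own helper names, not CircleCI code, and a simplification of the real evaluation):

```python
# Sketch only: approximates the `when:` clauses and branch filters above.
def triggered_workflows(lint_only):
    """Workflows whose `when:` clause is satisfied for a given pipeline."""
    if lint_only:
        return ["pr_stage_lint"]
    return ["pr_stage_test", "merge_stage_test"]

def lint_job_runs(branch):
    """Both PR-stage workflows filter the lint job out on dev-1.x."""
    return branch != "dev-1.x"

def merge_stage_gpu_runs(branch):
    """merge_stage_test only runs its GPU build on dev-1.x."""
    return branch == "dev-1.x"
```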
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
name: build
on:
  push:
    paths-ignore:
      - ".dev_scripts/**"
      - ".github/**.md"
      - "demo/**"
      - "docker/**"
      - "tools/**"
  pull_request:
    paths-ignore:
      - ".dev_scripts/**"
      - ".github/**.md"
      - "demo/**"
      - "docker/**"
      - "tools/**"
      - "docs/**"
      - "docs_zh-CN/**"
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  build:
    env:
      FORCE_CUDA: 1
      CUDA_ARCH: ${{matrix.cuda_arch}}
    runs-on: ubuntu-18.04
    container:
      image: pytorch/pytorch:1.6.0-cuda10.1-cudnn7-devel
    strategy:
      matrix:
        python-version: [3.6, 3.7]
        torch: [1.5.0+cu101, 1.6.0+cu101, 1.7.0+cu101, 1.8.0+cu101]
        include:
          - torch: 1.5.0+cu101
            torch_version: torch1.5
            torchvision: 0.6.0+cu101
            mmcv_link: "torch1.5.0"
            cuda_arch: "7.0"
          - torch: 1.6.0+cu101
            torch_version: torch1.6
            mmcv_link: "torch1.6.0"
            torchvision: 0.7.0+cu101
            cuda_arch: "7.0"
          - torch: 1.7.0+cu101
            torch_version: torch1.7
            mmcv_link: "torch1.7.0"
            torchvision: 0.8.1+cu101
            cuda_arch: "7.0"
          - torch: 1.8.0+cu101
            torch_version: torch1.8
            mmcv_link: "torch1.8.0"
            torchvision: 0.9.0+cu101
            cuda_arch: "7.0"
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Fetch GPG keys
        run: |
          apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
          apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
      - name: Install system dependencies
        run: |
          apt-get update && apt-get install -y ffmpeg git ninja-build libglib2.0-0 libsm6 libxrender-dev python${{matrix.python-version}}-dev
          apt-get clean
          rm -rf /var/lib/apt/lists/*
      - name: Install Pillow
        run: python -m pip install Pillow==6.2.2
        if: ${{matrix.torchvision < 0.5}}
      - name: Install PyTorch
        run: python -m pip install numpy==1.19.5 torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
      - name: Install mmdet3d dependencies
        run: |
          python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/${{matrix.torch_version}}/index.html
          python -m pip install mmdet
          python -m pip install mmsegmentation
          python -m pip install -r requirements.txt
      - name: Build and install
        run: |
          rm -rf .eggs
          python setup.py check -m -s
          TORCH_CUDA_ARCH_LIST=${CUDA_ARCH} python setup.py build_ext --inplace
      - name: Run unittests and generate coverage report
        run: |
          coverage run --branch --source mmdet3d -m pytest tests/
          coverage xml
          coverage report -m
      # Only upload coverage report for python3.7 && pytorch1.5
      - name: Upload coverage to Codecov
        if: ${{matrix.torch == '1.5.0+cu101' && matrix.python-version == '3.7'}}
        uses: codecov/codecov-action@v1.0.10
        with:
          file: ./coverage.xml
          flags: unittests
          env_vars: OS,PYTHON
          name: codecov-umbrella
          fail_ci_if_error: false
  build_windows:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [windows-2022]
        python: [3.8]
        platform: [cpu]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python }}
      - name: Upgrade pip
        run: python -m pip install pip --upgrade --user
      - name: Install PyTorch
        # As a complement to Linux CI, we test on PyTorch LTS version
        run: pip install torch==1.8.2+${{ matrix.platform }} torchvision==0.9.2+${{ matrix.platform }} -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
      - name: Install mmdet3d dependencies
        run: |
          pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.8/index.html --only-binary mmcv-full
          python -m pip install mmdet
          python -m pip install mmsegmentation
          python -m pip install -r requirements/build.txt -r requirements/runtime.txt -r requirements/tests.txt
      - name: Build and install
        run: pip install -e .
      - name: Run unittests and generate coverage report
        run: coverage run --branch --source mmdet3d -m pytest tests/
      - name: Generate coverage report
        run: |
          coverage xml
          coverage report -m
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v2
        with:
          file: ./coverage.xml
          flags: unittests
          env_vars: OS,PYTHON
          name: codecov-umbrella
          fail_ci_if_error: false
name: deploy
on: push
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  build-n-publish:
    runs-on: ubuntu-18.04
    if: startsWith(github.event.ref, 'refs/tags')
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python 3.7
        uses: actions/setup-python@v2
        with:
          python-version: 3.7
      - name: Install torch
        run: pip install torch
      - name: Build MMDet3D
        run: python setup.py sdist
      - name: Publish distribution to PyPI
        run: |
          pip install twine
          twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }}
name: lint
on: [push, pull_request]
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  lint:
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python 3.7
        uses: actions/setup-python@v1
        with:
          python-version: 3.7
      - name: Install linting dependencies
        run: |
          python -m pip install --upgrade pip
          pip install flake8 isort yapf interrogate
      - name: Lint with flake8
        run: flake8 .
      - name: Lint with isort
        run: isort --recursive --check-only --diff mmdet3d/ tests/ examples/
      - name: Format python codes with yapf
        run: yapf -r -d mmdet3d/ tests/ examples/
      - name: Check docstring
        run: interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --exclude mmdet3d/ops --ignore-regex "__repr__" --fail-under 95 mmdet3d
@@ -4,16 +4,16 @@
## Configuration

- Hardware: 8 NVIDIA Tesla V100 (32G) GPUs, Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz
- Software: Python 3.7, CUDA 10.1, cuDNN 7.6.5, PyTorch 1.3, numba 0.48.0.
- Models: since different codebases implement different sets of models, we pick SECOND, PointPillars, Part-A2 and VoteNet for the benchmark and compare each against the corresponding implementation in the other codebases.
- Metric: we use the average throughput over the whole training run as the metric, and skip the first 50 iterations of every epoch to remove the warm-up effect (a sketch of this computation follows the list).
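A minimal sketch of the metric described above (our own illustrative code, not taken from any of the benchmarked repositories; `data_loader` and `run_iteration` are hypothetical placeholders for a framework's training loop):

```python
import time

def average_throughput(data_loader, run_iteration, num_epochs, warmup_iters=50):
    """Per-epoch throughput in samples/s, skipping the first `warmup_iters`
    iterations of each epoch, averaged over all epochs.

    Assumes every epoch has more than `warmup_iters` iterations and that
    `len(batch)` gives the number of samples in a batch.
    """
    epoch_speeds = []
    for _ in range(num_epochs):
        num_samples, start = 0, None
        for it, batch in enumerate(data_loader):
            if it == warmup_iters:           # start the clock after warm-up
                start, num_samples = time.perf_counter(), 0
            run_iteration(batch)             # one forward/backward/optimizer step
            if start is not None:
                num_samples += len(batch)
        epoch_speeds.append(num_samples / (time.perf_counter() - start))
    return sum(epoch_speeds) / len(epoch_speeds)
```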
## Main Results

We compare the training speed (samples per second) of MMDetection3D with other codebases that implement the same models. The results are shown below; larger numbers mean faster training. Models that a codebase does not support are marked with `×`.

|        Model        | MMDetection3D | OpenPCDet | votenet | Det3D |
| :-----------------: | :-----------: | :-------: | :-----: | :---: |
|       VoteNet       |      358      |     ×     |   77    |   ×   |
|  PointPillars-car   |      141      |     ×     |    ×    |  140  |
@@ -25,103 +25,104 @@
### Modifications for Calculating Speed

- __MMDetection3D__: we use configurations that are as close as possible to those of the other codebases; see the [benchmark configurations](https://github.com/open-mmlab/MMDetection3D/blob/master/configs/benchmark) for details.
- __Det3D__: for the comparison with Det3D, we use the code at commit [519251e](https://github.com/poodarchu/Det3D/tree/519251e72a5c1fdd58972eabeac67808676b9bb7).
- __OpenPCDet__: for the comparison with OpenPCDet, we use the code at commit [b32fbddb](https://github.com/open-mmlab/OpenPCDet/tree/b32fbddbe06183507bad433ed99b407cbc2175c2).

  To measure the training speed, we add code that records the run time to `./tools/train_utils/train_utils.py`. We compute the training speed of each epoch and report the average over all epochs.

<details>
<summary>
(Specific modifications made to benchmark with the same method - click to expand)
</summary>
```diff
diff --git a/tools/train_utils/train_utils.py b/tools/train_utils/train_utils.py
index 91f21dd..021359d 100644
--- a/tools/train_utils/train_utils.py
+++ b/tools/train_utils/train_utils.py
@@ -2,6 +2,7 @@ import torch
 import os
 import glob
 import tqdm
+import datetime
 from torch.nn.utils import clip_grad_norm_
@@ -13,7 +14,10 @@ def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, ac
     if rank == 0:
         pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
+    start_time = None
     for cur_it in range(total_it_each_epoch):
+        if cur_it > 49 and start_time is None:
+            start_time = datetime.datetime.now()
         try:
             batch = next(dataloader_iter)
         except StopIteration:
@@ -55,9 +59,11 @@ def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, ac
                 tb_log.add_scalar('learning_rate', cur_lr, accumulated_iter)
                 for key, val in tb_dict.items():
                     tb_log.add_scalar('train_' + key, val, accumulated_iter)
+    endtime = datetime.datetime.now()
+    speed = (endtime - start_time).seconds / (total_it_each_epoch - 50)
     if rank == 0:
         pbar.close()
-    return accumulated_iter
+    return accumulated_iter, speed


 def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_cfg,
@@ -65,6 +71,7 @@ def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_
                 lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50,
                 merge_all_iters_to_one_epoch=False):
     accumulated_iter = start_iter
+    speeds = []
     with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
         total_it_each_epoch = len(train_loader)
         if merge_all_iters_to_one_epoch:
@@ -82,7 +89,7 @@ def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_
                 cur_scheduler = lr_warmup_scheduler
             else:
                 cur_scheduler = lr_scheduler
-            accumulated_iter = train_one_epoch(
+            accumulated_iter, speed = train_one_epoch(
                 model, optimizer, train_loader, model_func,
                 lr_scheduler=cur_scheduler,
                 accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
@@ -91,7 +98,7 @@ def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_
                 total_it_each_epoch=total_it_each_epoch,
                 dataloader_iter=dataloader_iter
             )
-
+            speeds.append(speed)
             # save trained model
             trained_epoch = cur_epoch + 1
             if trained_epoch % ckpt_save_interval == 0 and rank == 0:
@@ -107,6 +114,8 @@ def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_
                 save_checkpoint(
                     checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                 )
+            print(speed)
+            print(f'*******{sum(speeds) / len(speeds)}******')


 def model_state_to_cpu(model_state):
```
</details>

### VoteNet

- __MMDetection3D__: with release v0.1.0, run the following command:
```bash
./tools/dist_train.sh configs/votenet/votenet_16x8_sunrgbd-3d-10class.py 8 --no-validate
```
- __votenet__: at commit [2f6d6d3](https://github.com/facebookresearch/votenet/tree/2f6d6d36ff98d96901182e935afe48ccee82d566), run the following command:
```bash
python train.py --dataset sunrgbd --batch_size 16
```
@@ -196,13 +197,13 @@

### PointPillars-car

- __MMDetection3D__: with release v0.1.0, run the following command:
```bash
./tools/dist_train.sh configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py 8 --no-validate
```
- __Det3D__: at commit [519251e](https://github.com/poodarchu/Det3D/tree/519251e72a5c1fdd58972eabeac67808676b9bb7), use `kitti_point_pillars_mghead_syncbn.py` and run the following command:
```bash
./tools/scripts/train.sh --launcher=slurm --gpus=8
```
@@ -238,13 +239,13 @@

### PointPillars-3class

- __MMDetection3D__: with release v0.1.0, run the following command:
```bash
./tools/dist_train.sh configs/benchmark/hv_pointpillars_secfpn_4x8_80e_pcdet_kitti-3d-3class.py 8 --no-validate
```
- __OpenPCDet__: at commit [b32fbddb](https://github.com/open-mmlab/OpenPCDet/tree/b32fbddbe06183507bad433ed99b407cbc2175c2), run the following commands:
```bash
cd tools
```
@@ -255,13 +256,13 @@

The SECOND in this benchmark refers to [SECONDv1.5](https://github.com/traveller59/second.pytorch/blob/master/second/configs/all.fhd.config), which was first implemented in [second.Pytorch](https://github.com/traveller59/second.pytorch). The SECOND implemented in Det3D uses its own Multi-Group Head, so its speed cannot be compared with the other codebases.

- __MMDetection3D__: with release v0.1.0, run the following command:
```bash
./tools/dist_train.sh configs/benchmark/hv_second_secfpn_4x8_80e_pcdet_kitti-3d-3class.py 8 --no-validate
```
- __OpenPCDet__: at commit [b32fbddb](https://github.com/open-mmlab/OpenPCDet/tree/b32fbddbe06183507bad433ed99b407cbc2175c2), run the following commands:
```bash
cd tools
```
@@ -270,13 +271,13 @@

### Part-A2

- __MMDetection3D__: with release v0.1.0, run the following command:
```bash
./tools/dist_train.sh configs/benchmark/hv_PartA2_secfpn_4x8_cyclic_80e_pcdet_kitti-3d-3class.py 8 --no-validate
```
- __OpenPCDet__: at commit [b32fbddb](https://github.com/open-mmlab/OpenPCDet/tree/b32fbddbe06183507bad433ed99b407cbc2175c2), run the following commands to train the model:
```bash
cd tools
```
...
@@ -7,6 +7,7 @@ from torch import nn as nn
 from mmdet3d.models.layers import SparseBasicBlock, make_sparse_convmodule
 from mmdet3d.models.layers.spconv import IS_SPCONV2_AVAILABLE
 from mmdet3d.registry import MODELS
+from mmdet.models.losses import sigmoid_focal_loss, smooth_l1_loss

 if IS_SPCONV2_AVAILABLE:
     from spconv.pytorch import SparseConvTensor, SparseSequential
@@ -213,7 +214,7 @@ class SparseEncoder(nn.Module):
         return out_channels


-@MIDDLE_ENCODERS.register_module()
+@MODELS.register_module()
 class SparseEncoderSASSD(SparseEncoder):
     r"""Sparse encoder for `SASSD <https://github.com/skyhehe123/SA-SSD>`_
...
-open3d
 spconv
 waymo-open-dataset-tf-2-1-0==1.2.0
@@ -3,6 +3,7 @@ networkx>=2.2,<2.3
 numba==0.53.0
 numpy
 nuscenes-devkit
+open3d
 plyfile
 scikit-image
 # by default we also use tensorboard to log results
...