Commit afe88104 authored by lishj6

init0905

parent a48c4071
*.pyc
*.npy
*.pth
*.whl
*.swp
data/
ckpt/
work_dirs*/
dist_test/
vis/
val/
lib/
*.egg-info
build/
__pycache__/
*.so
job_scripts/
temp_ops/
MIT License
Copyright (c) 2024 swc-17
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
# Quick Start
### Set up a new virtual environment
```bash
conda create -n sparsedrive python=3.8 -y
conda activate sparsedrive
```
### Install dependency packages
```bash
sparsedrive_path="path/to/sparsedrive"
cd ${sparsedrive_path}
pip3 install --upgrade pip
pip3 install torch==1.13.0+cu116 torchvision==0.14.0+cu116 torchaudio==0.13.0 --extra-index-url https://download.pytorch.org/whl/cu116
pip3 install -r requirement.txt
```
### Compile the deformable_aggregation CUDA op
```bash
cd projects/mmdet3d_plugin/ops
python3 setup.py develop
cd ../../../
```
### Prepare the data
Download the [NuScenes dataset](https://www.nuscenes.org/nuscenes#download) and the CAN bus expansion, place the CAN bus expansion in /path/to/nuscenes, and create symbolic links.
```bash
cd ${sparsedrive_path}
mkdir data
ln -s path/to/nuscenes ./data/nuscenes
```
Pack the meta-information and labels of the dataset and generate the required pkl files in data/infos. Note that map_annos are also generated in the data converter with a default roi_size of (30, 60); if you need a different range, modify roi_size in tools/data_converter/nuscenes_converter.py (see the sketch after the command below).
```bash
sh scripts/create_data.sh
```
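If a different map range is needed, the converter setting can be adjusted before running the script above. A minimal sketch (the exact location of `roi_size` inside tools/data_converter/nuscenes_converter.py is assumed):
```python
# tools/data_converter/nuscenes_converter.py (assumed location of the setting)
roi_size = (30, 60)   # default map-annotation range in meters (x, y)
# e.g. for a wider range:
# roi_size = (60, 120)
```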
### Generate anchors by K-means
Generated anchors are saved to data/kmeans and can be visualized in vis/kmeans (an illustrative clustering sketch follows the command below).
```bash
sh scripts/kmeans.sh
```
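For intuition, the anchors come from clustering ground-truth statistics of the training split. `scripts/kmeans.sh` drives the project's own tooling; the snippet below is only an illustrative sketch with scikit-learn, and the file paths and cluster count are placeholders:
```python
import numpy as np
from sklearn.cluster import KMeans

# Illustrative only: cluster GT box centers into K anchor points.
centers = np.load("data/infos/gt_box_centers.npy")   # placeholder input file
kmeans = KMeans(n_clusters=900, n_init=10, random_state=0).fit(centers)
np.save("data/kmeans/anchors_example.npy", kmeans.cluster_centers_)
```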
### Download pre-trained weights
Download the required backbone [pre-trained weights](https://download.pytorch.org/models/resnet50-19c8e357.pth).
```bash
mkdir ckpt
wget https://download.pytorch.org/models/resnet50-19c8e357.pth -O ckpt/resnet50-19c8e357.pth
```
### Commence training and testing
```bash
# train
sh scripts/train.sh
# test
sh scripts/test.sh
```
### Visualization
```bash
sh scripts/visualize.sh
```
56 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 1024 -H 16 -W 44 -m 1 --forw 0 -b 1 -s 1 --spatial_dim 2 -L NCHW
168 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 1024 -H 16 -W 44 -m 1 --forw 1 -b 0 -r 1 -s 1 --spatial_dim 2 -L NCHW
56 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 128 -H 32 -W 88 -m 1 --forw 0 -b 1 -s 1 --spatial_dim 2 -L NCHW
168 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 128 -H 32 -W 88 -m 1 --forw 1 -b 0 -r 1 -s 1 --spatial_dim 2 -L NCHW
8 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 128 -H 64 -W 176 -m 1 --forw 0 -b 1 -s 1 --spatial_dim 2 -L NCHW
24 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 128 -H 64 -W 176 -m 1 --forw 1 -b 0 -r 1 -s 1 --spatial_dim 2 -L NCHW
32 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 2048 -H 8 -W 22 -m 1 --forw 0 -b 1 -s 1 --spatial_dim 2 -L NCHW
96 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 2048 -H 8 -W 22 -m 1 --forw 1 -b 0 -r 1 -s 1 --spatial_dim 2 -L NCHW
88 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 256 -H 16 -W 44 -m 1 --forw 0 -b 1 -s 1 --spatial_dim 2 -L NCHW
264 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 256 -H 16 -W 44 -m 1 --forw 1 -b 0 -r 1 -s 1 --spatial_dim 2 -L NCHW
8 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 256 -H 32 -W 88 -m 1 --forw 0 -b 1 -s 1 --spatial_dim 2 -L NCHW
24 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 256 -H 32 -W 88 -m 1 --forw 1 -b 0 -r 1 -s 1 --spatial_dim 2 -L NCHW
32 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 256 -H 64 -W 176 -m 1 --forw 0 -b 1 -s 1 --spatial_dim 2 -L NCHW
96 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 256 -H 64 -W 176 -m 1 --forw 1 -b 0 -r 1 -s 1 --spatial_dim 2 -L NCHW
8 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 512 -H 16 -W 44 -m 1 --forw 0 -b 1 -s 1 --spatial_dim 2 -L NCHW
24 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 512 -H 16 -W 44 -m 1 --forw 1 -b 0 -r 1 -s 1 --spatial_dim 2 -L NCHW
40 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 512 -H 32 -W 88 -m 1 --forw 0 -b 1 -s 1 --spatial_dim 2 -L NCHW
120 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 512 -H 32 -W 88 -m 1 --forw 1 -b 0 -r 1 -s 1 --spatial_dim 2 -L NCHW
40 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 512 -H 8 -W 22 -m 1 --forw 0 -b 1 -s 1 --spatial_dim 2 -L NCHW
120 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 512 -H 8 -W 22 -m 1 --forw 1 -b 0 -r 1 -s 1 --spatial_dim 2 -L NCHW
8 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 64 -H 128 -W 352 -m 1 --forw 0 -b 1 -s 1 --spatial_dim 2 -L NCHW
16 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 64 -H 128 -W 352 -m 1 --forw 1 -b 0 -r 1 -s 1 --spatial_dim 2 -L NCHW
48 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 64 -H 64 -W 176 -m 1 --forw 0 -b 1 -s 1 --spatial_dim 2 -L NCHW
144 MIOpen(HIP): Command [LogCmdBNorm] ./bin/MIOpenDriver bnormfp16 -n 48 -c 64 -H 64 -W 176 -m 1 --forw 1 -b 0 -r 1 -s 1 --spatial_dim 2 -L NCHW
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 1024 -H 16 -W 44 -k 2048 -y 1 -x 1 -p 0 -q 0 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 1024 -H 16 -W 44 -k 2048 -y 1 -x 1 -p 0 -q 0 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 1024 -H 16 -W 44 -k 2048 -y 1 -x 1 -p 0 -q 0 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
136 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 1024 -H 16 -W 44 -k 256 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
48 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 1024 -H 16 -W 44 -k 256 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
48 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 1024 -H 16 -W 44 -k 256 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 1024 -H 16 -W 44 -k 512 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 1024 -H 16 -W 44 -k 512 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 1024 -H 16 -W 44 -k 512 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
72 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 128 -H 32 -W 88 -k 128 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 128 -H 32 -W 88 -k 128 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 128 -H 32 -W 88 -k 128 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
96 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 128 -H 32 -W 88 -k 512 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
32 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 128 -H 32 -W 88 -k 512 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
32 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 128 -H 32 -W 88 -k 512 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 128 -H 64 -W 176 -k 128 -y 3 -x 3 -p 1 -q 1 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 128 -H 64 -W 176 -k 128 -y 3 -x 3 -p 1 -q 1 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 128 -H 64 -W 176 -k 128 -y 3 -x 3 -p 1 -q 1 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 2048 -H 8 -W 22 -k 256 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 2048 -H 8 -W 22 -k 256 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 2048 -H 8 -W 22 -k 256 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
48 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 2048 -H 8 -W 22 -k 512 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 2048 -H 8 -W 22 -k 512 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 2048 -H 8 -W 22 -k 512 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
144 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 16 -W 44 -k 1024 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
48 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 16 -W 44 -k 1024 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
48 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 16 -W 44 -k 1024 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 16 -W 44 -k 1 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 16 -W 44 -k 1 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 16 -W 44 -k 1 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
136 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 16 -W 44 -k 256 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
48 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 16 -W 44 -k 256 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
48 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 16 -W 44 -k 256 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 32 -W 88 -k 1 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 32 -W 88 -k 1 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 32 -W 88 -k 1 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 32 -W 88 -k 256 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 32 -W 88 -k 256 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 32 -W 88 -k 256 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 32 -W 88 -k 256 -y 3 -x 3 -p 1 -q 1 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 32 -W 88 -k 256 -y 3 -x 3 -p 1 -q 1 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 32 -W 88 -k 256 -y 3 -x 3 -p 1 -q 1 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 128 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 128 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 128 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 1 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 1 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 1 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 256 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 256 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 256 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 256 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 256 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 256 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 512 -y 1 -x 1 -p 0 -q 0 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 512 -y 1 -x 1 -p 0 -q 0 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 512 -y 1 -x 1 -p 0 -q 0 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
48 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 64 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 64 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 64 -W 176 -k 64 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 8 -W 22 -k 256 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 8 -W 22 -k 256 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 256 -H 8 -W 22 -k 256 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 3 -H 256 -W 704 -k 64 -y 7 -x 7 -p 3 -q 3 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 3 -H 256 -W 704 -k 64 -y 7 -x 7 -p 3 -q 3 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 16 -W 44 -k 512 -y 3 -x 3 -p 1 -q 1 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 16 -W 44 -k 512 -y 3 -x 3 -p 1 -q 1 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 16 -W 44 -k 512 -y 3 -x 3 -p 1 -q 1 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 32 -W 88 -k 1024 -y 1 -x 1 -p 0 -q 0 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 32 -W 88 -k 1024 -y 1 -x 1 -p 0 -q 0 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 32 -W 88 -k 1024 -y 1 -x 1 -p 0 -q 0 -u 2 -v 2 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
72 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 32 -W 88 -k 128 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 32 -W 88 -k 128 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 32 -W 88 -k 128 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
40 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 32 -W 88 -k 256 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 32 -W 88 -k 256 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 32 -W 88 -k 256 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
72 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 8 -W 22 -k 2048 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 8 -W 22 -k 2048 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 8 -W 22 -k 2048 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
48 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 8 -W 22 -k 512 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 8 -W 22 -k 512 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
16 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 512 -H 8 -W 22 -k 512 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
96 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 64 -H 64 -W 176 -k 256 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
32 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 64 -H 64 -W 176 -k 256 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
32 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 64 -H 64 -W 176 -k 256 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 64 -H 64 -W 176 -k 64 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 64 -H 64 -W 176 -k 64 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
8 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 64 -H 64 -W 176 -k 64 -y 1 -x 1 -p 0 -q 0 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
72 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 64 -H 64 -W 176 -k 64 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 1 -t 1 -S 0
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 64 -H 64 -W 176 -k 64 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 2 -t 1 -S 0
24 MIOpen(HIP): Command [LogCmdConvolution] ./bin/MIOpenDriver convfp16 -n 48 -c 64 -H 64 -W 176 -k 64 -y 3 -x 3 -p 1 -q 1 -u 1 -v 1 -l 1 -j 1 -m conv -g 1 -F 4 -t 1 -S 0
from .datasets import *
from .models import *
from .apis import *
from .core.evaluation import *
from .train import custom_train_model
from .mmdet_train import custom_train_detector
# from .test import custom_multi_gpu_test
# ---------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
# ---------------------------------------------
# Modified by Zhiqi Li
# ---------------------------------------------
import random
import warnings
import numpy as np
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (
HOOKS,
DistSamplerSeedHook,
EpochBasedRunner,
Fp16OptimizerHook,
OptimizerHook,
build_optimizer,
build_runner,
get_dist_info,
)
from mmcv.utils import build_from_cfg
from mmdet.core import EvalHook
from mmdet.datasets import build_dataset, replace_ImageToTensor
from mmdet.utils import get_root_logger
import time
import os.path as osp
from projects.mmdet3d_plugin.datasets.builder import build_dataloader
from projects.mmdet3d_plugin.core.evaluation.eval_hooks import (
CustomDistEvalHook,
)
from projects.mmdet3d_plugin.datasets import custom_build_dataset
from mmcv.runner import Hook
class ProfilerHook(Hook):
def __init__(self, profiler):
self.profiler = profiler
def after_train_iter(self, runner):
self.profiler.step()
def custom_train_detector(
model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None,
):
logger = get_root_logger(cfg.log_level)
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
# assert len(dataset) == 1
if "imgs_per_gpu" in cfg.data:
logger.warning(
'"imgs_per_gpu" is deprecated in MMDet V2.0. '
'Please use "samples_per_gpu" instead'
)
if "samples_per_gpu" in cfg.data:
logger.warning(
f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
f"={cfg.data.imgs_per_gpu} is used in this experiments"
)
else:
logger.warning(
'Automatically set "samples_per_gpu"="imgs_per_gpu"='
f"{cfg.data.imgs_per_gpu} in this experiments"
)
cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
if "runner" in cfg:
runner_type = cfg.runner["type"]
else:
runner_type = "EpochBasedRunner"
data_loaders = [
build_dataloader(
ds,
cfg.data.samples_per_gpu,
cfg.data.workers_per_gpu,
# cfg.gpus will be ignored if distributed
len(cfg.gpu_ids),
dist=distributed,
seed=cfg.seed,
nonshuffler_sampler=dict(
type="DistributedSampler"
), # dict(type='DistributedSampler'),
runner_type=runner_type,
)
for ds in dataset
]
# put model on gpus
if distributed:
print("============================distributed yes=================================================")
find_unused_parameters = cfg.get("find_unused_parameters", False)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
model = MMDistributedDataParallel(
# model.cuda(),
model.to(device='cuda', memory_format=torch.channels_last),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters,
)
else:
print("============================distributed no=================================================")
model = MMDataParallel(
model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids
)
# build runner
optimizer = build_optimizer(model, cfg.optimizer)
if "runner" not in cfg:
cfg.runner = {
"type": "EpochBasedRunner",
"max_epochs": cfg.total_epochs,
}
warnings.warn(
"config is now expected to have a `runner` section, "
"please set `runner` in your config.",
UserWarning,
)
else:
if "total_epochs" in cfg:
assert cfg.total_epochs == cfg.runner.max_epochs
runner = build_runner(
cfg.runner,
default_args=dict(
model=model,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta,
),
)
# an ugly workaround to make .log and .log.json filenames the same
runner.timestamp = timestamp
# fp16 setting
fp16_cfg = cfg.get("fp16", None)
if fp16_cfg is not None:
optimizer_config = Fp16OptimizerHook(
**cfg.optimizer_config, **fp16_cfg, distributed=distributed
)
elif distributed and "type" not in cfg.optimizer_config:
optimizer_config = OptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
# register hooks
runner.register_training_hooks(
cfg.lr_config,
optimizer_config,
cfg.checkpoint_config,
cfg.log_config,
cfg.get("momentum_config", None),
)
# register profiler hook
# trace_config = dict(type='tb_trace', dir_name='work_dir')
# profiler_config = dict(on_trace_ready=trace_config)
# runner.register_profiler_hook(profiler_config)
if distributed:
if isinstance(runner, EpochBasedRunner):
runner.register_hook(DistSamplerSeedHook())
# register eval hooks
if validate:
# Support batch_size > 1 in validation
val_samples_per_gpu = cfg.data.val.pop("samples_per_gpu", 1)
if val_samples_per_gpu > 1:
assert False
# Replace 'ImageToTensor' with 'DefaultFormatBundle'
cfg.data.val.pipeline = replace_ImageToTensor(
cfg.data.val.pipeline
)
val_dataset = custom_build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(
val_dataset,
samples_per_gpu=val_samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False,
nonshuffler_sampler=dict(type="DistributedSampler"),
)
eval_cfg = cfg.get("evaluation", {})
eval_cfg["by_epoch"] = cfg.runner["type"] != "IterBasedRunner"
eval_cfg["jsonfile_prefix"] = osp.join(
"val",
cfg.work_dir,
time.ctime().replace(" ", "_").replace(":", "_"),
)
eval_hook = CustomDistEvalHook if distributed else EvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
# user-defined hooks
if cfg.get("custom_hooks", None):
custom_hooks = cfg.custom_hooks
assert isinstance(
custom_hooks, list
), f"custom_hooks expect list type, but got {type(custom_hooks)}"
for hook_cfg in cfg.custom_hooks:
assert isinstance(hook_cfg, dict), (
"Each item in custom_hooks expects dict type, but got "
f"{type(hook_cfg)}"
)
hook_cfg = hook_cfg.copy()
priority = hook_cfg.pop("priority", "NORMAL")
hook = build_from_cfg(hook_cfg, HOOKS)
runner.register_hook(hook, priority=priority)
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
if cfg.get('enable_profiler', False):
# Create the profiler configuration
profiler = torch.profiler.profile(
activities=[
torch.profiler.ProfilerActivity.CPU,
torch.profiler.ProfilerActivity.CUDA
],
schedule=torch.profiler.schedule(
wait=50, # skip the first 50 steps
warmup=1, # warm up for 1 step (not recorded)
active=1, # actively profile 1 step
repeat=1 # run the wait/warmup/active cycle only once
),
on_trace_ready=torch.profiler.tensorboard_trace_handler(
# f"{cfg.work_dir}/profiler_logs" # 输出目录
"/home/SparseDrive/profiler_logs"
# "./profiler_logs"
),
with_stack=False, # do not collect call-stack information
profile_memory=False, # do not profile memory usage
record_shapes=True # record tensor shapes
)
# Create and register the ProfilerHook
profiler_hook = ProfilerHook(profiler)
runner.register_hook(profiler_hook)
# Start the profiler
profiler.start()
print("==================================== profiler.start()===================================================================")
try:
# Run training
runner.run(data_loaders, cfg.workflow)
finally:
# Make sure the profiler is stopped
profiler.stop()
else:
# Normal training without the profiler
runner.run(data_loaders, cfg.workflow)
# runner.run(data_loaders, cfg.workflow)
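# Usage note (sketch, not part of the original training code): the profiler branch
# above is gated by `cfg.get('enable_profiler', False)`, so turning it on only
# requires adding `enable_profiler = True` to the experiment config. The trace is
# written by tensorboard_trace_handler and, assuming the torch-tb-profiler plugin
# is installed, can be inspected with:
#     tensorboard --logdir /home/SparseDrive/profiler_logs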
# ---------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
# ---------------------------------------------
# Modified by Zhiqi Li
# ---------------------------------------------
import random
import warnings
import numpy as np
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (
HOOKS,
DistSamplerSeedHook,
EpochBasedRunner,
Fp16OptimizerHook,
OptimizerHook,
build_optimizer,
build_runner,
get_dist_info,
)
from mmcv.utils import build_from_cfg
from mmdet.core import EvalHook
from mmdet.datasets import build_dataset, replace_ImageToTensor
from mmdet.utils import get_root_logger
import time
import os.path as osp
from projects.mmdet3d_plugin.datasets.builder import build_dataloader
from projects.mmdet3d_plugin.core.evaluation.eval_hooks import (
CustomDistEvalHook,
)
from projects.mmdet3d_plugin.datasets import custom_build_dataset
def custom_train_detector(
model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None,
):
logger = get_root_logger(cfg.log_level)
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
# assert len(dataset) == 1
if "imgs_per_gpu" in cfg.data:
logger.warning(
'"imgs_per_gpu" is deprecated in MMDet V2.0. '
'Please use "samples_per_gpu" instead'
)
if "samples_per_gpu" in cfg.data:
logger.warning(
f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
f"={cfg.data.imgs_per_gpu} is used in this experiments"
)
else:
logger.warning(
'Automatically set "samples_per_gpu"="imgs_per_gpu"='
f"{cfg.data.imgs_per_gpu} in this experiments"
)
cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
if "runner" in cfg:
runner_type = cfg.runner["type"]
else:
runner_type = "EpochBasedRunner"
data_loaders = [
build_dataloader(
ds,
cfg.data.samples_per_gpu,
cfg.data.workers_per_gpu,
# cfg.gpus will be ignored if distributed
len(cfg.gpu_ids),
dist=distributed,
seed=cfg.seed,
nonshuffler_sampler=dict(
type="DistributedSampler"
), # dict(type='DistributedSampler'),
runner_type=runner_type,
)
for ds in dataset
]
# put model on gpus
if distributed:
find_unused_parameters = cfg.get("find_unused_parameters", False)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters,
)
else:
model = MMDataParallel(
model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids
)
# build runner
optimizer = build_optimizer(model, cfg.optimizer)
if "runner" not in cfg:
cfg.runner = {
"type": "EpochBasedRunner",
"max_epochs": cfg.total_epochs,
}
warnings.warn(
"config is now expected to have a `runner` section, "
"please set `runner` in your config.",
UserWarning,
)
else:
if "total_epochs" in cfg:
assert cfg.total_epochs == cfg.runner.max_epochs
runner = build_runner(
cfg.runner,
default_args=dict(
model=model,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta,
),
)
# an ugly workaround to make .log and .log.json filenames the same
runner.timestamp = timestamp
# fp16 setting
fp16_cfg = cfg.get("fp16", None)
if fp16_cfg is not None:
optimizer_config = Fp16OptimizerHook(
**cfg.optimizer_config, **fp16_cfg, distributed=distributed
)
elif distributed and "type" not in cfg.optimizer_config:
optimizer_config = OptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
# register hooks
runner.register_training_hooks(
cfg.lr_config,
optimizer_config,
cfg.checkpoint_config,
cfg.log_config,
cfg.get("momentum_config", None),
)
# register profiler hook
# trace_config = dict(type='tb_trace', dir_name='work_dir')
# profiler_config = dict(on_trace_ready=trace_config)
# runner.register_profiler_hook(profiler_config)
if distributed:
if isinstance(runner, EpochBasedRunner):
runner.register_hook(DistSamplerSeedHook())
# register eval hooks
if validate:
# Support batch_size > 1 in validation
val_samples_per_gpu = cfg.data.val.pop("samples_per_gpu", 1)
if val_samples_per_gpu > 1:
assert False
# Replace 'ImageToTensor' with 'DefaultFormatBundle'
cfg.data.val.pipeline = replace_ImageToTensor(
cfg.data.val.pipeline
)
val_dataset = custom_build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(
val_dataset,
samples_per_gpu=val_samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False,
nonshuffler_sampler=dict(type="DistributedSampler"),
)
eval_cfg = cfg.get("evaluation", {})
eval_cfg["by_epoch"] = cfg.runner["type"] != "IterBasedRunner"
eval_cfg["jsonfile_prefix"] = osp.join(
"val",
cfg.work_dir,
time.ctime().replace(" ", "_").replace(":", "_"),
)
eval_hook = CustomDistEvalHook if distributed else EvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
# user-defined hooks
if cfg.get("custom_hooks", None):
custom_hooks = cfg.custom_hooks
assert isinstance(
custom_hooks, list
), f"custom_hooks expect list type, but got {type(custom_hooks)}"
for hook_cfg in cfg.custom_hooks:
assert isinstance(hook_cfg, dict), (
"Each item in custom_hooks expects dict type, but got "
f"{type(hook_cfg)}"
)
hook_cfg = hook_cfg.copy()
priority = hook_cfg.pop("priority", "NORMAL")
hook = build_from_cfg(hook_cfg, HOOKS)
runner.register_hook(hook, priority=priority)
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow)
# ---------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
# ---------------------------------------------
# Modified by Zhiqi Li
# ---------------------------------------------
import os.path as osp
import pickle
import shutil
import tempfile
import time
import mmcv
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
from mmdet.core import encode_mask_results
import mmcv
import numpy as np
import pycocotools.mask as mask_util
def custom_encode_mask_results(mask_results):
"""Encode bitmap mask to RLE code. Semantic Masks only
Args:
mask_results (list | tuple[list]): bitmap mask results.
In mask scoring rcnn, mask_results is a tuple of (segm_results,
segm_cls_score).
Returns:
list | tuple: RLE encoded mask.
"""
cls_segms = mask_results
num_classes = len(cls_segms)
encoded_mask_results = []
for i in range(len(cls_segms)):
encoded_mask_results.append(
mask_util.encode(
np.array(
cls_segms[i][:, :, np.newaxis], order="F", dtype="uint8"
)
)[0]
) # encoded with RLE
return [encoded_mask_results]
def custom_multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
it encodes results to gpu tensors and use gpu communication for results
collection. On cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
bbox_results = []
mask_results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
time.sleep(2) # This line can prevent deadlock problem in some cases.
have_mask = False
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
# encode mask results
if isinstance(result, dict):
if "bbox_results" in result.keys():
bbox_result = result["bbox_results"]
batch_size = len(result["bbox_results"])
bbox_results.extend(bbox_result)
if (
"mask_results" in result.keys()
and result["mask_results"] is not None
):
mask_result = custom_encode_mask_results(
result["mask_results"]
)
mask_results.extend(mask_result)
have_mask = True
else:
batch_size = len(result)
bbox_results.extend(result)
if rank == 0:
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
bbox_results = collect_results_gpu(bbox_results, len(dataset))
if have_mask:
mask_results = collect_results_gpu(mask_results, len(dataset))
else:
mask_results = None
else:
bbox_results = collect_results_cpu(bbox_results, len(dataset), tmpdir)
tmpdir = tmpdir + "_mask" if tmpdir is not None else None
if have_mask:
mask_results = collect_results_cpu(
mask_results, len(dataset), tmpdir
)
else:
mask_results = None
if mask_results is None:
return bbox_results
return {"bbox_results": bbox_results, "mask_results": mask_results}
def collect_results_cpu(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full(
(MAX_LEN,), 32, dtype=torch.uint8, device="cuda"
)
if rank == 0:
mmcv.mkdir_or_exist(".dist_test")
tmpdir = tempfile.mkdtemp(dir=".dist_test")
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device="cuda"
)
dir_tensor[: len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, f"part_{rank}.pkl"))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, f"part_{i}.pkl")
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
"""
bacause we change the sample of the evaluation stage to make sure that
each gpu will handle continuous sample,
"""
# for res in zip(*part_list):
for res in part_list:
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
# NOTE: falls back to CPU collection; return its result so callers receive data.
return collect_results_cpu(result_part, size)
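# Example call (illustrative sketch, assuming the distributed process group and
# `model`/`data_loader` have been set up as in mmdet_train.py):
#
#     results = custom_multi_gpu_test(
#         model, data_loader,
#         tmpdir="./dist_test",   # only used for CPU collection
#         gpu_collect=False,      # collect via pickled part files in tmpdir
#     )
#     # On rank 0 `results` holds the ordered per-sample predictions; other
#     # ranks receive None from collect_results_cpu.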
# ---------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
# ---------------------------------------------
# Modified by Zhiqi Li
# ---------------------------------------------
from .mmdet_train import custom_train_detector
# from mmseg.apis import train_segmentor
from mmdet.apis import train_detector
def custom_train_model(
model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None,
):
"""A function wrapper for launching model training according to cfg.
Because we need a different eval_hook in the runner; this should be deprecated
in the future.
"""
if cfg.model.type in ["EncoderDecoder3D"]:
assert False
else:
custom_train_detector(
model,
dataset,
cfg,
distributed=distributed,
validate=validate,
timestamp=timestamp,
meta=meta,
)
def train_model(
model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None,
):
"""A function wrapper for launching model training according to cfg.
Because we need a different eval_hook in the runner; this should be deprecated
in the future.
"""
train_detector(
model,
dataset,
cfg,
distributed=distributed,
validate=validate,
timestamp=timestamp,
meta=meta,
)
X, Y, Z, W, L, H, SIN_YAW, COS_YAW, VX, VY, VZ = list(range(11)) # undecoded
CNS, YNS = 0, 1 # centerness and yawness indices in quality
YAW = 6 # decoded
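# Illustrative sketch of how these layout constants are typically used on a
# tensor of undecoded boxes with shape (..., 11) (the variable name `boxes`
# below is hypothetical):
#
#     import torch
#     center = boxes[..., [X, Y, Z]]                                # (..., 3)
#     size = boxes[..., [W, L, H]]                                  # (..., 3)
#     yaw = torch.atan2(boxes[..., SIN_YAW], boxes[..., COS_YAW])   # decoded YAW (index 6)
#     velocity = boxes[..., [VX, VY, VZ]]                           # (..., 3)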
from .eval_hooks import CustomDistEvalHook
# Note: Considering that MMCV's EvalHook updated its interface in V1.3.16,
# in order to avoid strong version dependency, we did not directly
# inherit EvalHook but BaseDistEvalHook.
import bisect
import os.path as osp
import mmcv
import torch.distributed as dist
from mmcv.runner import DistEvalHook as BaseDistEvalHook
from mmcv.runner import EvalHook as BaseEvalHook
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.core.evaluation.eval_hooks import DistEvalHook
def _calc_dynamic_intervals(start_interval, dynamic_interval_list):
assert mmcv.is_list_of(dynamic_interval_list, tuple)
dynamic_milestones = [0]
dynamic_milestones.extend(
[dynamic_interval[0] for dynamic_interval in dynamic_interval_list]
)
dynamic_intervals = [start_interval]
dynamic_intervals.extend(
[dynamic_interval[1] for dynamic_interval in dynamic_interval_list]
)
return dynamic_milestones, dynamic_intervals
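# Worked example (follows directly from the code above): with start_interval=1
# and dynamic_interval_list=[(8, 2), (11, 1)] this returns
#     dynamic_milestones = [0, 8, 11]
#     dynamic_intervals  = [1, 2, 1]
# so CustomDistEvalHook uses an evaluation interval of 1 until progress reaches 8,
# 2 between 8 and 11, and 1 afterwards (progress being epochs or iterations,
# depending on by_epoch).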
class CustomDistEvalHook(BaseDistEvalHook):
def __init__(self, *args, dynamic_intervals=None, **kwargs):
super(CustomDistEvalHook, self).__init__(*args, **kwargs)
self.use_dynamic_intervals = dynamic_intervals is not None
if self.use_dynamic_intervals:
(
self.dynamic_milestones,
self.dynamic_intervals,
) = _calc_dynamic_intervals(self.interval, dynamic_intervals)
def _decide_interval(self, runner):
if self.use_dynamic_intervals:
progress = runner.epoch if self.by_epoch else runner.iter
step = bisect.bisect(self.dynamic_milestones, (progress + 1))
# Dynamically modify the evaluation interval
self.interval = self.dynamic_intervals[step - 1]
def before_train_epoch(self, runner):
"""Evaluate the model only at the start of training by epoch."""
self._decide_interval(runner)
super().before_train_epoch(runner)
def before_train_iter(self, runner):
self._decide_interval(runner)
super().before_train_iter(runner)
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
# Synchronization of BatchNorm's buffer (running_mean
# and running_var) is not supported in the DDP of pytorch,
# which may cause the inconsistent performance of models in
# different ranks, so we broadcast BatchNorm's buffers
# of rank 0 to other ranks to avoid this.
if self.broadcast_bn_buffer:
model = runner.model
for name, module in model.named_modules():
if (
isinstance(module, _BatchNorm)
and module.track_running_stats
):
dist.broadcast(module.running_var, 0)
dist.broadcast(module.running_mean, 0)
if not self._should_evaluate(runner):
return
tmpdir = self.tmpdir
if tmpdir is None:
tmpdir = osp.join(runner.work_dir, ".eval_hook")
from projects.mmdet3d_plugin.apis.test import (
custom_multi_gpu_test,
) # imported here to avoid a circular import
results = custom_multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=tmpdir,
gpu_collect=self.gpu_collect,
)
if runner.rank == 0:
print("\n")
runner.log_buffer.output["eval_iter_num"] = len(self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
from .nuscenes_3d_dataset import NuScenes3DDataset
from .builder import *
from .pipelines import *
from .samplers import *
__all__ = [
'NuScenes3DDataset',
"custom_build_dataset",
]
import copy
import platform
import random
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from torch.utils.data import DataLoader
from mmdet.datasets.samplers import GroupSampler
from projects.mmdet3d_plugin.datasets.samplers import (
GroupInBatchSampler,
DistributedGroupSampler,
DistributedSampler,
build_sampler
)
def build_dataloader(
dataset,
samples_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=True,
seed=None,
shuffler_sampler=None,
nonshuffler_sampler=None,
runner_type="EpochBasedRunner",
**kwargs
):
"""Build PyTorch DataLoader.
In distributed training, each GPU/process has a dataloader.
In non-distributed training, there is only one dataloader for all GPUs.
Args:
dataset (Dataset): A PyTorch dataset.
samples_per_gpu (int): Number of training samples on each GPU, i.e.,
batch size of each GPU.
workers_per_gpu (int): How many subprocesses to use for data loading
for each GPU.
num_gpus (int): Number of GPUs. Only used in non-distributed training.
dist (bool): Distributed training/test or not. Default: True.
shuffle (bool): Whether to shuffle the data at every epoch.
Default: True.
kwargs: any keyword argument to be used to initialize DataLoader
Returns:
DataLoader: A PyTorch dataloader.
"""
rank, world_size = get_dist_info()
batch_sampler = None
if runner_type == 'IterBasedRunner':
print("Use GroupInBatchSampler !!!")
batch_sampler = GroupInBatchSampler(
dataset,
samples_per_gpu,
world_size,
rank,
seed=seed,
)
batch_size = 1
sampler = None
num_workers = workers_per_gpu
elif dist:
# DistributedGroupSampler will always shuffle the data to ensure that
# images on each GPU belong to the same group
if shuffle:
print("Use DistributedGroupSampler !!!")
sampler = build_sampler(
shuffler_sampler
if shuffler_sampler is not None
else dict(type="DistributedGroupSampler"),
dict(
dataset=dataset,
samples_per_gpu=samples_per_gpu,
num_replicas=world_size,
rank=rank,
seed=seed,
),
)
else:
sampler = build_sampler(
nonshuffler_sampler
if nonshuffler_sampler is not None
else dict(type="DistributedSampler"),
dict(
dataset=dataset,
num_replicas=world_size,
rank=rank,
shuffle=shuffle,
seed=seed,
),
)
batch_size = samples_per_gpu
num_workers = workers_per_gpu
else:
# assert False, 'not supported in bevformer'
print("WARNING: non-distributed mode should only be used to measure inference speed!")
sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None
batch_size = num_gpus * samples_per_gpu
num_workers = num_gpus * workers_per_gpu
init_fn = (
partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
if seed is not None
else None
)
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
batch_sampler=batch_sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
pin_memory=False,
worker_init_fn=init_fn,
**kwargs
)
return data_loader
def worker_init_fn(worker_id, num_workers, rank, seed):
# The seed of each worker equals
# num_workers * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
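# Example call (illustrative sketch mirroring how mmdet_train.py builds its
# training loaders; the batch/worker numbers are placeholders):
#
#     loader = build_dataloader(
#         dataset,
#         samples_per_gpu=4,
#         workers_per_gpu=4,
#         dist=True,
#         seed=cfg.seed,
#         nonshuffler_sampler=dict(type="DistributedSampler"),
#         runner_type="IterBasedRunner",  # selects GroupInBatchSampler above
#     )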
# Copyright (c) OpenMMLab. All rights reserved.
import platform
from mmcv.utils import Registry, build_from_cfg
from mmdet.datasets import DATASETS
from mmdet.datasets.builder import _concat_dataset
if platform.system() != "Windows":
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
OBJECTSAMPLERS = Registry("Object sampler")
def custom_build_dataset(cfg, default_args=None):
try:
from mmdet3d.datasets.dataset_wrappers import CBGSDataset
except ImportError:
CBGSDataset = None
from mmdet.datasets.dataset_wrappers import (
ClassBalancedDataset,
ConcatDataset,
RepeatDataset,
)
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset(
[custom_build_dataset(c, default_args) for c in cfg]
)
elif cfg["type"] == "ConcatDataset":
dataset = ConcatDataset(
[custom_build_dataset(c, default_args) for c in cfg["datasets"]],
cfg.get("separate_eval", True),
)
elif cfg["type"] == "RepeatDataset":
dataset = RepeatDataset(
custom_build_dataset(cfg["dataset"], default_args), cfg["times"]
)
elif cfg["type"] == "ClassBalancedDataset":
dataset = ClassBalancedDataset(
custom_build_dataset(cfg["dataset"], default_args),
cfg["oversample_thr"],
)
elif cfg["type"] == "CBGSDataset":
dataset = CBGSDataset(
custom_build_dataset(cfg["dataset"], default_args)
)
elif isinstance(cfg.get("ann_file"), (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
import numpy as np
from .distance import chamfer_distance, frechet_distance, chamfer_distance_batch
from typing import List, Tuple, Union
from numpy.typing import NDArray
def average_precision(recalls, precisions, mode='area'):
"""Calculate average precision.
Args:
recalls (ndarray): shape (num_dets, )
precisions (ndarray): shape (num_dets, )
mode (str): 'area' or '11points', 'area' means calculating the area
under precision-recall curve, '11points' means calculating
the average precision of recalls at [0, 0.1, ..., 1]
Returns:
float: calculated average precision
"""
recalls = recalls[np.newaxis, :]
precisions = precisions[np.newaxis, :]
assert recalls.shape == precisions.shape and recalls.ndim == 2
num_scales = recalls.shape[0]
ap = 0.
if mode == 'area':
zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
ones = np.ones((num_scales, 1), dtype=recalls.dtype)
mrec = np.hstack((zeros, recalls, ones))
mpre = np.hstack((zeros, precisions, zeros))
for i in range(mpre.shape[1] - 1, 0, -1):
mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])
ind = np.where(mrec[0, 1:] != mrec[0, :-1])[0]
ap = np.sum(
(mrec[0, ind + 1] - mrec[0, ind]) * mpre[0, ind + 1])
elif mode == '11points':
for thr in np.arange(0, 1 + 1e-3, 0.1):
precs = precisions[0, recalls[0, :] >= thr]
prec = precs.max() if precs.size > 0 else 0
ap += prec
ap /= 11
else:
raise ValueError(
'Unrecognized mode, only "area" and "11points" are supported')
return ap
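# Worked example (quick sanity check of 'area' mode): with
#     recalls    = np.array([0.25, 0.5, 0.75, 1.0])
#     precisions = np.array([1.0, 1.0, 0.5, 0.5])
# the interpolated precision envelope is [1.0, 1.0, 0.5, 0.5] over four recall
# segments of width 0.25 each, so
#     average_precision(recalls, precisions)
#     # == 0.25*1.0 + 0.25*1.0 + 0.25*0.5 + 0.25*0.5 == 0.75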
def instance_match(pred_lines: NDArray,
scores: NDArray,
gt_lines: NDArray,
thresholds: Union[Tuple, List],
metric: str='chamfer') -> List:
"""Compute whether detected lines are true positive or false positive.
Args:
pred_lines (array): Detected lines of a sample, of shape (M, INTERP_NUM, 2 or 3).
scores (array): Confidence score of each line, of shape (M, ).
gt_lines (array): GT lines of a sample, of shape (N, INTERP_NUM, 2 or 3).
thresholds (list of tuple): List of thresholds.
metric (str): Distance function for lines matching. Default: 'chamfer'.
Returns:
list_of_tp_fp (list): tp-fp matching result at all thresholds
"""
if metric == 'chamfer':
distance_fn = chamfer_distance
elif metric == 'frechet':
distance_fn = frechet_distance
else:
raise ValueError(f'unknown distance function {metric}')
num_preds = pred_lines.shape[0]
num_gts = gt_lines.shape[0]
# tp and fp
tp_fp_list = []
tp = np.zeros((num_preds), dtype=np.float32)
fp = np.zeros((num_preds), dtype=np.float32)
# if there are no gt lines in this sample, all pred lines are false positives
if num_gts == 0:
fp[...] = 1
for thr in thresholds:
tp_fp_list.append((tp.copy(), fp.copy()))
return tp_fp_list
if num_preds == 0:
for thr in thresholds:
tp_fp_list.append((tp.copy(), fp.copy()))
return tp_fp_list
assert pred_lines.shape[1] == gt_lines.shape[1], \
"sample points num should be the same"
# distance matrix: M x N
matrix = np.zeros((num_preds, num_gts))
# for i in range(num_preds):
# for j in range(num_gts):
# matrix[i, j] = distance_fn(pred_lines[i], gt_lines[j])
matrix = chamfer_distance_batch(pred_lines, gt_lines)
# for each det, the min distance with all gts
matrix_min = matrix.min(axis=1)
# for each det, which gt is the closest to it
matrix_argmin = matrix.argmin(axis=1)
# sort all dets in descending order by scores
sort_inds = np.argsort(-scores)
# match under different thresholds
for thr in thresholds:
tp = np.zeros((num_preds), dtype=np.float32)
fp = np.zeros((num_preds), dtype=np.float32)
gt_covered = np.zeros(num_gts, dtype=bool)
for i in sort_inds:
if matrix_min[i] <= thr:
matched_gt = matrix_argmin[i]
if not gt_covered[matched_gt]:
gt_covered[matched_gt] = True
tp[i] = 1
else:
fp[i] = 1
else:
fp[i] = 1
tp_fp_list.append((tp, fp))
return tp_fp_list
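# Example call (illustrative sketch; shapes follow the docstring above):
#
#     import numpy as np
#     pred_lines = np.random.rand(2, 20, 2)        # (M, INTERP_NUM, 2)
#     gt_lines = np.random.rand(1, 20, 2)          # (N, INTERP_NUM, 2)
#     scores = np.array([0.9, 0.4])
#     tp_fp_list = instance_match(pred_lines, scores, gt_lines,
#                                 thresholds=(0.5, 1.0, 1.5))
#     # one (tp, fp) pair of shape-(M,) float arrays per threshold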
from scipy.spatial import distance
from numpy.typing import NDArray
import torch
def chamfer_distance(line1: NDArray, line2: NDArray) -> float:
''' Calculate chamfer distance between two lines. Make sure the
lines are interpolated.
Args:
line1 (array): coordinates of line1
line2 (array): coordinates of line2
Returns:
distance (float): chamfer distance
'''
dist_matrix = distance.cdist(line1, line2, 'euclidean')
dist12 = dist_matrix.min(-1).sum() / len(line1)
dist21 = dist_matrix.min(-2).sum() / len(line2)
return (dist12 + dist21) / 2
def frechet_distance(line1: NDArray, line2: NDArray) -> float:
''' Calculate frechet distance between two lines. Make sure the
lines are interpolated.
Args:
line1 (array): coordinates of line1
line2 (array): coordinates of line2
Returns:
distance (float): frechet distance
'''
raise NotImplementedError
def chamfer_distance_batch(pred_lines, gt_lines):
''' Calculate chamfer distance between two group of lines. Make sure the
lines are interpolated.
Args:
pred_lines (array or tensor): shape (m, num_pts, 2 or 3)
gt_lines (array or tensor): shape (n, num_pts, 2 or 3)
Returns:
distance (array): chamfer distance
'''
_, num_pts, coord_dims = pred_lines.shape
if not isinstance(pred_lines, torch.Tensor):
pred_lines = torch.tensor(pred_lines)
if not isinstance(gt_lines, torch.Tensor):
gt_lines = torch.tensor(gt_lines)
dist_mat = torch.cdist(pred_lines.view(-1, coord_dims),
gt_lines.view(-1, coord_dims), p=2)
# (num_query*num_points, num_gt*num_points)
dist_mat = torch.stack(torch.split(dist_mat, num_pts))
# (num_query, num_points, num_gt*num_points)
dist_mat = torch.stack(torch.split(dist_mat, num_pts, dim=-1))
# (num_gt, num_q, num_pts, num_pts)
dist1 = dist_mat.min(-1)[0].sum(-1)
dist2 = dist_mat.min(-2)[0].sum(-1)
dist_matrix = (dist1 + dist2).transpose(0, 1) / (2 * num_pts)
return dist_matrix.numpy()
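# Worked example for chamfer_distance: with line1 = [[0, 0], [1, 0]] and
# line2 = [[0, 1], [1, 1]] (as numpy arrays), every point's nearest neighbour
# on the other line is 1.0 away in both directions, so
#     chamfer_distance(line1, line2)  # == (1.0 + 1.0) / 2 == 1.0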