Commit 41b18fd8 authored by zhe chen's avatar zhe chen
Browse files

Use pre-commit to reformat code


Use pre-commit to reformat code
parent ff20ea39
[flake8]
ignore = E501, E502, F403, C901, W504, W605, E251, E122, E126, E127, E722, W503, E128, E741, E731, E701, E712
select = E1, E3, E502, E7, E9, W1, W5, W6
max-line-length = 180
exclude=*.egg/*,build,dist,detection/configs/*
[isort]
line-length = 180
multi_line_output = 0
extra_standard_library = setuptools
known_third_party = PIL,asynctest,cityscapesscripts,cv2,gather_models,matplotlib,mmcv,numpy,onnx,onnxruntime,pycocotools,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,six,terminaltables,torch,ts,yaml
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY
[yapf]
BASED_ON_STYLE = pep8
BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true
[codespell]
skip = *.ipynb
quiet-level = 3
ignore-words-list = patten,nd,ty,mot,hist,formating,winn,gool,datas,wan,confids,TOOD,tood
© 2022 GitHub, Inc.
Terms
Privacy
Security
Status
Docs
Contact GitHub
Pricing
API
exclude: ^internvl_chat_llava/
repos:
- repo: https://github.com/PyCQA/flake8
rev: 5.0.4
hooks:
- id: flake8
- repo: https://github.com/PyCQA/isort
rev: 5.11.5
hooks:
- id: isort
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
hooks:
- id: trailing-whitespace
- id: check-yaml
- id: end-of-file-fixer
- id: requirements-txt-fixer
- id: double-quote-string-fixer
- id: check-merge-conflict
- id: fix-encoding-pragma
args: ["--remove"]
- id: mixed-line-ending
args: ["--fix=lf"]
- repo: https://github.com/executablebooks/mdformat
rev: 0.7.9
hooks:
- id: mdformat
args: ["--number"]
additional_dependencies:
- mdformat-openmmlab
- mdformat_frontmatter
- linkify-it-py
This diff is collapsed.
This diff is collapsed.
<div id="top" align="center">
# InternImage-based Baseline for Online HD Map Construction Challenge For Autonomous Driving
</div>
</div>
If you need detaild information about the challenge, please refer
to https://github.com/Tsinghua-MARS-Lab/Online-HD-Map-Construction-CVPR2023/tree/master
If you need detaild information about the challenge, please refer to https://github.com/Tsinghua-MARS-Lab/Online-HD-Map-Construction-CVPR2023/tree/master
#### 1. Requirements
```bash
python>=3.8
torch==1.11 # recommend
......@@ -18,8 +20,8 @@ numpy==1.23.5
mmdet3d==1.0.0rc6 # recommend
```
### 2. Install DCNv3 for InternImage
```bash
cd projects/ops_dcnv3
bash make.sh # requires torch>=1.10
......@@ -33,21 +35,17 @@ bash tools/dist_train.sh src/configs/vectormapnet_intern.py ${NUM_GPUS}
Notes: InatenImage provides abundant pre-trained model weights that can be used!!!
### 4. Performance compared to baseline
model name|weight|$\mathrm{mAP}$ | $\mathrm{AP}_{pc}$ | $\mathrm{AP}_{div}$ | $\mathrm{AP}_{bound}$ |
----|:----------:| :--: | :--: | :--: | :--: |
vectormapnet_intern|[Checkpoint](https://github.com/OpenGVLab/InternImage/releases/download/track_model/vectormapnet_internimage.pth) | 49.35 | 45.05 | 56.78 | 46.22 |
vectormapnet_base|[Google Drive](https://drive.google.com/file/d/16D1CMinwA8PG1sd9PV9_WtHzcBohvO-D/view) | 42.79 | 37.22 | 50.47 | 40.68 |
| model name | weight | $\\mathrm{mAP}$ | $\\mathrm{AP}\_{pc}$ | $\\mathrm{AP}\_{div}$ | $\\mathrm{AP}\_{bound}$ |
| ------------------- | :---------------------------------------------------------------------------------------------------------------: | :-------------: | :------------------: | :-------------------: | :---------------------: |
| vectormapnet_intern | [Checkpoint](https://github.com/OpenGVLab/InternImage/releases/download/track_model/vectormapnet_internimage.pth) | 49.35 | 45.05 | 56.78 | 46.22 |
| vectormapnet_base | [Google Drive](https://drive.google.com/file/d/16D1CMinwA8PG1sd9PV9_WtHzcBohvO-D/view) | 42.79 | 37.22 | 50.47 | 40.68 |
## Citation
The evaluation metrics of this challenge follows [HDMapNet](https://arxiv.org/abs/2107.06307). We provide [VectorMapNet](https://arxiv.org/abs/2206.08920) as the baseline. Please cite:
The evaluation metrics of this challenge follows [HDMapNet](https://arxiv.org/abs/2107.06307). We
provide [VectorMapNet](https://arxiv.org/abs/2206.08920) as the baseline. Please cite:
```
@article{li2021hdmapnet,
......@@ -69,8 +67,8 @@ Our dataset is built on top of the [Argoverse 2](https://www.argoverse.org/av2.h
}
```
## License
Before participating in our challenge, you should register on the website and agree to the terms of use of the [Argoverse 2](https://www.argoverse.org/av2.html) dataset.
All code in this project is released under [GNU General Public License v3.0](./LICENSE).
Before participating in our challenge, you should register on the website and agree to the terms of use of
the [Argoverse 2](https://www.argoverse.org/av2.html) dataset. All code in this project is released
under [GNU General Public License v3.0](./LICENSE).
from .models import *
from .datasets import *
\ No newline at end of file
......@@ -125,8 +125,7 @@ data = dict(
classes=class_names,
test_mode=True,
ignore_index=len(class_names),
scene_idxs=data_root +
f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'),
scene_idxs=data_root + f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'),
test=dict(
type=dataset_type,
data_root=data_root,
......
......@@ -25,7 +25,7 @@ model = dict(
in_channels=256,
num_points=256,
gt_per_seed=1,
conv_channels=(128, ),
conv_channels=(128,),
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.1),
with_res_feat=False,
......@@ -43,8 +43,8 @@ model = dict(
pred_layer_cfg=dict(
in_channels=1536,
shared_conv_channels=(512, 128),
cls_conv_channels=(128, ),
reg_conv_channels=(128, ),
cls_conv_channels=(128,),
reg_conv_channels=(128,),
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.1),
bias=True),
......
......@@ -31,16 +31,16 @@ model = dict(
dir_offset=0.7854, # pi/4
strides=[8, 16, 32, 64, 128],
group_reg_dims=(2, 1, 3, 1, 2), # offset, depth, size, rot, velo
cls_branch=(256, ),
cls_branch=(256,),
reg_branch=(
(256, ), # offset
(256, ), # depth
(256, ), # size
(256, ), # rot
(256,), # offset
(256,), # depth
(256,), # size
(256,), # rot
() # velo
),
dir_branch=(256, ),
attr_branch=(256, ),
dir_branch=(256,),
attr_branch=(256,),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
......
......@@ -11,12 +11,12 @@ meta = {
'output_format': 'vector',
# NOTE: please modify the information below
'method': 'VectorMapNet', # name of your method
'method': 'VectorMapNet', # name of your method
'authors': ['Yicheng Liu', 'Tianyuan Yuan', 'Yue Wang',
'Yilun Wang', 'Hang Zhao'], # author names
'e-mail': 'yuantianyuan01@gmail.com', # your e-mail address
'institution / company': 'MarsLab, Tsinghua University', # your organization
'country / region': 'xxx', # (IMPORTANT) your country/region in iso3166 standard
'Yilun Wang', 'Hang Zhao'], # author names
'e-mail': 'yuantianyuan01@gmail.com', # your e-mail address
'institution / company': 'MarsLab, Tsinghua University', # your organization
'country / region': 'xxx', # (IMPORTANT) your country/region in iso3166 standard
}
# model type
......@@ -30,7 +30,7 @@ plugin_dir = 'src/'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
img_size = (int(128*2), int((16/9*128)*2))
img_size = (int(128 * 2), int((16 / 9 * 128) * 2))
# category configs
cat2id = {
......@@ -41,14 +41,14 @@ cat2id = {
num_class = max(list(cat2id.values())) + 1
# bev configs
roi_size = (60, 30) # bev range, 60m in x-axis, 30m in y-axis
canvas_size = (200, 100) # bev feature size
roi_size = (60, 30) # bev range, 60m in x-axis, 30m in y-axis
canvas_size = (200, 100) # bev feature size
# vectorize params
coords_dim = 2 # polylines coordinates dimension, 2 or 3
sample_dist = -1 # sampling params, vectormapnet uses simplify
sample_num = -1 # sampling params, vectormapnet uses simplify
simplify = True # sampling params, vectormapnet uses simplify
coords_dim = 2 # polylines coordinates dimension, 2 or 3
sample_dist = -1 # sampling params, vectormapnet uses simplify
sample_num = -1 # sampling params, vectormapnet uses simplify
simplify = True # sampling params, vectormapnet uses simplify
# model configs
head_dim = 256
......@@ -85,21 +85,21 @@ model = dict(
upsample=dict(
zoom_size=(1, 2, 4, 8),
in_channels=128,
out_channels=128,),
xbound=[-roi_size[0]/2, roi_size[0]/2, roi_size[0]/canvas_size[0]],
ybound=[-roi_size[1]/2, roi_size[1]/2, roi_size[1]/canvas_size[1]],
out_channels=128, ),
xbound=[-roi_size[0] / 2, roi_size[0] / 2, roi_size[0] / canvas_size[0]],
ybound=[-roi_size[1] / 2, roi_size[1] / 2, roi_size[1] / canvas_size[1]],
heights=[-1.1, 0, 0.5, 1.1],
out_channels=128,
pretrained=None,
num_cam=7,
),
),
head_cfg=dict(
type='DGHead',
augmentation=True,
augmentation_kwargs=dict(
p=0.3,scale=0.01,
p=0.3, scale=0.01,
bbox_type='xyxy',
),
),
det_net_cfg=dict(
type='MapElementDetector',
num_query=120,
......@@ -135,30 +135,30 @@ model = dict(
num_heads=8,
attn_drop=0.1,
proj_drop=0.1,
dropout_layer=dict(type='Dropout', drop_prob=0.1),),
dropout_layer=dict(type='Dropout', drop_prob=0.1), ),
dict(
type='MultiScaleDeformableAttention',
embed_dims=head_dim,
num_heads=8,
num_levels=1,
),
),
],
ffn_cfgs=dict(
type='FFN',
embed_dims=head_dim,
feedforward_channels=head_dim*2,
feedforward_channels=head_dim * 2,
num_fcs=2,
ffn_drop=0.1,
act_cfg=dict(type='ReLU', inplace=True),
act_cfg=dict(type='ReLU', inplace=True),
),
feedforward_channels=head_dim*2,
feedforward_channels=head_dim * 2,
ffn_dropout=0.1,
operation_order=('norm', 'self_attn', 'norm', 'cross_attn',
'norm', 'ffn',)))
),
'norm', 'ffn',)))
),
positional_encoding=dict(
type='SinePositionalEncoding',
num_feats=head_dim//2,
num_feats=head_dim // 2,
normalize=True,
offset=-0.5),
loss_cls=dict(
......@@ -176,30 +176,30 @@ model = dict(
cost=dict(
type='MapQueriesCost',
cls_cost=dict(type='FocalLossCost', weight=2.0),
reg_cost=dict(type='BBoxCostC', weight=0.1), # continues
iou_cost=dict(type='IoUCostC', weight=1,box_format='xyxy'), # continues
),
reg_cost=dict(type='BBoxCostC', weight=0.1), # continues
iou_cost=dict(type='IoUCostC', weight=1, box_format='xyxy'), # continues
),
),
),
),
gen_net_cfg=dict(
type='PolylineGenerator',
in_channels=128,
encoder_config=None,
decoder_config={
'layer_config': {
'd_model': 256,
'nhead': 8,
'dim_feedforward': 512,
'dropout': 0.2,
'norm_first': True,
're_zero': True,
},
'num_layers': 6,
'layer_config': {
'd_model': 256,
'nhead': 8,
'dim_feedforward': 512,
'dropout': 0.2,
'norm_first': True,
're_zero': True,
},
'num_layers': 6,
},
class_conditional=True,
num_classes=num_class,
canvas_size=canvas_size, #xy
canvas_size=canvas_size, # xy
max_seq_length=500,
decoder_cross_attention=False,
use_discrete_vertex_embeddings=True,
......@@ -207,7 +207,7 @@ model = dict(
max_num_vertices=80,
top_p_gen_model=0.9,
sync_cls_avg_factor=True,
),
),
with_auxiliary_head=False,
model_name='VectorMapNet'
)
......@@ -226,11 +226,11 @@ train_pipeline = [
canvas_size=canvas_size, # xy
coord_dim=2,
num_class=num_class,
threshold=4/200,
threshold=4 / 200,
),
dict(type='LoadMultiViewImagesFromFiles'),
dict(type='ResizeMultiViewImages',
size = (int(128*2), int((16/9*128)*2)), # H, W
size=(int(128 * 2), int((16 / 9 * 128) * 2)), # H, W
change_intrinsics=True,
),
dict(type='Normalize3D', **img_norm_cfg),
......@@ -243,7 +243,7 @@ train_pipeline = [
test_pipeline = [
dict(type='LoadMultiViewImagesFromFiles'),
dict(type='ResizeMultiViewImages',
size=img_size, # H, W
size=img_size, # H, W
change_intrinsics=True,
),
dict(type='Normalize3D', **img_norm_cfg),
......@@ -296,9 +296,9 @@ optimizer = dict(
type='AdamW',
lr=1e-3,
paramwise_cfg=dict(
custom_keys={
'backbone': dict(lr_mult=0.1),
}),
custom_keys={
'backbone': dict(lr_mult=0.1),
}),
weight_decay=0.01)
optimizer_config = dict(grad_clip=dict(max_norm=3.5, norm_type=2))
......@@ -315,7 +315,7 @@ total_epochs = 130
# kwargs for dataset evaluation
eval_kwargs = dict()
evaluation = dict(
interval=5,
interval=5,
**eval_kwargs)
runner = dict(type='EpochBasedRunner', max_epochs=total_epochs)
......
......@@ -11,12 +11,12 @@ meta = {
'output_format': 'vector',
# NOTE: please modify the information below
'method': 'VectorMapNet', # name of your method
'method': 'VectorMapNet', # name of your method
'authors': ['Yicheng Liu', 'Tianyuan Yuan', 'Yue Wang',
'Yilun Wang', 'Hang Zhao'], # author names
'e-mail': 'yuantianyuan01@gmail.com', # your e-mail address
'institution / company': 'MarsLab, Tsinghua University', # your organization
'country / region': 'xxx', # (IMPORTANT) your country/region in iso3166 standard
'Yilun Wang', 'Hang Zhao'], # author names
'e-mail': 'yuantianyuan01@gmail.com', # your e-mail address
'institution / company': 'MarsLab, Tsinghua University', # your organization
'country / region': 'xxx', # (IMPORTANT) your country/region in iso3166 standard
}
# model type
......@@ -28,11 +28,11 @@ plugin_dir = 'src/'
# img configs
# img_norm_cfg = dict(
# mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
# mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_size = (int(128*2), int((16/9*128)*2))
img_size = (int(128 * 2), int((16 / 9 * 128) * 2))
# category configs
cat2id = {
......@@ -43,14 +43,14 @@ cat2id = {
num_class = max(list(cat2id.values())) + 1
# bev configs
roi_size = (60, 30) # bev range, 60m in x-axis, 30m in y-axis
canvas_size = (200, 100) # bev feature size
roi_size = (60, 30) # bev range, 60m in x-axis, 30m in y-axis
canvas_size = (200, 100) # bev feature size
# vectorize params
coords_dim = 2 # polylines coordinates dimension, 2 or 3
sample_dist = -1 # sampling params, vectormapnet uses simplify
sample_num = -1 # sampling params, vectormapnet uses simplify
simplify = True # sampling params, vectormapnet uses simplify
coords_dim = 2 # polylines coordinates dimension, 2 or 3
sample_dist = -1 # sampling params, vectormapnet uses simplify
sample_num = -1 # sampling params, vectormapnet uses simplify
simplify = True # sampling params, vectormapnet uses simplify
# model configs
head_dim = 256
......@@ -62,20 +62,20 @@ model = dict(
backbone_cfg=dict(
type='IPMEncoder',
img_backbone=dict(
_delete_=True,
type='InternImage',
core_op='DCNv3',
channels=80,
depths=[4, 4, 21, 4],
groups=[5, 10, 20, 40],
mlp_ratio=4.,
drop_path_rate=0.3,
norm_layer='LN',
layer_scale=1.0,
offset_scale=1.0,
post_norm=True,
with_cp=False,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
_delete_=True,
type='InternImage',
core_op='DCNv3',
channels=80,
depths=[4, 4, 21, 4],
groups=[5, 10, 20, 40],
mlp_ratio=4.,
drop_path_rate=0.3,
norm_layer='LN',
layer_scale=1.0,
offset_scale=1.0,
post_norm=True,
with_cp=False,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
img_neck=dict(
type='FPN',
in_channels=[80, 160, 320, 640],
......@@ -89,21 +89,21 @@ model = dict(
upsample=dict(
zoom_size=(1, 2, 4, 8),
in_channels=128,
out_channels=128,),
xbound=[-roi_size[0]/2, roi_size[0]/2, roi_size[0]/canvas_size[0]],
ybound=[-roi_size[1]/2, roi_size[1]/2, roi_size[1]/canvas_size[1]],
out_channels=128, ),
xbound=[-roi_size[0] / 2, roi_size[0] / 2, roi_size[0] / canvas_size[0]],
ybound=[-roi_size[1] / 2, roi_size[1] / 2, roi_size[1] / canvas_size[1]],
heights=[-1.1, 0, 0.5, 1.1],
out_channels=128,
pretrained=None,
num_cam=7,
),
),
head_cfg=dict(
type='DGHead',
augmentation=True,
augmentation_kwargs=dict(
p=0.3,scale=0.01,
p=0.3, scale=0.01,
bbox_type='xyxy',
),
),
det_net_cfg=dict(
type='MapElementDetector',
num_query=120,
......@@ -139,30 +139,30 @@ model = dict(
num_heads=8,
attn_drop=0.1,
proj_drop=0.1,
dropout_layer=dict(type='Dropout', drop_prob=0.1),),
dropout_layer=dict(type='Dropout', drop_prob=0.1), ),
dict(
type='MultiScaleDeformableAttention',
embed_dims=head_dim,
num_heads=8,
num_levels=1,
),
),
],
ffn_cfgs=dict(
type='FFN',
embed_dims=head_dim,
feedforward_channels=head_dim*2,
feedforward_channels=head_dim * 2,
num_fcs=2,
ffn_drop=0.1,
act_cfg=dict(type='ReLU', inplace=True),
),
feedforward_channels=head_dim*2,
feedforward_channels=head_dim * 2,
ffn_dropout=0.1,
operation_order=('norm', 'self_attn', 'norm', 'cross_attn',
'norm', 'ffn',)))
),
'norm', 'ffn',)))
),
positional_encoding=dict(
type='SinePositionalEncoding',
num_feats=head_dim//2,
num_feats=head_dim // 2,
normalize=True,
offset=-0.5),
loss_cls=dict(
......@@ -180,30 +180,30 @@ model = dict(
cost=dict(
type='MapQueriesCost',
cls_cost=dict(type='FocalLossCost', weight=2.0),
reg_cost=dict(type='BBoxCostC', weight=0.1), # continues
iou_cost=dict(type='IoUCostC', weight=1,box_format='xyxy'), # continues
),
reg_cost=dict(type='BBoxCostC', weight=0.1), # continues
iou_cost=dict(type='IoUCostC', weight=1, box_format='xyxy'), # continues
),
),
),
),
gen_net_cfg=dict(
type='PolylineGenerator',
in_channels=128,
encoder_config=None,
decoder_config={
'layer_config': {
'd_model': 256,
'nhead': 8,
'dim_feedforward': 512,
'dropout': 0.2,
'norm_first': True,
're_zero': True,
},
'num_layers': 6,
'layer_config': {
'd_model': 256,
'nhead': 8,
'dim_feedforward': 512,
'dropout': 0.2,
'norm_first': True,
're_zero': True,
},
'num_layers': 6,
},
class_conditional=True,
num_classes=num_class,
canvas_size=canvas_size, #xy
canvas_size=canvas_size, # xy
max_seq_length=500,
decoder_cross_attention=False,
use_discrete_vertex_embeddings=True,
......@@ -211,7 +211,7 @@ model = dict(
max_num_vertices=80,
top_p_gen_model=0.9,
sync_cls_avg_factor=True,
),
),
with_auxiliary_head=False,
model_name='VectorMapNet'
)
......@@ -230,11 +230,11 @@ train_pipeline = [
canvas_size=canvas_size, # xy
coord_dim=2,
num_class=num_class,
threshold=4/200,
threshold=4 / 200,
),
dict(type='LoadMultiViewImagesFromFiles'),
dict(type='ResizeMultiViewImages',
size = (int(128*2), int((16/9*128)*2)), # H, W
size=(int(128 * 2), int((16 / 9 * 128) * 2)), # H, W
change_intrinsics=True,
),
dict(type='Normalize3D', **img_norm_cfg),
......@@ -247,7 +247,7 @@ train_pipeline = [
test_pipeline = [
dict(type='LoadMultiViewImagesFromFiles'),
dict(type='ResizeMultiViewImages',
size=img_size, # H, W
size=img_size, # H, W
change_intrinsics=True,
),
dict(type='Normalize3D', **img_norm_cfg),
......@@ -300,9 +300,9 @@ optimizer = dict(
type='AdamW',
lr=1e-3,
paramwise_cfg=dict(
custom_keys={
'backbone': dict(lr_mult=0.1),
}),
custom_keys={
'backbone': dict(lr_mult=0.1),
}),
weight_decay=0.01)
optimizer_config = dict(grad_clip=dict(max_norm=3.5, norm_type=2))
......
from .pipelines import *
from .argo_dataset import AV2Dataset
\ No newline at end of file
from .base_dataset import BaseMapDataset
from mmdet.datasets import DATASETS
import numpy as np
import os
from time import time
import mmcv
import os
import numpy as np
from mmdet.datasets import DATASETS
from shapely.geometry import LineString
from .base_dataset import BaseMapDataset
@DATASETS.register_module()
class AV2Dataset(BaseMapDataset):
"""Argoverse2 map dataset class.
......@@ -22,9 +25,9 @@ class AV2Dataset(BaseMapDataset):
test_mode (bool): whether in test mode
"""
def __init__(self, **kwargs,):
def __init__(self, **kwargs, ):
super().__init__(**kwargs)
def load_annotations(self, ann_file):
"""Load annotations from ann_file.
......@@ -34,20 +37,20 @@ class AV2Dataset(BaseMapDataset):
Returns:
list[dict]: List of annotations.
"""
start_time = time()
ann = mmcv.load(ann_file)
samples = []
for seg_id, sequence in ann.items():
samples.extend(sequence)
samples = samples[::self.interval]
print(f'collected {len(samples)} samples in {(time() - start_time):.2f}s')
self.samples = samples
def get_sample(self, idx):
"""Get data sample. For each sample, map extractor will be applied to extract
map elements.
"""Get data sample. For each sample, map extractor will be applied to extract
map elements.
Args:
idx (int): data index
......@@ -57,7 +60,7 @@ class AV2Dataset(BaseMapDataset):
"""
sample = self.samples[idx]
if not self.test_mode:
ann = sample['annotation']
......@@ -66,7 +69,7 @@ class AV2Dataset(BaseMapDataset):
for k, v in ann.items():
if k in self.cat2id.keys():
map_label2geom[self.cat2id[k]] = [LineString(np.array(l)[:, :3]) for l in v]
ego2img_rts = []
cams = sample['sensor']
for c in cams.values():
......@@ -87,10 +90,10 @@ class AV2Dataset(BaseMapDataset):
# extrinsics are 4x4 tranform matrix, NOTE: **ego2cam**
'cam_extrinsics': [c['extrinsic'] for c in cams.values()],
'ego2img': ego2img_rts,
'ego2global_translation': pose['ego2global_translation'],
'ego2global_translation': pose['ego2global_translation'],
'ego2global_rotation': pose['ego2global_rotation'],
}
if not self.test_mode:
input_dict.update({'map_geoms': map_label2geom}) # {0: List[ped_crossing(LineString)], 1: ...}})
input_dict.update({'map_geoms': map_label2geom}) # {0: List[ped_crossing(LineString)], 1: ...}})
return input_dict
\ No newline at end of file
return input_dict
import numpy as np
import os
import os.path as osp
import mmcv
from .evaluation.vector_eval import VectorEvaluate
import warnings
import mmcv
import numpy as np
from mmdet3d.datasets.pipelines import Compose
from mmdet.datasets import DATASETS
from torch.utils.data import Dataset
import warnings
warnings.filterwarnings("ignore")
from .evaluation.vector_eval import VectorEvaluate
warnings.filterwarnings('ignore')
@DATASETS.register_module()
class BaseMapDataset(Dataset):
......@@ -26,7 +28,8 @@ class BaseMapDataset(Dataset):
work_dir (str): path to work dir
test_mode (bool): whether in test mode
"""
def __init__(self,
def __init__(self,
ann_file,
root_path,
cat2id,
......@@ -36,12 +39,12 @@ class BaseMapDataset(Dataset):
interval=1,
work_dir=None,
test_mode=False,
):
):
super().__init__()
self.ann_file = ann_file
self.meta = meta
self.root_path = root_path
self.classes = list(cat2id.keys())
self.num_classes = len(self.classes)
self.cat2id = cat2id
......@@ -60,12 +63,12 @@ class BaseMapDataset(Dataset):
self.pipeline = Compose(pipeline)
else:
self.pipeline = None
# dummy flags to fit with mmdet dataset
self.flag = np.zeros(len(self), dtype=np.uint8)
self.roi_size = roi_size
self.work_dir = work_dir
self.test_mode = test_mode
......@@ -77,7 +80,7 @@ class BaseMapDataset(Dataset):
def format_results(self, results, denormalize=True, prefix=None):
'''Format prediction result to submission format.
Args:
results (list[Tensor]): List of prediction results.
denormalize (bool): whether to denormalize prediction from (0, 1) \
......@@ -99,18 +102,18 @@ class BaseMapDataset(Dataset):
For each case, the result should be formatted as Dict{'vectors': [], 'scores': [], 'labels': []}
'vectors': List of vector, each vector is a array([[x1, y1], [x2, y2] ...]),
contain all vectors predicted in this sample.
'scores: List of score(float),
'scores: List of score(float),
contain scores of all instances in this sample.
'labels': List of label(int),
'labels': List of label(int),
contain labels of all instances in this sample.
'''
if pred is None: # empty prediction
if pred is None: # empty prediction
continue
single_case = {'vectors': [], 'scores': [], 'labels': []}
token = pred['token']
roi_size = np.array(self.roi_size)
origin = -np.array([self.roi_size[0]/2, self.roi_size[1]/2])
origin = -np.array([self.roi_size[0] / 2, self.roi_size[1] / 2])
for i in range(len(pred['scores'])):
score = pred['scores'][i]
......@@ -120,7 +123,7 @@ class BaseMapDataset(Dataset):
# A line should have >=2 points
if len(vector) < 2:
continue
if denormalize:
eps = 2
vector = vector * (roi_size + eps) + origin
......@@ -128,9 +131,9 @@ class BaseMapDataset(Dataset):
single_case['vectors'].append(vector)
single_case['scores'].append(score)
single_case['labels'].append(label)
submissions['results'][token] = single_case
out_path = osp.join(prefix, 'submission_vector.json')
print(f'\nsaving submissions results to {out_path}')
os.makedirs(os.path.dirname(out_path), exist_ok=True)
......@@ -152,7 +155,7 @@ class BaseMapDataset(Dataset):
self.evaluator = VectorEvaluate(self.ann_file)
print('len of the results', len(results))
result_path = self.format_results(results, denormalize=True, prefix=self.work_dir)
result_dict = self.evaluator.evaluate(result_path, logger=logger)
......@@ -165,7 +168,7 @@ class BaseMapDataset(Dataset):
int: Length of data infos.
"""
return len(self.samples)
def _rand_another(self, idx):
"""Randomly get another item.
......@@ -183,4 +186,3 @@ class BaseMapDataset(Dataset):
input_dict = self.get_sample(idx)
data = self.pipeline(input_dict)
return data
import numpy as np
from .distance import chamfer_distance, frechet_distance
from typing import List, Tuple, Union
import numpy as np
from numpy.typing import NDArray
from .distance import chamfer_distance, frechet_distance
def average_precision(recalls, precisions, mode='area'):
"""Calculate average precision.
"""Calculate average precision.
Args:
recalls (ndarray): shape (num_dets, )
......@@ -31,11 +34,11 @@ def average_precision(recalls, precisions, mode='area'):
mpre = np.hstack((zeros, precisions, zeros))
for i in range(mpre.shape[1] - 1, 0, -1):
mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])
ind = np.where(mrec[0, 1:] != mrec[0, :-1])[0]
ap = np.sum(
(mrec[0, ind + 1] - mrec[0, ind]) * mpre[0, ind + 1])
elif mode == '11points':
for thr in np.arange(0, 1 + 1e-3, 0.1):
precs = precisions[0, recalls[i, :] >= thr]
......@@ -45,14 +48,15 @@ def average_precision(recalls, precisions, mode='area'):
else:
raise ValueError(
'Unrecognized mode, only "area" and "11points" are supported')
return ap
def instance_match(pred_lines: List[NDArray],
scores: NDArray,
gt_lines: List[NDArray],
thresholds: Union[Tuple, List],
metric: str='chamfer') -> List:
def instance_match(pred_lines: List[NDArray],
scores: NDArray,
gt_lines: List[NDArray],
thresholds: Union[Tuple, List],
metric: str = 'chamfer') -> List:
"""Compute whether detected lines are true positive or false positive.
Args:
......@@ -71,7 +75,7 @@ def instance_match(pred_lines: List[NDArray],
elif metric == 'frechet':
distance_fn = frechet_distance
else:
raise ValueError(f'unknown distance function {metric}')
......@@ -89,7 +93,7 @@ def instance_match(pred_lines: List[NDArray],
for thr in thresholds:
tp_fp_list.append((tp.copy(), fp.copy()))
return tp_fp_list
if num_preds == 0:
for thr in thresholds:
tp_fp_list.append((tp.copy(), fp.copy()))
......@@ -126,7 +130,7 @@ def instance_match(pred_lines: List[NDArray],
fp[i] = 1
else:
fp[i] = 1
tp_fp_list.append((tp, fp))
return tp_fp_list
\ No newline at end of file
return tp_fp_list
from scipy.spatial import distance
from numpy.typing import NDArray
from scipy.spatial import distance
def chamfer_distance(line1: NDArray, line2: NDArray) -> float:
''' Calculate chamfer distance between two lines. Make sure the
''' Calculate chamfer distance between two lines. Make sure the
lines are interpolated.
Args:
line1 (array): coordinates of line1
line2 (array): coordinates of line2
Returns:
distance (float): chamfer distance
'''
dist_matrix = distance.cdist(line1, line2, 'euclidean')
dist12 = dist_matrix.min(-1).sum() / len(line1)
dist21 = dist_matrix.min(-2).sum() / len(line2)
return (dist12 + dist21) / 2
def frechet_distance(line1: NDArray, line2: NDArray) -> float:
''' Calculate frechet distance between two lines. Make sure the
''' Calculate frechet distance between two lines. Make sure the
lines are interpolated.
Args:
line1 (array): coordinates of line1
line2 (array): coordinates of line2
Returns:
distance (float): frechet distance
'''
raise NotImplementedError
raise NotImplementedError
from functools import partial
import numpy as np
from logging import Logger
from multiprocessing import Pool
from mmdet3d.datasets import build_dataset, build_dataloader
from time import time
from typing import Dict, List, Optional
import mmcv
from .AP import instance_match, average_precision
import numpy as np
import prettytable
from time import time
from functools import cached_property
from shapely.geometry import LineString
from numpy.typing import NDArray
from typing import Dict, List, Optional
from logging import Logger
from mmcv import Config
from copy import deepcopy
from shapely.geometry import LineString
INTERP_NUM = 100 # number of points to interpolate during evaluation
SAMPLE_DIST = 0.3 # fixed sample distance
THRESHOLDS = [0.5, 1.0, 1.5] # AP thresholds
N_WORKERS = 16 # num workers to parallel
from .AP import average_precision, instance_match
INTERP_NUM = 100 # number of points to interpolate during evaluation
SAMPLE_DIST = 0.3 # fixed sample distance
THRESHOLDS = [0.5, 1.0, 1.5] # AP thresholds
N_WORKERS = 16 # num workers to parallel
CAT2ID = {
'ped_crossing': 0,
......@@ -25,6 +23,7 @@ CAT2ID = {
'boundary': 2,
}
class VectorEvaluate(object):
"""Evaluator for vectorized map.
......@@ -33,7 +32,7 @@ class VectorEvaluate(object):
n_workers (int): num workers to parallel
"""
def __init__(self, ann_file, n_workers: int=N_WORKERS) -> None:
def __init__(self, ann_file, n_workers: int = N_WORKERS) -> None:
ann = mmcv.load(ann_file)
gts = {}
for seg_id, seq in ann.items():
......@@ -42,69 +41,69 @@ class VectorEvaluate(object):
for cat, vectors in frame['annotation'].items():
# only evaluate in 2-dimension
ann[cat] = [np.array(v)[:, :2] for v in vectors]
gts[frame['timestamp']] = ann
self.gts = gts
self.n_workers = n_workers
self.cat2id = CAT2ID
self.id2cat = {v: k for k, v in self.cat2id.items()}
def interp_fixed_num(self,
vector: NDArray,
def interp_fixed_num(self,
vector: NDArray,
num_pts: int) -> NDArray:
''' Interpolate a polyline.
Args:
vector (array): line coordinates, shape (M, 2)
num_pts (int):
num_pts (int):
Returns:
sampled_points (array): interpolated coordinates
'''
line = LineString(vector)
distances = np.linspace(0, line.length, num_pts)
sampled_points = np.array([list(line.interpolate(distance).coords)
for distance in distances]).squeeze()
sampled_points = np.array([list(line.interpolate(distance).coords)
for distance in distances]).squeeze()
return sampled_points
def interp_fixed_dist(self,
def interp_fixed_dist(self,
vector: NDArray,
sample_dist: float) -> NDArray:
''' Interpolate a line at fixed interval.
Args:
vector (LineString): vector
sample_dist (float): sample interval
Returns:
points (array): interpolated points, shape (N, 2)
'''
line = LineString(vector)
distances = list(np.arange(sample_dist, line.length, sample_dist))
# make sure to sample at least two points when sample_dist > line.length
distances = [0,] + distances + [line.length,]
distances = [0, ] + distances + [line.length, ]
sampled_points = np.array([list(line.interpolate(distance).coords)
for distance in distances]).squeeze()
for distance in distances]).squeeze()
return sampled_points
def _evaluate_single(self,
pred_vectors: List,
scores: List,
groundtruth: List,
thresholds: List,
metric: str='metric') -> Dict[int, NDArray]:
def _evaluate_single(self,
pred_vectors: List,
scores: List,
groundtruth: List,
thresholds: List,
metric: str = 'metric') -> Dict[int, NDArray]:
''' Do single-frame matching for one class.
Args:
pred_vectors (List): List[vector(ndarray) (different length)],
pred_vectors (List): List[vector(ndarray) (different length)],
scores (List): List[score(float)]
groundtruth (List): List of vectors
thresholds (List): List of thresholds
Returns:
tp_fp_score_by_thr (Dict): matching results at different thresholds
e.g. {0.5: (M, 2), 1.0: (M, 2), 1.5: (M, 2)}
......@@ -125,36 +124,36 @@ class VectorEvaluate(object):
# vector_interp = self.interp_fixed_num(vector, INTERP_NUM)
vector_interp = self.interp_fixed_dist(vector, SAMPLE_DIST)
gt_lines.append(vector_interp)
scores = np.array(scores)
tp_fp_list = instance_match(pred_lines, scores, gt_lines, thresholds, metric) # (M, 2)
tp_fp_list = instance_match(pred_lines, scores, gt_lines, thresholds, metric) # (M, 2)
tp_fp_score_by_thr = {}
for i, thr in enumerate(thresholds):
tp, fp = tp_fp_list[i]
tp_fp_score = np.hstack([tp[:, None], fp[:, None], scores[:, None]])
tp_fp_score_by_thr[thr] = tp_fp_score
return tp_fp_score_by_thr # {0.5: (M, 2), 1.0: (M, 2), 1.5: (M, 2)}
def evaluate(self,
result_path: str,
metric: str='chamfer',
logger: Optional[Logger]=None) -> Dict[str, float]:
return tp_fp_score_by_thr # {0.5: (M, 2), 1.0: (M, 2), 1.5: (M, 2)}
def evaluate(self,
result_path: str,
metric: str = 'chamfer',
logger: Optional[Logger] = None) -> Dict[str, float]:
''' Do evaluation for a submission file and print evalution results to `logger` if specified.
The submission will be aligned by tokens before evaluation. We use multi-worker to speed up.
Args:
result_path (str): path to submission file
metric (str): distance metric. Default: 'chamfer'
logger (Logger): logger to print evaluation result, Default: None
Returns:
new_result_dict (Dict): evaluation results. AP by categories.
'''
results = mmcv.load(result_path)
results = results['results']
# re-group samples and gt by label
samples_by_cls = {label: [] for label in self.id2cat.keys()}
num_gts = {label: 0 for label in self.id2cat.keys()}
......@@ -166,7 +165,7 @@ class VectorEvaluate(object):
pred = results[token]
else:
pred = {'vectors': [], 'scores': [], 'labels': []}
# for every sample
vectors_by_cls = {label: [] for label in self.id2cat.keys()}
scores_by_cls = {label: [] for label in self.id2cat.keys()}
......@@ -192,11 +191,11 @@ class VectorEvaluate(object):
start = time()
if self.n_workers > 0:
pool = Pool(self.n_workers)
sum_mAP = 0
pbar = mmcv.ProgressBar(len(self.id2cat))
for label in self.id2cat.keys():
samples = samples_by_cls[label] # List[(pred_lines, scores, gts)]
samples = samples_by_cls[label] # List[(pred_lines, scores, gts)]
result_dict[self.id2cat[label]] = {
'num_gts': num_gts[label],
'num_preds': num_preds[label]
......@@ -210,14 +209,14 @@ class VectorEvaluate(object):
tpfp_score_list = []
for sample in samples:
tpfp_score_list.append(fn(*sample))
for thr in THRESHOLDS:
tp_fp_score = [i[thr] for i in tpfp_score_list]
tp_fp_score = np.vstack(tp_fp_score) # (num_dets, 3)
tp_fp_score = np.vstack(tp_fp_score) # (num_dets, 3)
sort_inds = np.argsort(-tp_fp_score[:, -1])
tp = tp_fp_score[sort_inds, 0] # (num_dets,)
fp = tp_fp_score[sort_inds, 1] # (num_dets,)
tp = tp_fp_score[sort_inds, 0] # (num_dets,)
fp = tp_fp_score[sort_inds, 1] # (num_dets,)
tp = np.cumsum(tp, axis=0)
fp = np.cumsum(fp, axis=0)
eps = np.finfo(np.float32).eps
......@@ -229,38 +228,38 @@ class VectorEvaluate(object):
result_dict[self.id2cat[label]].update({f'AP@{thr}': AP})
pbar.update()
AP = sum_AP / len(THRESHOLDS)
sum_mAP += AP
result_dict[self.id2cat[label]].update({f'AP': AP})
if self.n_workers > 0:
pool.close()
mAP = sum_mAP / len(self.id2cat.keys())
result_dict.update({'mAP': mAP})
print(f"finished in {time() - start:.2f}s")
print(f'finished in {time() - start:.2f}s')
# print results
table = prettytable.PrettyTable(['category', 'num_preds', 'num_gts'] +
[f'AP@{thr}' for thr in THRESHOLDS] + ['AP'])
table = prettytable.PrettyTable(['category', 'num_preds', 'num_gts'] +
[f'AP@{thr}' for thr in THRESHOLDS] + ['AP'])
for label in self.id2cat.keys():
table.add_row([
self.id2cat[label],
self.id2cat[label],
result_dict[self.id2cat[label]]['num_preds'],
result_dict[self.id2cat[label]]['num_gts'],
*[round(result_dict[self.id2cat[label]][f'AP@{thr}'], 4) for thr in THRESHOLDS],
round(result_dict[self.id2cat[label]]['AP'], 4),
])
from mmcv.utils import print_log
print_log('\n'+str(table), logger=logger)
print_log('\n' + str(table), logger=logger)
print_log(f'mAP = {mAP:.4f}\n', logger=logger)
new_result_dict = {}
for name in self.cat2id:
new_result_dict[name] = result_dict[name]['AP']
return new_result_dict
\ No newline at end of file
return new_result_dict
# Public pipeline components for the map dataset (mmdet PIPELINES registry).
# Imports are kept alphabetically sorted (isort style, per the repo's
# pre-commit configuration).
from .formating import FormatBundleMap
from .loading import LoadMultiViewImagesFromFiles
from .poly_bbox import PolygonizeLocalMapBbox
from .transform import Normalize3D, PadMultiViewImages, ResizeMultiViewImages
from .vectorize import VectorizeMap

# for argoverse
__all__ = [
    'LoadMultiViewImagesFromFiles',
    'FormatBundleMap', 'Normalize3D', 'ResizeMultiViewImages', 'PadMultiViewImages',
    'VectorizeMap', 'PolygonizeLocalMapBbox'
]
import numpy as np
from mmcv.parallel import DataContainer as DC
from mmdet3d.core.points import BasePoints
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import to_tensor
@PIPELINES.register_module()
class FormatBundleMap(object):
"""Format data for map tasks and then collect data for model input.
......@@ -17,10 +17,10 @@ class FormatBundleMap(object):
- img_metas: (1) to DataContainer (cpu_only=True)
"""
def __init__(self, process_img=True,
keys=['img', 'semantic_mask', 'vectors'],
meta_keys=['intrinsics', 'extrinsics']):
def __init__(self, process_img=True,
keys=['img', 'semantic_mask', 'vectors'],
meta_keys=['intrinsics', 'extrinsics']):
self.process_img = process_img
self.keys = keys
self.meta_keys = meta_keys
......@@ -54,7 +54,7 @@ class FormatBundleMap(object):
else:
img = np.ascontiguousarray(results['img'].transpose(2, 0, 1))
results['img'] = DC(to_tensor(img), stack=True)
if 'semantic_mask' in results:
results['semantic_mask'] = DC(to_tensor(results['semantic_mask']), stack=True)
......@@ -62,7 +62,7 @@ class FormatBundleMap(object):
# vectors may have different sizes
vectors = results['vectors']
results['vectors'] = DC(vectors, stack=False, cpu_only=True)
if 'polys' in results:
results['polys'] = DC(results['polys'], stack=False, cpu_only=True)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment