Commit 41b18fd8 authored by Zhe Chen

Use pre-commit to reformat code

parent ff20ea39
[flake8]
ignore = E501, E502, F403, C901, W504, W605, E251, E122, E126, E127, E722, W503, E128, E741, E731, E701, E712
select = E1, E3, E502, E7, E9, W1, W5, W6
max-line-length = 180
exclude = *.egg/*,build,dist,detection/configs/*
[isort]
line_length = 180
multi_line_output = 0
extra_standard_library = setuptools
known_third_party = PIL,asynctest,cityscapesscripts,cv2,gather_models,matplotlib,mmcv,numpy,onnx,onnxruntime,pycocotools,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,six,terminaltables,torch,ts,yaml
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY
[yapf]
BASED_ON_STYLE = pep8
BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true
[codespell]
skip = *.ipynb
quiet-level = 3
ignore-words-list = patten,nd,ty,mot,hist,formating,winn,gool,datas,wan,confids,TOOD,tood
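A minimal sketch of how these tool configurations can be exercised locally (assuming flake8, isort, yapf, and codespell are installed; each tool picks up its section from the config file in the repo root):

```shell
pip install flake8 isort yapf codespell

flake8 .              # lints with the ignore/select lists above
isort --check-only .  # verifies import ordering
yapf -ir .            # reformats in place using the pep8-based style
codespell             # spell-checks, skipping *.ipynb
```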
exclude: ^internvl_chat_llava/
repos:
  - repo: https://github.com/PyCQA/flake8
    rev: 5.0.4
    hooks:
      - id: flake8
  - repo: https://github.com/PyCQA/isort
    rev: 5.11.5
    hooks:
      - id: isort
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.3.0
    hooks:
      - id: trailing-whitespace
      - id: check-yaml
      - id: end-of-file-fixer
      - id: requirements-txt-fixer
      - id: double-quote-string-fixer
      - id: check-merge-conflict
      - id: fix-encoding-pragma
        args: ["--remove"]
      - id: mixed-line-ending
        args: ["--fix=lf"]
  - repo: https://github.com/executablebooks/mdformat
    rev: 0.7.9
    hooks:
      - id: mdformat
        args: ["--number"]
        additional_dependencies:
          - mdformat-openmmlab
          - mdformat_frontmatter
          - linkify-it-py
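A minimal sketch of how this hook set is typically used (assuming `pre-commit` is installed from PyPI):

```shell
pip install pre-commit
pre-commit install            # register the hooks in .git/hooks
pre-commit run --all-files    # run every configured hook over the whole repo
```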
@@ -29,39 +29,45 @@ The official implementation of

[InternImage: Exploring Large-Scale Vision Foundation Models with Deformable Convolutions](https://arxiv.org/abs/2211.05778).

\[[Paper](https://arxiv.org/abs/2211.05778)\] \[[Blog in Chinese](https://zhuanlan.zhihu.com/p/610772005)\]

## Highlights

- :thumbsup: **The strongest open-source general-purpose vision backbone, with up to 3 billion parameters**
- 🏆 **Achieved `90.1% Top1` accuracy on ImageNet, the most accurate among open-source models**
- 🏆 **Achieved `65.5 mAP` on the COCO object detection benchmark, the only model to exceed `65.0 mAP`**

## Related Projects

### Foundation Models

- [Uni-Perceiver](https://github.com/fundamentalvision/Uni-Perceiver): A unified pre-training architecture for generic perception, covering zero-shot and few-shot tasks
- [Uni-Perceiver v2](https://arxiv.org/abs/2211.09808): A generalist model for large-scale vision and vision-language tasks
- [M3I-Pretraining](https://github.com/OpenGVLab/M3I-Pretraining): A one-stage pre-training paradigm via maximizing multi-modal mutual information
- [InternVL](https://github.com/OpenGVLab/InternVL): The largest open-source vision/vision-language foundation model (14B) to date

### Autonomous Driving

- [BEVFormer](https://github.com/fundamentalvision/BEVFormer): A cutting-edge baseline for camera-based 3D detection
- [BEVFormer v2](https://arxiv.org/abs/2211.10439): Adapting modern image backbones to Bird's-Eye-View recognition via perspective supervision

## Application in Challenges

- [2022 Waymo 3D Camera-Only Detection Challenge](https://waymo.com/open/challenges/2022/3d-camera-only-detection/): InternImage-based BEVFormer++ **ranks 1st**
- [nuScenes 3D detection task](https://www.nuscenes.org/object-detection?externalData=all&mapData=all&modalities=Camera): BEVFormer v2 achieves SOTA performance of 64.8 NDS on the nuScenes camera-only track
- [CVPR 2023 Workshop End-to-End Autonomous Driving](https://opendrivelab.com/e2ead/cvpr23): InternImage supports the baselines of the [3D Occupancy Prediction Challenge](https://opendrivelab.com/AD23Challenge.html#Track3) and the [OpenLane Topology Challenge](https://opendrivelab.com/AD23Challenge.html#Track1)

## News

- `Jan 22, 2024`: 🚀 Support [DCNv4](https://github.com/OpenGVLab/DCNv4) in InternImage!
- `Mar 14, 2023`: 🚀 "INTERN-2.5" is released!
- `Feb 28, 2023`: 🚀 InternImage is accepted to CVPR 2023!
- `Nov 18, 2022`: 🚀 InternImage-XL merged into [BEVFormer v2](https://arxiv.org/abs/2211.10439) achieves state-of-the-art performance of `63.4 NDS` on the nuScenes camera-only track.
- `Nov 10, 2022`: 🚀 InternImage-H achieves a new record `65.4 mAP` on COCO detection test-dev and `62.9 mIoU` on ADE20K, outperforming previous models by a large margin.

## History

- [ ] Models/APIs for other downstream tasks
- [ ] Support [CVPR 2023 Workshop on End-to-End Autonomous Driving](https://opendrivelab.com/e2ead/cvpr23), see [here](https://github.com/OpenGVLab/InternImage/tree/master/autonomous_driving)
- [ ] Support Segment Anything
@@ -77,6 +83,7 @@ ADE20K, outperforming previous models by a large margin.

- [x] InternImage-T/S/B/L/XL semantic segmentation model

## Introduction

"INTERN-2.5" is a powerful multimodal multitask general model jointly released by SenseTime and Shanghai AI Laboratory. It consists of the large-scale vision foundation model "InternImage", the pre-training method "M3I-Pretraining", the generic decoder "Uni-Perceiver" series, and the generic autonomous-driving perception encoder "BEVFormer" series.

<div align=left>

@@ -93,10 +100,10 @@ ADE20K, outperforming previous models by a large margin.

"INTERN-2.5" also demonstrates world-leading performance on 16 other important visual benchmark datasets, covering a wide range of tasks such as classification, detection, and segmentation, making it the top-performing model across multiple domains.

**Performance**

- Classification

<table border="1" width="90%">
<tr align="center">
<th colspan="1"> Image Classification</th><th colspan="2"> Scene Classification </th><th colspan="1">Long-Tail Classification</th>

@@ -124,6 +131,7 @@ ADE20K, outperforming previous models by a large margin.

</table>

- Segmentation

<table border="1" width="90%">
<tr align="center">
<th colspan="3">Semantic Segmentation</th><th colspan="1">Street Segmentation</th><th colspan="1">RGBD Segmentation</th>

@@ -141,10 +149,8 @@ ADE20K, outperforming previous models by a large margin.

**Image-Text Retrieval**: "INTERN-2.5" can quickly locate and retrieve the images most semantically relevant to a given text. This capability applies to both videos and image collections, and can be further combined with object detection boxes to enable a variety of applications, helping users quickly and easily find the image resources they need; for example, it can return the images specified by a text query within an album.

**Image-To-Text**: "INTERN-2.5" has strong understanding capabilities across visual-to-text tasks such as image captioning, visual question answering, visual reasoning, and optical character recognition. For example, in autonomous driving it can enhance scene perception and understanding, assist the vehicle in judging traffic-light status, road signs, and other information, and provide effective perceptual support for vehicle decision-making and planning.
**Performance**

<table border="1" width="90%">

@@ -173,6 +179,7 @@ ADE20K, outperforming previous models by a large margin.

| InternImage-XL | ImageNet-22K | 384x384 | 335M | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_xl_22k_192to384.pth) |
| InternImage-H | Joint 427M | 384x384 | 1.08B | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_h_jointto22k_384.pth) |
| InternImage-G | - | 384x384 | 3B | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_g_pretrainto22k_384.pth) |

</div>
</details>

@@ -183,7 +190,7 @@ ADE20K, outperforming previous models by a large margin.

<div>

| name | pretrain | resolution | acc@1 | #param | FLOPs | download |
| :------------: | :----------: | :--------: | :---: | :----: | :---: | :------: |
| InternImage-T | ImageNet-1K | 224x224 | 83.5 | 30M | 5G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_t_1k_224.pth) \| [cfg](classification/configs/without_lr_decay/internimage_t_1k_224.yaml) |
| InternImage-S | ImageNet-1K | 224x224 | 84.2 | 50M | 8G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_s_1k_224.pth) \| [cfg](classification/configs/without_lr_decay/internimage_s_1k_224.yaml) |
| InternImage-B | ImageNet-1K | 224x224 | 84.9 | 97M | 16G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_b_1k_224.pth) \| [cfg](classification/configs/without_lr_decay/internimage_b_1k_224.yaml) |

@@ -191,6 +198,7 @@ ADE20K, outperforming previous models by a large margin.

| InternImage-XL | ImageNet-22K | 384x384 | 88.0 | 335M | 163G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_xl_22kto1k_384.pth) \| [cfg](classification/configs/without_lr_decay/internimage_xl_22kto1k_384.yaml) |
| InternImage-H | Joint 427M | 640x640 | 89.6 | 1.08B | 1478G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_h_22kto1k_640.pth) \| [cfg](classification/configs/without_lr_decay/internimage_h_22kto1k_640.yaml) |
| InternImage-G | - | 512x512 | 90.1 | 3B | 2700G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_g_22kto1k_512.pth) \| [cfg](classification/configs/without_lr_decay/internimage_g_22kto1k_512.yaml) |

</div>
</details>
@@ -201,7 +209,7 @@ ADE20K, outperforming previous models by a large margin.

<div>

| backbone | method | schd | box mAP | mask mAP | #param | FLOPs | download |
| :------------: | :--------: | :--: | :-----: | :------: | :----: | :---: | :------: |
| InternImage-T | Mask R-CNN | 1x | 47.2 | 42.5 | 49M | 270G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask_rcnn_internimage_t_fpn_1x_coco.pth) \| [cfg](detection/configs/coco/mask_rcnn_internimage_t_fpn_1x_coco.py) |
| InternImage-T | Mask R-CNN | 3x | 49.1 | 43.7 | 49M | 270G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask_rcnn_internimage_t_fpn_3x_coco.pth) \| [cfg](detection/configs/coco/mask_rcnn_internimage_t_fpn_3x_coco.py) |
| InternImage-S | Mask R-CNN | 1x | 47.8 | 43.3 | 69M | 340G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask_rcnn_internimage_s_fpn_1x_coco.pth) \| [cfg](detection/configs/coco/mask_rcnn_internimage_s_fpn_1x_coco.py) |

@@ -222,14 +230,13 @@ ADE20K, outperforming previous models by a large margin.

</details>

<details>
<summary> ADE20K Semantic Segmentation </summary>
<br>
<div>

| backbone | method | resolution | mIoU (ss/ms) | #param | FLOPs | download |
| :------------: | :---------: | :--------: | :----------: | :----: | :---: | :------: |
| InternImage-T | UperNet | 512x512 | 47.9 / 48.1 | 59M | 944G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/upernet_internimage_t_512_160k_ade20k.pth) \| [cfg](segmentation/configs/ade20k/upernet_internimage_t_512_160k_ade20k.py) |
| InternImage-S | UperNet | 512x512 | 50.1 / 50.9 | 80M | 1017G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/upernet_internimage_s_512_160k_ade20k.pth) \| [cfg](segmentation/configs/ade20k/upernet_internimage_s_512_160k_ade20k.py) |
| InternImage-B | UperNet | 512x512 | 50.8 / 51.3 | 128M | 1185G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/upernet_internimage_b_512_160k_ade20k.pth) \| [cfg](segmentation/configs/ade20k/upernet_internimage_b_512_160k_ade20k.py) |

@@ -262,6 +269,7 @@ ADE20K, outperforming previous models by a large margin.

| InternImage-XL | 384x384 | 335M | 163G | 47 |
Before using `mmdeploy` to convert our PyTorch models to TensorRT, please make sure you have the DCNv3 custom operator built correctly. You can build it with the following commands:

```shell
export MMDEPLOY_DIR=/the/root/path/of/MMDeploy

@@ -278,14 +286,13 @@ make -j$(nproc) && make install

cd ${MMDEPLOY_DIR}
pip install -e .
```
For more details on building custom ops, please refer to [this document](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/01-how-to-build/linux-x86_64.md).

</div>
</details>
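After the op is built, a conversion run typically looks like the following sketch. `tools/deploy.py` is mmdeploy's standard entry point; the deploy config, model config, checkpoint, and test image here are illustrative assumptions, not fixed by this repo:

```shell
cd ${MMDEPLOY_DIR}
# deploy config, model config, checkpoint, and test image are placeholders
python tools/deploy.py \
    configs/mmdet/detection/detection_tensorrt_static-800x1344.py \
    /path/to/mask_rcnn_internimage_t_fpn_1x_coco.py \
    /path/to/mask_rcnn_internimage_t_fpn_1x_coco.pth \
    demo/demo.jpg \
    --work-dir work_dirs/internimage_trt \
    --device cuda:0
```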
## Citations

If this work is helpful for your research, please consider citing the following BibTeX entry.

...
@@ -27,31 +27,37 @@

This repository is the official implementation of [InternImage: Exploring Large-Scale Vision Foundation Models with Deformable Convolutions](https://arxiv.org/abs/2211.05778).

\[[Paper](https://arxiv.org/abs/2211.05778)\] \[[Blog on Zhihu](https://zhuanlan.zhihu.com/p/610772005)\]

## Highlights

- :thumbsup: **The strongest general-purpose vision backbone, with up to 3 billion parameters**
- 🏆 **`90.1% Top1` accuracy on the ImageNet classification benchmark, the highest among open-source models**
- 🏆 **`65.5 mAP` on the COCO object detection benchmark, the only model to exceed `65 mAP`**

## Related Projects

### Multimodal Foundation Models

- [Uni-Perceiver](https://github.com/fundamentalvision/Uni-Perceiver): A unified pre-training framework for generic perception that directly handles zero-shot and few-shot tasks
- [Uni-Perceiver v2](https://arxiv.org/abs/2211.09808): A generalist model for image and image-text tasks
- [M3I-Pretraining](https://github.com/OpenGVLab/M3I-Pretraining): A one-stage pre-training paradigm based on maximizing the mutual information between inputs and targets

### Autonomous Driving

- [BEVFormer](https://github.com/fundamentalvision/BEVFormer): A new-generation camera-only surround-view perception framework based on BEV
- [BEVFormer v2](https://arxiv.org/abs/2211.10439): A two-stage detector that fuses BEV perception with perspective-view detection

## Application in Challenges

- [2022 Waymo 3D Camera-Only Detection Challenge](https://waymo.com/open/challenges/2022/3d-camera-only-detection/): BEVFormer++, built on INTERN-2.5, won the track
- [nuScenes 3D detection task](https://www.nuscenes.org/object-detection?externalData=all&mapData=all&modalities=Camera): BEVFormer v2 achieves SOTA performance (64.8 NDS) on the nuScenes camera-only detection task
- [CVPR 2023 Workshop End-to-End Autonomous Driving](https://opendrivelab.com/e2ead/cvpr23): InternImage serves as the baseline for the [3D Occupancy Prediction Challenge](https://opendrivelab.com/AD23Challenge.html#Track3) and the [OpenLane Topology Challenge](https://opendrivelab.com/AD23Challenge.html#Track1)

## News

- `Mar 14, 2023`: 🚀 "INTERN-2.5" is released!
- `Feb 28, 2023`: 🚀 InternImage is accepted to CVPR 2023!
- `Nov 18, 2022`: 🚀 Built on the InternImage-XL backbone, [BEVFormer v2](https://arxiv.org/abs/2211.10439) achieves the best camera-only 3D detection performance on nuScenes: `63.4 NDS`

@@ -59,6 +65,7 @@

- `Nov 10, 2022`: 🚀 InternImage-H achieves SOTA performance of `62.9 mIoU` on the ADE20K semantic segmentation benchmark!

## Features

- [ ] Various downstream tasks
- [ ] Support [CVPR 2023 Workshop on End-to-End Autonomous Driving](https://opendrivelab.com/e2ead/cvpr23), see [here](https://github.com/OpenGVLab/InternImage/tree/master/autonomous_driving)
- [ ] Support Segment Anything
@@ -73,18 +80,18 @@

- [x] InternImage-T/S/B/L/XL detection and instance segmentation models
- [x] InternImage-T/S/B/L/XL semantic segmentation models

## Introduction

"INTERN-2.5" is a multimodal multitask general large model jointly released by SenseTime and Shanghai AI Laboratory. It consists of the large-scale vision foundation model "InternImage", the pre-training method "M3I-Pretraining", the generic decoder "Uni-Perceiver" series, and the generic autonomous-driving perception encoder "BEVFormer" series.

<div align=left>
<img src='./docs/figs/intern_pipeline.png' width=900>
</div>

## Applications of "INTERN-2.5"

### 1. Performance on image-modality tasks

- On the ImageNet classification benchmark, "INTERN-2.5" reaches 90.1% Top-1 accuracy using only publicly available data. Apart from two undisclosed models from Google and Microsoft trained with additional private datasets, it is the only model exceeding 90.0% accuracy, and it is also the most accurate and largest open-source model on ImageNet;
- On the COCO object detection benchmark, "INTERN-2.5" achieves 65.5 mAP, the only model in the world to exceed 65 mAP;
- It achieves world-best performance on 16 other important vision benchmark datasets covering classification, detection, and segmentation tasks.

@@ -93,6 +100,7 @@

<br>

**Classification**

<table border="1" width="90%">
<tr align="center">
<th colspan="1"> Image Classification</th><th colspan="2"> Scene Classification </th><th colspan="1">Long-Tail Classification</th>

@@ -106,8 +114,8 @@

</table>
<br>

**Detection**

<table border="1" width="90%">
<tr align="center">
<th colspan="4"> Generic Object Detection</th><th colspan="2">Long-Tail Object Detection </th><th colspan="2">Autonomous-Driving Object Detection</th><th colspan="1">Dense Object Detection</th>

@@ -122,6 +130,7 @@

<br>

**Segmentation**

<table border="1" width="90%">
<tr align="center">
<th colspan="3">Semantic Segmentation</th><th colspan="1">Street-Scene Segmentation</th><th colspan="1">RGBD Segmentation</th>

@@ -143,17 +152,15 @@
"INTERN-2.5" can quickly locate and retrieve the images most semantically relevant to a text query. This capability applies to both videos and image collections, can be further combined with object detection boxes, and supports a rich set of applications, helping users find the image resources they need quickly and conveniently; for example, it can return the images specified by a text query within an album.

- Image-to-text

"INTERN-2.5" has strong understanding capabilities across image-to-text tasks such as image captioning, visual question answering, visual reasoning, and text recognition. For example, in autonomous driving scenarios it can improve scene perception and understanding, assist the vehicle in judging traffic-light states, road signs, and other information, and provide effective perceptual support for vehicle planning and decision-making.

<div align="left">
<br>

**Image-Text Multimodal Tasks**

<table border="1" width="90%">
<tr align="center">
<th colspan="1">Image Captioning</th><th colspan="2">Fine-tuned Image-Text Retrieval</th><th colspan="1">Zero-Shot Image-Text Retrieval</th>

@@ -169,7 +176,6 @@

</div>

## Pretrained Models
<details>

@@ -183,6 +189,7 @@

| InternImage-XL | ImageNet-22K | 384x384 | 335M | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_xl_22k_192to384.pth) |
| InternImage-H | Joint 427M | 384x384 | 1.08B | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_h_jointto22k_384.pth) |
| InternImage-G | - | 384x384 | 3B | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_g_pretrainto22k_384.pth) |

</div>
</details>

@@ -193,7 +200,7 @@

<div>

| name | pretrain | resolution | acc@1 | #param | FLOPs | download |
| :------------: | :----------: | :--------: | :---: | :----: | :---: | :------: |
| InternImage-T | ImageNet-1K | 224x224 | 83.5 | 30M | 5G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_t_1k_224.pth) \| [cfg](classification/configs/without_lr_decay/internimage_t_1k_224.yaml) |
| InternImage-S | ImageNet-1K | 224x224 | 84.2 | 50M | 8G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_s_1k_224.pth) \| [cfg](classification/configs/without_lr_decay/internimage_s_1k_224.yaml) |
| InternImage-B | ImageNet-1K | 224x224 | 84.9 | 97M | 16G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_b_1k_224.pth) \| [cfg](classification/configs/without_lr_decay/internimage_b_1k_224.yaml) |

@@ -201,6 +208,7 @@

| InternImage-XL | ImageNet-22K | 384x384 | 88.0 | 335M | 163G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_xl_22kto1k_384.pth) \| [cfg](classification/configs/without_lr_decay/internimage_xl_22kto1k_384.yaml) |
| InternImage-H | Joint 427M | 640x640 | 89.6 | 1.08B | 1478G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_h_22kto1k_640.pth) \| [cfg](classification/configs/without_lr_decay/internimage_h_22kto1k_640.yaml) |
| InternImage-G | - | 512x512 | 90.1 | 3B | 2700G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_g_22kto1k_512.pth) \| [cfg](classification/configs/without_lr_decay/internimage_g_22kto1k_512.yaml) |

</div>
</details>

@@ -211,7 +219,7 @@

<div>

| backbone | method | schd | box mAP | mask mAP | #param | FLOPs | download |
| :------------: | :--------: | :--: | :-----: | :------: | :----: | :---: | :------: |
| InternImage-T | Mask R-CNN | 1x | 47.2 | 42.5 | 49M | 270G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask_rcnn_internimage_t_fpn_1x_coco.pth) \| [cfg](detection/configs/coco/mask_rcnn_internimage_t_fpn_1x_coco.py) |
| InternImage-T | Mask R-CNN | 3x | 49.1 | 43.7 | 49M | 270G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask_rcnn_internimage_t_fpn_3x_coco.pth) \| [cfg](detection/configs/coco/mask_rcnn_internimage_t_fpn_3x_coco.py) |
| InternImage-S | Mask R-CNN | 1x | 47.8 | 43.3 | 69M | 340G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask_rcnn_internimage_s_fpn_1x_coco.pth) \| [cfg](detection/configs/coco/mask_rcnn_internimage_s_fpn_1x_coco.py) |

@@ -232,14 +240,13 @@

</details>

<details>
<summary> ADE20K Semantic Segmentation </summary>
<br>
<div>

| backbone | method | resolution | mIoU (ss/ms) | #param | FLOPs | download |
| :------------: | :---------: | :--------: | :----------: | :----: | :---: | :------: |
| InternImage-T | UperNet | 512x512 | 47.9 / 48.1 | 59M | 944G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/upernet_internimage_t_512_160k_ade20k.pth) \| [cfg](segmentation/configs/ade20k/upernet_internimage_t_512_160k_ade20k.py) |
| InternImage-S | UperNet | 512x512 | 50.1 / 50.9 | 80M | 1017G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/upernet_internimage_s_512_160k_ade20k.pth) \| [cfg](segmentation/configs/ade20k/upernet_internimage_s_512_160k_ade20k.py) |
| InternImage-B | UperNet | 512x512 | 50.8 / 51.3 | 128M | 1185G | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/upernet_internimage_b_512_160k_ade20k.pth) \| [cfg](segmentation/configs/ade20k/upernet_internimage_b_512_160k_ade20k.py) |

@@ -357,5 +364,4 @@ pip install -e .

<div align=left>

[//]: # (<img src='./docs/figs/log.png' width=600>)

</div>
<div id="top" align="center"> <div id="top" align="center">
# InternImage-based Baseline for Online HD Map Construction Challenge For Autonomous Driving # InternImage-based Baseline for Online HD Map Construction Challenge For Autonomous Driving
</div>
</div>
If you need detailed information about the challenge, please refer to https://github.com/Tsinghua-MARS-Lab/Online-HD-Map-Construction-CVPR2023/tree/master
### 1. Requirements

```bash
python>=3.8
torch==1.11  # recommended

@@ -18,8 +20,8 @@ numpy==1.23.5

mmdet3d==1.0.0rc6  # recommended
```
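A minimal environment sketch matching the pins above (the conda environment name and package sources are assumptions; adjust to your setup):

```bash
conda create -n hdmap python=3.8 -y
conda activate hdmap
pip install torch==1.11.0 numpy==1.23.5
# mmdet3d typically needs mmcv-full and mmdet installed first; the version
# pinned here is the one this README recommends.
pip install mmdet3d==1.0.0rc6
```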
### 2. Install DCNv3 for InternImage

```bash
cd projects/ops_dcnv3
bash make.sh  # requires torch>=1.10

@@ -33,21 +35,17 @@ bash tools/dist_train.sh src/configs/vectormapnet_intern.py ${NUM_GPUS}
```
Note: InternImage provides abundant pre-trained model weights that can be used!
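For example, a hypothetical way to point the baseline's backbone at one of those released weights; the `init_cfg`/`Pretrained` pattern is standard in mmdet-style configs, but the exact key layout of `vectormapnet_intern.py` is an assumption here:

```python
# Hypothetical config override; the backbone key layout is assumed, and the
# checkpoint URL is one of the released InternImage-T classification weights.
model = dict(
    backbone=dict(
        init_cfg=dict(
            type='Pretrained',
            checkpoint='https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_t_1k_224.pth')))
```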
### 4. Performance compared to the baseline

| model name | weight | $\mathrm{mAP}$ | $\mathrm{AP}_{pc}$ | $\mathrm{AP}_{div}$ | $\mathrm{AP}_{bound}$ |
| ------------------- | :----: | :------------: | :----------------: | :-----------------: | :-------------------: |
| vectormapnet_intern | [Checkpoint](https://github.com/OpenGVLab/InternImage/releases/download/track_model/vectormapnet_internimage.pth) | 49.35 | 45.05 | 56.78 | 46.22 |
| vectormapnet_base | [Google Drive](https://drive.google.com/file/d/16D1CMinwA8PG1sd9PV9_WtHzcBohvO-D/view) | 42.79 | 37.22 | 50.47 | 40.68 |
## Citation

The evaluation metrics of this challenge follow [HDMapNet](https://arxiv.org/abs/2107.06307). We provide [VectorMapNet](https://arxiv.org/abs/2206.08920) as the baseline. Please cite:

```
@article{li2021hdmapnet,

@@ -69,8 +67,8 @@ Our dataset is built on top of the [Argoverse 2](https://www.argoverse.org/av2.h

}
```
## License

Before participating in our challenge, you should register on the website and agree to the terms of use of the [Argoverse 2](https://www.argoverse.org/av2.html) dataset. All code in this project is released under [GNU General Public License v3.0](./LICENSE).
from .models import *
from .datasets import *
\ No newline at end of file
@@ -125,8 +125,7 @@ data = dict(

classes=class_names,
test_mode=True,
ignore_index=len(class_names),
scene_idxs=data_root + f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'),
test=dict(
type=dataset_type,
data_root=data_root,

...
@@ -25,7 +25,7 @@ model = dict(

in_channels=256,
num_points=256,
gt_per_seed=1,
conv_channels=(128,),
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.1),
with_res_feat=False,

@@ -43,8 +43,8 @@ model = dict(

pred_layer_cfg=dict(
in_channels=1536,
shared_conv_channels=(512, 128),
cls_conv_channels=(128,),
reg_conv_channels=(128,),
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.1),
bias=True),

...
@@ -31,16 +31,16 @@ model = dict(

dir_offset=0.7854,  # pi/4
strides=[8, 16, 32, 64, 128],
group_reg_dims=(2, 1, 3, 1, 2),  # offset, depth, size, rot, velo
cls_branch=(256,),
reg_branch=(
(256,),  # offset
(256,),  # depth
(256,),  # size
(256,),  # rot
()  # velo
),
dir_branch=(256,),
attr_branch=(256,),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,

...
@@ -30,7 +30,7 @@ plugin_dir = 'src/'

img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
img_size = (int(128 * 2), int((16 / 9 * 128) * 2))

# category configs
cat2id = {
@@ -85,9 +85,9 @@ model = dict(

upsample=dict(
zoom_size=(1, 2, 4, 8),
in_channels=128,
out_channels=128, ),
xbound=[-roi_size[0] / 2, roi_size[0] / 2, roi_size[0] / canvas_size[0]],
ybound=[-roi_size[1] / 2, roi_size[1] / 2, roi_size[1] / canvas_size[1]],
heights=[-1.1, 0, 0.5, 1.1],
out_channels=128,
pretrained=None,
@@ -97,7 +97,7 @@ model = dict(

type='DGHead',
augmentation=True,
augmentation_kwargs=dict(
p=0.3, scale=0.01,
bbox_type='xyxy',
),
det_net_cfg=dict(
@@ -135,7 +135,7 @@ model = dict(

num_heads=8,
attn_drop=0.1,
proj_drop=0.1,
dropout_layer=dict(type='Dropout', drop_prob=0.1), ),
dict(
type='MultiScaleDeformableAttention',
embed_dims=head_dim,
@@ -146,19 +146,19 @@ model = dict(

ffn_cfgs=dict(
type='FFN',
embed_dims=head_dim,
feedforward_channels=head_dim * 2,
num_fcs=2,
ffn_drop=0.1,
act_cfg=dict(type='ReLU', inplace=True),
),
feedforward_channels=head_dim * 2,
ffn_dropout=0.1,
operation_order=('norm', 'self_attn', 'norm', 'cross_attn',
'norm', 'ffn',)))
),
positional_encoding=dict(
type='SinePositionalEncoding',
num_feats=head_dim // 2,
normalize=True,
offset=-0.5),
loss_cls=dict(
@@ -177,7 +177,7 @@ model = dict(

type='MapQueriesCost',
cls_cost=dict(type='FocalLossCost', weight=2.0),
reg_cost=dict(type='BBoxCostC', weight=0.1),  # continues
iou_cost=dict(type='IoUCostC', weight=1, box_format='xyxy'),  # continues
),
),
),
@@ -199,7 +199,7 @@ model = dict(

},
class_conditional=True,
num_classes=num_class,
canvas_size=canvas_size,  # xy
max_seq_length=500,
decoder_cross_attention=False,
use_discrete_vertex_embeddings=True,
@@ -226,11 +226,11 @@ train_pipeline = [

canvas_size=canvas_size,  # xy
coord_dim=2,
num_class=num_class,
threshold=4 / 200,
),
dict(type='LoadMultiViewImagesFromFiles'),
dict(type='ResizeMultiViewImages',
size=(int(128 * 2), int((16 / 9 * 128) * 2)),  # H, W
change_intrinsics=True,
),
dict(type='Normalize3D', **img_norm_cfg),

...
@@ -28,11 +28,11 @@ plugin_dir = 'src/'

# img configs
# img_norm_cfg = dict(
#     mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_size = (int(128 * 2), int((16 / 9 * 128) * 2))

# category configs
cat2id = {
@@ -89,9 +89,9 @@ model = dict(

upsample=dict(
zoom_size=(1, 2, 4, 8),
in_channels=128,
out_channels=128, ),
xbound=[-roi_size[0] / 2, roi_size[0] / 2, roi_size[0] / canvas_size[0]],
ybound=[-roi_size[1] / 2, roi_size[1] / 2, roi_size[1] / canvas_size[1]],
heights=[-1.1, 0, 0.5, 1.1],
out_channels=128,
pretrained=None,
@@ -101,7 +101,7 @@ model = dict(

type='DGHead',
augmentation=True,
augmentation_kwargs=dict(
p=0.3, scale=0.01,
bbox_type='xyxy',
),
det_net_cfg=dict(
@@ -139,7 +139,7 @@ model = dict(

num_heads=8,
attn_drop=0.1,
proj_drop=0.1,
dropout_layer=dict(type='Dropout', drop_prob=0.1), ),
dict(
type='MultiScaleDeformableAttention',
embed_dims=head_dim,
@@ -150,19 +150,19 @@ model = dict(

ffn_cfgs=dict(
type='FFN',
embed_dims=head_dim,
feedforward_channels=head_dim * 2,
num_fcs=2,
ffn_drop=0.1,
act_cfg=dict(type='ReLU', inplace=True),
),
feedforward_channels=head_dim * 2,
ffn_dropout=0.1,
operation_order=('norm', 'self_attn', 'norm', 'cross_attn',
'norm', 'ffn',)))
),
positional_encoding=dict(
type='SinePositionalEncoding',
num_feats=head_dim // 2,
normalize=True,
offset=-0.5),
loss_cls=dict(
@@ -181,7 +181,7 @@ model = dict(

type='MapQueriesCost',
cls_cost=dict(type='FocalLossCost', weight=2.0),
reg_cost=dict(type='BBoxCostC', weight=0.1),  # continues
iou_cost=dict(type='IoUCostC', weight=1, box_format='xyxy'),  # continues
),
),
),
@@ -203,7 +203,7 @@ model = dict(

},
class_conditional=True,
num_classes=num_class,
canvas_size=canvas_size,  # xy
max_seq_length=500,
decoder_cross_attention=False,
use_discrete_vertex_embeddings=True,
@@ -230,11 +230,11 @@ train_pipeline = [

canvas_size=canvas_size,  # xy
coord_dim=2,
num_class=num_class,
threshold=4 / 200,
),
dict(type='LoadMultiViewImagesFromFiles'),
dict(type='ResizeMultiViewImages',
size=(int(128 * 2), int((16 / 9 * 128) * 2)),  # H, W
change_intrinsics=True,
),
dict(type='Normalize3D', **img_norm_cfg),

...
from .pipelines import *
from .argo_dataset import AV2Dataset
\ No newline at end of file
import os
from time import time

import mmcv
import numpy as np
from mmdet.datasets import DATASETS
from shapely.geometry import LineString

from .base_dataset import BaseMapDataset
@DATASETS.register_module()
class AV2Dataset(BaseMapDataset):
    """Argoverse2 map dataset class.
@@ -22,7 +25,7 @@ class AV2Dataset(BaseMapDataset):

        test_mode (bool): whether in test mode
    """

    def __init__(self, **kwargs, ):
        super().__init__(**kwargs)

    def load_annotations(self, ann_file):

...
import os
import os.path as osp
import warnings

import mmcv
import numpy as np
from mmdet3d.datasets.pipelines import Compose
from mmdet.datasets import DATASETS
from torch.utils.data import Dataset

from .evaluation.vector_eval import VectorEvaluate

warnings.filterwarnings('ignore')
@DATASETS.register_module()
class BaseMapDataset(Dataset):

@@ -26,6 +28,7 @@ class BaseMapDataset(Dataset):

        work_dir (str): path to work dir
        test_mode (bool): whether in test mode
    """

    def __init__(self,
                 ann_file,
                 root_path,
...@@ -110,7 +113,7 @@ class BaseMapDataset(Dataset): ...@@ -110,7 +113,7 @@ class BaseMapDataset(Dataset):
single_case = {'vectors': [], 'scores': [], 'labels': []} single_case = {'vectors': [], 'scores': [], 'labels': []}
token = pred['token'] token = pred['token']
roi_size = np.array(self.roi_size) roi_size = np.array(self.roi_size)
origin = -np.array([self.roi_size[0]/2, self.roi_size[1]/2]) origin = -np.array([self.roi_size[0] / 2, self.roi_size[1] / 2])
for i in range(len(pred['scores'])): for i in range(len(pred['scores'])):
score = pred['scores'][i] score = pred['scores'][i]
...@@ -183,4 +186,3 @@ class BaseMapDataset(Dataset): ...@@ -183,4 +186,3 @@ class BaseMapDataset(Dataset):
input_dict = self.get_sample(idx) input_dict = self.get_sample(idx)
data = self.pipeline(input_dict) data = self.pipeline(input_dict)
return data return data
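The `origin = -np.array([self.roi_size[0] / 2, self.roi_size[1] / 2])` line implies that predictions live in an ego-centred region of interest; subtracting `origin` shifts them into a non-negative frame. A small sketch with an assumed `roi_size` (the real value comes from the dataset config):

```python
import numpy as np

roi_size = np.array([60.0, 30.0])  # assumed (x, y) extent in metres
origin = -np.array([roi_size[0] / 2, roi_size[1] / 2])

# an ego-centred polyline, x in [-30, 30], y in [-15, 15]
vector_ego = np.array([[-5.0, 2.0], [10.0, -3.0]])
vector_roi = vector_ego - origin   # shifted into [0, 60] x [0, 30]
print(vector_roi)                  # [[25. 17.] [40. 12.]]
```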
-import numpy as np
-from .distance import chamfer_distance, frechet_distance
 from typing import List, Tuple, Union
+import numpy as np
 from numpy.typing import NDArray
+from .distance import chamfer_distance, frechet_distance
 def average_precision(recalls, precisions, mode='area'):
     """Calculate average precision.
@@ -48,11 +51,12 @@ def average_precision(recalls, precisions, mode='area'):
     return ap
+
 def instance_match(pred_lines: List[NDArray],
                    scores: NDArray,
                    gt_lines: List[NDArray],
                    thresholds: Union[Tuple, List],
-                   metric: str='chamfer') -> List:
+                   metric: str = 'chamfer') -> List:
     """Compute whether detected lines are true positive or false positive.
     Args:
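The body of `average_precision` is truncated in this hunk; in `'area'` mode, mmdet-style implementations make the precision curve monotonically non-increasing over recall and then integrate it. A sketch of that standard computation — not necessarily this repository's exact code:

```python
import numpy as np

def average_precision_area(recalls: np.ndarray, precisions: np.ndarray) -> float:
    # pad the curve, enforce a decreasing precision envelope, then sum
    # precision * recall-step over the points where recall increases
    mrec = np.concatenate(([0.0], recalls, [1.0]))
    mpre = np.concatenate(([0.0], precisions, [0.0]))
    for i in range(len(mpre) - 2, -1, -1):
        mpre[i] = max(mpre[i], mpre[i + 1])
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return float(((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1]).sum())
```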
...
-from scipy.spatial import distance
 from numpy.typing import NDArray
+from scipy.spatial import distance
 def chamfer_distance(line1: NDArray, line2: NDArray) -> float:
     ''' Calculate chamfer distance between two lines. Make sure the
@@ -19,6 +20,7 @@ def chamfer_distance(line1: NDArray, line2: NDArray) -> float:
     return (dist12 + dist21) / 2
+
 def frechet_distance(line1: NDArray, line2: NDArray) -> float:
     ''' Calculate frechet distance between two lines. Make sure the
     lines are interpolated.
@@ -32,4 +34,3 @@ def frechet_distance(line1: NDArray, line2: NDArray) -> float:
     '''
     raise NotImplementedError
-
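Only the final `return (dist12 + dist21) / 2` of `chamfer_distance` survives the truncation above. With `scipy.spatial.distance` imported, the symmetric chamfer distance over pre-interpolated polylines is plausibly computed as below — a hedged reconstruction, not the verified original body:

```python
import numpy as np
from numpy.typing import NDArray
from scipy.spatial import distance

def chamfer_distance(line1: NDArray, line2: NDArray) -> float:
    # hedged reconstruction of the truncated body:
    # pairwise Euclidean distances between the two sampled point sets
    d = distance.cdist(line1, line2)
    dist12 = d.min(axis=1).mean()  # mean nearest-neighbour dist, line1 -> line2
    dist21 = d.min(axis=0).mean()  # mean nearest-neighbour dist, line2 -> line1
    return (dist12 + dist21) / 2
```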
 from functools import partial
-import numpy as np
+from logging import Logger
 from multiprocessing import Pool
-from mmdet3d.datasets import build_dataset, build_dataloader
+from time import time
+from typing import Dict, List, Optional
 import mmcv
-from .AP import instance_match, average_precision
+import numpy as np
 import prettytable
-from time import time
-from functools import cached_property
-from shapely.geometry import LineString
 from numpy.typing import NDArray
-from typing import Dict, List, Optional
-from logging import Logger
-from mmcv import Config
-from copy import deepcopy
+from shapely.geometry import LineString
+from .AP import average_precision, instance_match
 INTERP_NUM = 100  # number of points to interpolate during evaluation
 SAMPLE_DIST = 0.3  # fixed sample distance
@@ -25,6 +23,7 @@ CAT2ID = {
     'boundary': 2,
 }
+
 class VectorEvaluate(object):
     """Evaluator for vectorized map.
@@ -33,7 +32,7 @@ class VectorEvaluate(object):
         n_workers (int): num workers to parallel
     """
-    def __init__(self, ann_file, n_workers: int=N_WORKERS) -> None:
+    def __init__(self, ann_file, n_workers: int = N_WORKERS) -> None:
         ann = mmcv.load(ann_file)
         gts = {}
         for seg_id, seq in ann.items():
@@ -84,7 +83,7 @@ class VectorEvaluate(object):
         line = LineString(vector)
         distances = list(np.arange(sample_dist, line.length, sample_dist))
         # make sure to sample at least two points when sample_dist > line.length
-        distances = [0,] + distances + [line.length,]
+        distances = [0, ] + distances + [line.length, ]
         sampled_points = np.array([list(line.interpolate(distance).coords)
                                    for distance in distances]).squeeze()
@@ -96,7 +95,7 @@ class VectorEvaluate(object):
                      scores: List,
                      groundtruth: List,
                      thresholds: List,
-                     metric: str='metric') -> Dict[int, NDArray]:
+                     metric: str = 'metric') -> Dict[int, NDArray]:
         ''' Do single-frame matching for one class.
         Args:
@@ -138,8 +137,8 @@ class VectorEvaluate(object):
     def evaluate(self,
                  result_path: str,
-                 metric: str='chamfer',
-                 logger: Optional[Logger]=None) -> Dict[str, float]:
+                 metric: str = 'chamfer',
+                 logger: Optional[Logger] = None) -> Dict[str, float]:
         ''' Do evaluation for a submission file and print evaluation results to `logger` if specified.
         The submission will be aligned by tokens before evaluation. We use multi-worker to speed up.
@@ -241,7 +240,7 @@ class VectorEvaluate(object):
         mAP = sum_mAP / len(self.id2cat.keys())
         result_dict.update({'mAP': mAP})
-        print(f"finished in {time() - start:.2f}s")
+        print(f'finished in {time() - start:.2f}s')
         # print results
         table = prettytable.PrettyTable(['category', 'num_preds', 'num_gts'] +
@@ -256,7 +255,7 @@ class VectorEvaluate(object):
         ])
         from mmcv.utils import print_log
-        print_log('\n'+str(table), logger=logger)
+        print_log('\n' + str(table), logger=logger)
         print_log(f'mAP = {mAP:.4f}\n', logger=logger)
         new_result_dict = {}
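The resampling step visible in the `@@ -84,7 +83,7 @@` hunk can be read end to end: points are taken every `SAMPLE_DIST` metres along a shapely `LineString`, with both endpoints always included so that even lines shorter than the sample distance still yield two points. Gathered into one self-contained function:

```python
import numpy as np
from shapely.geometry import LineString

SAMPLE_DIST = 0.3  # fixed sample distance, as defined above

def sample_polyline(vector: np.ndarray, sample_dist: float = SAMPLE_DIST) -> np.ndarray:
    line = LineString(vector)
    distances = list(np.arange(sample_dist, line.length, sample_dist))
    # make sure to sample at least two points when sample_dist > line.length
    distances = [0, ] + distances + [line.length, ]
    return np.array([list(line.interpolate(d).coords)
                     for d in distances]).squeeze()
```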
...
-from .loading import LoadMultiViewImagesFromFiles
 from .formating import FormatBundleMap
-from .transform import ResizeMultiViewImages, PadMultiViewImages, Normalize3D
-from .vectorize import VectorizeMap
+from .loading import LoadMultiViewImagesFromFiles
 from .poly_bbox import PolygonizeLocalMapBbox
+from .transform import Normalize3D, PadMultiViewImages, ResizeMultiViewImages
+from .vectorize import VectorizeMap
 # for argoverse
 __all__ = [
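The `__all__` list itself is cut off by the diff; a completion consistent with the imports above would look like the following — hypothetical, since the real list may differ or be subsetted for Argoverse:

```python
# hypothetical completion; the actual __all__ is truncated in the diff
__all__ = [
    'FormatBundleMap', 'LoadMultiViewImagesFromFiles',
    'PolygonizeLocalMapBbox', 'Normalize3D', 'PadMultiViewImages',
    'ResizeMultiViewImages', 'VectorizeMap',
]
```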
...
 import numpy as np
 from mmcv.parallel import DataContainer as DC
 from mmdet3d.core.points import BasePoints
 from mmdet.datasets.builder import PIPELINES
 from mmdet.datasets.pipelines import to_tensor
 @PIPELINES.register_module()
 class FormatBundleMap(object):
     """Format data for map tasks and then collect data for model input.