Commit f3b13cad authored by yeshenglong1's avatar yeshenglong1
Browse files

Update README.md

parent 0797920d
import argparse
import mmcv
from mmcv import Config
import os
from renderer import Renderer
# Map-element category names -> integer class ids used in prediction files.
CAT2ID = {
'ped_crossing': 0,
'divider': 1,
'boundary': 2,
}
# Inverse lookup: class id -> category name (used when bucketing predictions).
ID2CAT = {v: k for k, v in CAT2ID.items()}
# Bird's-eye-view region of interest (longitudinal, lateral) — presumably
# meters; TODO confirm units against Renderer.
ROI_SIZE = (60, 30)
def parse_args():
    """Build and parse the command-line interface.

    Returns:
        argparse.Namespace with ``log_id``, ``ann_file``, ``result``,
        ``thr`` and ``out_dir`` attributes.
    """
    parser = argparse.ArgumentParser(
        description='Visualize groundtruth and results')
    parser.add_argument('log_id', type=str,
                        help='log_id of data to visualize')
    parser.add_argument('ann_file', help='gt file to visualize')
    parser.add_argument('--result', type=str,
                        help='prediction result to visualize')
    parser.add_argument('--thr', type=float, default=0,
                        help='score threshold to filter predictions')
    parser.add_argument('--out-dir', default='demo',
                        help='directory where visualize results will be saved')
    return parser.parse_args()
def import_plugin(cfg):
    """Import plugin modules named in the config so their registry entries
    are registered as an import side effect.

    Args:
        cfg: an mmcv ``Config`` (or any object). Plugins are imported only
            when ``cfg.plugin`` is truthy; their location comes from
            ``cfg.plugin_dir`` (str or list of str), falling back to the
            directory of the config file itself.

    Raises:
        ValueError: if ``cfg.plugin`` is set but no plugin location can be
            determined.
    """
    import sys
    sys.path.append(os.path.abspath('.'))
    if not getattr(cfg, 'plugin', False):
        return
    import importlib

    def _import_dir(path):
        # Convert a path like 'plugin/models/x.py' into the dotted module
        # path of its directory ('plugin.models') and import it.
        module_path = os.path.dirname(path).replace('/', '.')
        print(f'importing {module_path}/')
        importlib.import_module(module_path)

    if hasattr(cfg, 'plugin_dir'):
        plugin_dirs = cfg.plugin_dir
        if not isinstance(plugin_dirs, list):
            plugin_dirs = [plugin_dirs]
        for plugin_dir in plugin_dirs:
            _import_dir(plugin_dir)
    else:
        # BUG FIX: the original referenced an undefined ``args.config``
        # here (NameError — ``args`` is neither a parameter nor guaranteed
        # global, and the CLI namespace has no ``config`` attribute).
        # Fall back to the config file's own directory instead; mmcv
        # Config exposes it as ``filename``.
        config_path = getattr(cfg, 'filename', None)
        if config_path is None:
            raise ValueError(
                'cfg.plugin is set but neither cfg.plugin_dir nor '
                'cfg.filename is available to locate the plugin module')
        _import_dir(config_path)
def main(args):
    """Render BEV and camera-view visualizations for one log.

    Args:
        args: parsed CLI namespace with ``log_id``, ``ann_file``,
            ``result``, ``thr`` and ``out_dir`` (see ``parse_args``).
    """
    log_id = args.log_id
    ann = mmcv.load(args.ann_file)
    # Image paths in the annotation are relative to the annotation file's
    # directory (under an 'argoverse2' subfolder).
    root_path = os.path.dirname(args.ann_file)
    out_dir = os.path.join(args.out_dir, str(log_id))
    log_ann = ann[log_id]
    renderer = Renderer(roi_size=ROI_SIZE)
    if args.result:
        result = mmcv.load(args.result)['results']
    for frame in mmcv.track_iter_progress(log_ann):
        # NOTE(review): timestamp is joined into a path below, so it is
        # assumed to be a str — TODO confirm against the annotation format.
        timestamp = frame['timestamp']
        sensor = frame['sensor']
        annotation = frame['annotation']
        imgs = [mmcv.imread(os.path.join(root_path, 'argoverse2', i['image_path'])) for i in sensor.values()]
        extrinsics = [i['extrinsic'] for i in sensor.values()]
        intrinsics = [i['intrinsic'] for i in sensor.values()]
        # Ground-truth renderings go to <out_dir>/<timestamp>/gt.
        frame_dir = os.path.join(out_dir, timestamp, 'gt')
        os.makedirs(frame_dir, exist_ok=True)
        renderer.render_bev_from_vectors(annotation, out_dir=frame_dir)
        # The literal 4 is passed straight to the renderer — presumably a
        # thickness/line-width parameter; confirm against Renderer's API.
        renderer.render_camera_views_from_vectors(annotation, imgs, extrinsics,
                                                  intrinsics, 4, frame_dir)
        if args.result:
            # Bucket predictions above the score threshold by category name.
            pred = result[timestamp]
            vectors = {cat: [] for cat in CAT2ID.keys()}
            for i in range(len(pred['labels'])):
                score = pred['scores'][i]
                label = pred['labels'][i]
                v = pred['vectors'][i]
                if score > args.thr:
                    vectors[ID2CAT[label]].append(v)
            # Prediction renderings go to <out_dir>/<timestamp>/pred.
            frame_dir = os.path.join(out_dir, timestamp, 'pred')
            os.makedirs(frame_dir, exist_ok=True)
            renderer.render_bev_from_vectors(vectors, out_dir=frame_dir)
            renderer.render_camera_views_from_vectors(vectors, imgs,
                                                      extrinsics, intrinsics, 4, frame_dir)
if __name__ == '__main__':
args = parse_args()
import argparse
import mmcv
from mmcv import Config
import os
from renderer import Renderer
# Category-name -> class-id mapping shared between annotations and results.
CAT2ID = {
'ped_crossing': 0,
'divider': 1,
'boundary': 2,
}
# Reverse mapping so predicted integer labels can be named.
ID2CAT = {v: k for k, v in CAT2ID.items()}
# BEV region of interest (longitudinal, lateral) — presumably meters;
# TODO confirm units against Renderer.
ROI_SIZE = (60, 30)
def parse_args():
    """Define and parse the CLI options for the visualization script."""
    cli = argparse.ArgumentParser(
        description='Visualize groundtruth and results')
    # Positional arguments (order matters for callers).
    cli.add_argument('log_id', type=str,
                     help='log_id of data to visualize')
    cli.add_argument('ann_file',
                     help='gt file to visualize')
    # Optional arguments.
    cli.add_argument('--result', type=str,
                     help='prediction result to visualize')
    cli.add_argument('--thr', type=float, default=0,
                     help='score threshold to filter predictions')
    cli.add_argument('--out-dir', default='demo',
                     help='directory where visualize results will be saved')
    return cli.parse_args()
def import_plugin(cfg):
    """Import the config's plugin modules so registry entries get registered.

    Args:
        cfg: an mmcv ``Config`` (or any object). Nothing happens unless
            ``cfg.plugin`` is truthy. ``cfg.plugin_dir`` may be a str or a
            list of str; when absent, the config file's own directory is
            used as the plugin location.

    Raises:
        ValueError: if ``cfg.plugin`` is set but no plugin location exists.
    """
    import sys
    sys.path.append(os.path.abspath('.'))
    if not getattr(cfg, 'plugin', False):
        return
    import importlib

    def _import_dir(path):
        # 'plugin/models/x.py' -> dotted module path of its directory,
        # 'plugin.models', then import it for its registration side effects.
        module_path = os.path.dirname(path).replace('/', '.')
        print(f'importing {module_path}/')
        importlib.import_module(module_path)

    if hasattr(cfg, 'plugin_dir'):
        plugin_dirs = cfg.plugin_dir
        if not isinstance(plugin_dirs, list):
            plugin_dirs = [plugin_dirs]
        for plugin_dir in plugin_dirs:
            _import_dir(plugin_dir)
    else:
        # BUG FIX: the original used ``args.config`` here, but ``args`` is
        # undefined inside this function (and the CLI namespace has no
        # ``config`` attribute), so this branch always crashed. Use the
        # config file path recorded on the Config object instead.
        config_path = getattr(cfg, 'filename', None)
        if config_path is None:
            raise ValueError(
                'cfg.plugin is set but neither cfg.plugin_dir nor '
                'cfg.filename is available to locate the plugin module')
        _import_dir(config_path)
def main(args):
    """Render BEV and camera-view visualizations for a single log.

    Args:
        args: parsed CLI namespace with ``log_id``, ``ann_file``,
            ``result``, ``thr`` and ``out_dir`` (see ``parse_args``).
    """
    ann = mmcv.load(args.ann_file)
    # Annotation image paths are relative to the annotation file's folder.
    root_path = os.path.dirname(args.ann_file)
    out_dir = os.path.join(args.out_dir, str(args.log_id))
    frames = ann[args.log_id]
    renderer = Renderer(roi_size=ROI_SIZE)
    predictions = None
    if args.result:
        predictions = mmcv.load(args.result)['results']
    for frame in mmcv.track_iter_progress(frames):
        timestamp = frame['timestamp']
        annotation = frame['annotation']
        cams = list(frame['sensor'].values())
        imgs = []
        for cam in cams:
            imgs.append(mmcv.imread(
                os.path.join(root_path, 'argoverse2', cam['image_path'])))
        extrinsics = [cam['extrinsic'] for cam in cams]
        intrinsics = [cam['intrinsic'] for cam in cams]
        # Ground truth -> <out_dir>/<timestamp>/gt
        gt_dir = os.path.join(out_dir, timestamp, 'gt')
        os.makedirs(gt_dir, exist_ok=True)
        renderer.render_bev_from_vectors(annotation, out_dir=gt_dir)
        renderer.render_camera_views_from_vectors(
            annotation, imgs, extrinsics, intrinsics, 4, gt_dir)
        if args.result:
            # Keep only predictions scoring above the threshold, grouped
            # by category name. Predictions -> <out_dir>/<timestamp>/pred
            pred = predictions[timestamp]
            vectors = {cat: [] for cat in CAT2ID}
            for idx in range(len(pred['labels'])):
                if pred['scores'][idx] > args.thr:
                    cat_name = ID2CAT[pred['labels'][idx]]
                    vectors[cat_name].append(pred['vectors'][idx])
            pred_dir = os.path.join(out_dir, timestamp, 'pred')
            os.makedirs(pred_dir, exist_ok=True)
            renderer.render_bev_from_vectors(vectors, out_dir=pred_dir)
            renderer.render_camera_views_from_vectors(
                vectors, imgs, extrinsics, intrinsics, 4, pred_dir)
# Script entry point: parse CLI arguments and run the visualization.
if __name__ == '__main__':
    args = parse_args()
    main(args)
\ No newline at end of file
<div id="top" align="center">
# InternImage for CVPR 2023 Workshop on End-to-End Autonomous Driving
</div>
## 1. InternImage-based Baseline for CVPR23 Occupancy Prediction Challenge
We achieve an improvement of 1.44 in mIoU over the baseline by leveraging the InternImage-based model.
model name|weight| mIoU | others | barrier | bicycle | bus | car | construction_vehicle | motorcycle | pedestrian | traffic_cone | trailer | truck | driveable_surface | other_flat | sidewalk | terrain | manmade | vegetation |
----|:----------:| :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :----------------------: | :---: | :------: | :------: |
bevformer_intern-s_occ|[Google Drive](https://drive.google.com/file/d/1LV9K8hrskKf51xY1wbqTKzK7WZmVXEV_/view?usp=sharing)| 25.11 | 6.93 | 35.57 | 10.40 | 35.97 | 41.23 | 13.72 | 20.30 | 21.10 | 18.34 | 19.18 | 28.64 | 49.82 | 30.74 | 31.00 | 27.44 | 19.29 | 17.29 |
bevformer_base_occ|[Google Drive](https://drive.google.com/file/d/1NyoiosafAmne1qiABeNOPXR-P-y0i7_I/view?usp=share_link)| 23.67 | 5.03 | 38.79 | 9.98 | 34.41 | 41.09 | 13.24 | 16.50 | 18.15 | 17.83 | 18.66 | 27.70 | 48.95 | 27.73 | 29.08 | 25.38 | 15.41 | 14.46 |
### Get Started
please refer to [README.md](./occupancy_prediction/README.md)
## 2. InternImage-based Baseline for Online HD Map Construction Challenge For Autonomous Driving
By incorporating the InternImage-based model, we observe an enhancement of 6.56 in mAP over the baseline.
model name|weight|$\mathrm{mAP}$ | $\mathrm{AP}_{pc}$ | $\mathrm{AP}_{div}$ | $\mathrm{AP}_{bound}$ |
----|:----------:| :--: | :--: | :--: | :--: |
vectormapnet_intern|[Checkpoint](https://github.com/OpenGVLab/InternImage/releases/download/track_model/vectormapnet_internimage.pth) | 49.35 | 45.05 | 56.78 | 46.22 |
vectormapnet_base|[Google Drive](https://drive.google.com/file/d/16D1CMinwA8PG1sd9PV9_WtHzcBohvO-D/view) | 42.79 | 37.22 | 50.47 | 40.68 |
### Get Started
please refer to [README.md](Online-HD-Map-Construction/README.md)
## 3. InternImage-based Baseline for CVPR23 OpenLane-V2 Challenge
Through the implementation of the InternImage-based model, we achieve an improvement of 0.009 in F-score over the baseline.
| | OpenLane-V2 Score | DET<sub>l</sub> | DET<sub>t</sub> | TOP<sub>ll</sub> | TOP<sub>lt</sub> | F-Score |
|-------------|-------------------|-----------------|-----------------|------------------|------------------|---------|
| base r50 | 0.292 | 0.183 | 0.457 | 0.022 | 0.143 | 0.215 |
| InternImage | 0.325 | 0.194 | 0.537 | 0.02 | 0.17 | 0.224 |
### Get Started
please refer to [README.md](./openlane-v2/README.md)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment