Unverified Commit 6a31be8f authored by YeShenglong1's avatar YeShenglong1 Committed by GitHub
Browse files

Add files via upload

parent 4fb17721
# Schedule mainly used by models with dynamic voxelization:
# AdamW optimizer with a cosine-annealed learning rate.
lr = 0.003  # peak learning rate
optimizer = {
    'type': 'AdamW',
    'lr': lr,
    # NOTE: the momentum (betas) is changed during training when a
    # momentum schedule hook is configured.
    'betas': (0.95, 0.99),
    'weight_decay': 0.001,
}
# Clip gradients to keep the AdamW updates stable.
optimizer_config = {'grad_clip': {'max_norm': 10, 'norm_type': 2}}
# Cosine annealing after a short linear warmup.
lr_config = {
    'policy': 'CosineAnnealing',
    'warmup': 'linear',
    'warmup_iters': 1000,
    'warmup_ratio': 1.0 / 10,
    'min_lr_ratio': 1e-5,
}
momentum_config = None
runner = {'type': 'EpochBasedRunner', 'max_epochs': 40}
# Schedule mainly used by models on the nuScenes dataset.
# nuScenes models are usually evaluated only at the end of training; with
# the default 24-epoch schedule the evaluation interval is set to 20.
# Adjust the interval accordingly when a non-default schedule is used.
optimizer = {'type': 'AdamW', 'lr': 1e-4, 'weight_decay': 0.01}
# max_norm=10 is better for SECOND
optimizer_config = {'grad_clip': {'max_norm': 35, 'norm_type': 2}}
# Single cyclic LR cycle: ramp up for 40% of the schedule, then decay.
lr_config = {
    'policy': 'cyclic',
    'target_ratio': (10, 1e-4),
    'cyclic_times': 1,
    'step_ratio_up': 0.4,
}
# Momentum is cycled inversely to the learning rate.
momentum_config = {
    'policy': 'cyclic',
    'target_ratio': (0.85 / 0.95, 1),
    'cyclic_times': 1,
    'step_ratio_up': 0.4,
}
# Runtime settings.
runner = {'type': 'EpochBasedRunner', 'max_epochs': 20}
# Schedule usually used by models trained on the KITTI dataset.
# The learning rate given to the cyclic schedule is the *initial* rate, not
# the maximum: with target_ratio=(10, 1e-4) the LR rises from 0.0018 to
# 0.018, then decays down to 0.0018 * 1e-4.
lr = 0.0018
# The optimizer follows the setting in SECOND.Pytorch, but uses the
# official AdamW optimizer implemented by PyTorch.
optimizer = {'type': 'AdamW', 'lr': lr, 'betas': (0.95, 0.99), 'weight_decay': 0.01}
optimizer_config = {'grad_clip': {'max_norm': 10, 'norm_type': 2}}
# Cyclic learning rate and momentum schedules follow SECOND.Pytorch:
# https://github.com/traveller59/second.pytorch/blob/3aba19c9688274f75ebb5e576f65cfe54773c021/torchplus/train/learning_schedules_fastai.py#L69 # noqa
# They are implemented in mmcv; for details see:
# https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L327 # noqa
# https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/momentum_updater.py#L130 # noqa
lr_config = {
    'policy': 'cyclic',
    'target_ratio': (10, 1e-4),
    'cyclic_times': 1,
    'step_ratio_up': 0.4,
}
momentum_config = {
    'policy': 'cyclic',
    'target_ratio': (0.85 / 0.95, 1),
    'cyclic_times': 1,
    'step_ratio_up': 0.4,
}
# Although max_epochs is 40, this schedule is usually combined with
# RepeatDataset (repeat ratio N), so the effective epoch count can be N x 40.
runner = {'type': 'EpochBasedRunner', 'max_epochs': 40}
# Standard 1x schedule: SGD with step LR decay.
optimizer = {'type': 'SGD', 'lr': 0.02, 'momentum': 0.9, 'weight_decay': 0.0001}
optimizer_config = {'grad_clip': None}
# Learning policy: linear warmup, then decay at epochs 8 and 11.
lr_config = {
    'policy': 'step',
    'warmup': 'linear',
    'warmup_iters': 500,
    'warmup_ratio': 0.001,
    'step': [8, 11],
}
runner = {'type': 'EpochBasedRunner', 'max_epochs': 12}
# Schedule mainly used by models on the nuScenes dataset (step decay).
optimizer = {'type': 'AdamW', 'lr': 0.001, 'weight_decay': 0.01}
# max_norm=10 is better for SECOND
optimizer_config = {'grad_clip': {'max_norm': 35, 'norm_type': 2}}
# Linear warmup, then decay at epochs 20 and 23.
lr_config = {
    'policy': 'step',
    'warmup': 'linear',
    'warmup_iters': 1000,
    'warmup_ratio': 1.0 / 1000,
    'step': [20, 23],
}
momentum_config = None
# Runtime settings.
runner = {'type': 'EpochBasedRunner', 'max_epochs': 24}
# Schedule mainly used by models on indoor datasets,
# e.g. VoteNet on SUN RGB-D and ScanNet.
lr = 0.008  # peak learning rate
optimizer = {'type': 'AdamW', 'lr': lr, 'weight_decay': 0.01}
optimizer_config = {'grad_clip': {'max_norm': 10, 'norm_type': 2}}
# Step decay at epochs 24 and 32, no warmup.
lr_config = {'policy': 'step', 'warmup': None, 'step': [24, 32]}
# Runtime settings.
runner = {'type': 'EpochBasedRunner', 'max_epochs': 36}
# Schedule mainly used on the S3DIS dataset for the segmentation task:
# SGD with cosine-annealed learning rate.
optimizer = {'type': 'SGD', 'lr': 0.2, 'weight_decay': 0.0001, 'momentum': 0.9}
optimizer_config = {'grad_clip': None}
lr_config = {'policy': 'CosineAnnealing', 'warmup': None, 'min_lr': 0.002}
momentum_config = None
# Runtime settings.
runner = {'type': 'EpochBasedRunner', 'max_epochs': 150}
# Schedule mainly used on the ScanNet dataset for the segmentation task:
# Adam with cosine-annealed learning rate.
optimizer = {'type': 'Adam', 'lr': 0.001, 'weight_decay': 0.01}
optimizer_config = {'grad_clip': None}
lr_config = {'policy': 'CosineAnnealing', 'warmup': None, 'min_lr': 1e-5}
momentum_config = None
# Runtime settings.
runner = {'type': 'EpochBasedRunner', 'max_epochs': 200}
# Schedule mainly used on the S3DIS dataset for the segmentation task:
# Adam with cosine-annealed learning rate, short 50-epoch run.
optimizer = {'type': 'Adam', 'lr': 0.001, 'weight_decay': 0.001}
optimizer_config = {'grad_clip': None}
lr_config = {'policy': 'CosineAnnealing', 'warmup': None, 'min_lr': 1e-5}
momentum_config = None
# Runtime settings.
runner = {'type': 'EpochBasedRunner', 'max_epochs': 50}
from .pipelines import *
from .argo_dataset import AV2Dataset
\ No newline at end of file
from .base_dataset import BaseMapDataset
from mmdet.datasets import DATASETS
import numpy as np
from time import time
import mmcv
import os
from shapely.geometry import LineString
@DATASETS.register_module()
class AV2Dataset(BaseMapDataset):
    """Argoverse 2 map dataset class.

    Args:
        ann_file (str): annotation file path
        cat2id (dict): category name to class id
        roi_size (tuple): bev range
        eval_config (Config): evaluation config
        meta (dict): meta information
        pipeline (Config): data processing pipeline config
        interval (int): annotation load interval
        work_dir (str): path to work dir
        test_mode (bool): whether in test mode
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def load_annotations(self, ann_file):
        """Load annotations from ann_file.

        Args:
            ann_file (str): Path of the annotation file; expected to map
                sequence ids to lists of sample dicts.

        Returns:
            None: the collected samples are stored on ``self.samples``.
        """
        start_time = time()
        ann = mmcv.load(ann_file)
        # Flatten all sequences into one flat sample list; the sequence ids
        # themselves are not needed (previously bound to an unused variable).
        samples = []
        for sequence in ann.values():
            samples.extend(sequence)
        # Keep every `interval`-th sample only.
        samples = samples[::self.interval]
        print(f'collected {len(samples)} samples in {(time() - start_time):.2f}s')
        self.samples = samples

    def get_sample(self, idx):
        """Get data sample. For each sample, map extractor will be applied
        to extract map elements.

        Args:
            idx (int): data index

        Returns:
            result (dict): dict of input
        """
        sample = self.samples[idx]
        if not self.test_mode:
            ann = sample['annotation']
            # Convert raw polylines of the requested categories into shapely
            # LineStrings keyed by class id.
            map_label2geom = {}
            for cat, lines in ann.items():
                if cat in self.cat2id:
                    map_label2geom[self.cat2id[cat]] = [
                        LineString(np.array(line)[:, :3]) for line in lines
                    ]

        # Build the 4x4 ego->image projection for every camera: pad the 3x3
        # intrinsic matrix K to 4x4 and compose it with the ego->cam
        # extrinsic transform.
        ego2img_rts = []
        cams = sample['sensor']
        for c in cams.values():
            extrinsic = np.array(c['extrinsic'])
            intrinsic = np.array(c['intrinsic'])
            viewpad = np.eye(4)
            viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic
            ego2img_rts.append(viewpad @ extrinsic)

        pose = sample['pose']
        input_dict = {
            'token': sample['timestamp'],
            'img_filenames': [
                os.path.join(self.root_path, c['image_path'])
                for c in cams.values()
            ],
            # intrinsics are 3x3 Ks
            'cam_intrinsics': [c['intrinsic'] for c in cams.values()],
            # extrinsics are 4x4 transform matrices, NOTE: **ego2cam**
            'cam_extrinsics': [c['extrinsic'] for c in cams.values()],
            'ego2img': ego2img_rts,
            'ego2global_translation': pose['ego2global_translation'],
            'ego2global_rotation': pose['ego2global_rotation'],
        }
        if not self.test_mode:
            # {0: List[ped_crossing(LineString)], 1: ...}
            input_dict.update({'map_geoms': map_label2geom})
        return input_dict
\ No newline at end of file
import numpy as np
import os
import os.path as osp
import mmcv
from .evaluation.vector_eval import VectorEvaluate
from mmdet3d.datasets.pipelines import Compose
from mmdet.datasets import DATASETS
from torch.utils.data import Dataset
import warnings
# NOTE(review): this silences *all* warnings process-wide as an import-time
# side effect; consider filtering specific categories instead.
warnings.filterwarnings("ignore")
@DATASETS.register_module()
class BaseMapDataset(Dataset):
    """Map dataset base class.

    Subclasses must implement ``load_annotations`` (which sets
    ``self.samples``) and ``get_sample``.

    Args:
        ann_file (str): annotation file path
        root_path (str): data root directory
        cat2id (dict): category name to class id
        roi_size (tuple): bev range
        meta (dict): meta information
        pipeline (Config): data processing pipeline config
        interval (int): annotation load interval
        work_dir (str): path to work dir
        test_mode (bool): whether in test mode
    """

    def __init__(self,
                 ann_file,
                 root_path,
                 cat2id,
                 roi_size,
                 meta,
                 pipeline,
                 interval=1,
                 work_dir=None,
                 test_mode=False,
                 ):
        super().__init__()
        self.ann_file = ann_file
        self.meta = meta
        self.root_path = root_path
        self.classes = list(cat2id.keys())
        self.num_classes = len(self.classes)
        self.cat2id = cat2id
        self.interval = interval

        # Populates self.samples (subclass responsibility).
        self.load_annotations(self.ann_file)

        # Bidirectional index <-> token mapping; 'timestamp' is preferred
        # over 'token' when both are present in a sample.
        self.idx2token = {}
        for i, s in enumerate(self.samples):
            if 'timestamp' in s:
                self.idx2token[i] = s['timestamp']
            else:
                self.idx2token[i] = s['token']
        self.token2idx = {v: k for k, v in self.idx2token.items()}

        if pipeline is not None:
            self.pipeline = Compose(pipeline)
        else:
            self.pipeline = None

        # dummy flags to fit with mmdet dataset
        self.flag = np.zeros(len(self), dtype=np.uint8)

        self.roi_size = roi_size
        self.work_dir = work_dir
        self.test_mode = test_mode

    def load_annotations(self, ann_file):
        """Load annotations from ``ann_file``; must set ``self.samples``."""
        raise NotImplementedError

    def get_sample(self, idx):
        """Build the pipeline input dict for sample ``idx``."""
        raise NotImplementedError

    def format_results(self, results, denormalize=True, prefix=None):
        '''Format prediction result to submission format.

        Args:
            results (list[Tensor]): List of prediction results.
            denormalize (bool): whether to denormalize prediction from (0, 1) \
                to bev range. Default: True
            prefix (str): work dir prefix to save submission file.
                NOTE(review): must not be None — it is joined into the
                output path below; confirm callers always pass a dir.

        Returns:
            str: path of the dumped submission file.
        '''
        meta = self.meta
        submissions = {
            'meta': meta,
            'results': {},
        }

        for pred in results:
            # Each case is formatted as
            # {'vectors': [array([[x1, y1], [x2, y2], ...]), ...],
            #  'scores': [float, ...],   # one score per instance
            #  'labels': [int, ...]}     # one label per instance
            if pred is None:  # empty prediction
                continue

            single_case = {'vectors': [], 'scores': [], 'labels': []}
            token = pred['token']
            roi_size = np.array(self.roi_size)
            # BEV origin: bottom-left corner of the region of interest.
            origin = -np.array([self.roi_size[0]/2, self.roi_size[1]/2])

            for i in range(len(pred['scores'])):
                score = pred['scores'][i]
                label = pred['labels'][i]
                vector = pred['vectors'][i]

                # A line should have >=2 points
                if len(vector) < 2:
                    continue

                if denormalize:
                    # Map (0, 1)-normalized coordinates back to the bev
                    # range, with a small eps margin on the roi size.
                    eps = 2
                    vector = vector * (roi_size + eps) + origin

                single_case['vectors'].append(vector)
                single_case['scores'].append(score)
                single_case['labels'].append(label)

            submissions['results'][token] = single_case

        out_path = osp.join(prefix, 'submission_vector.json')
        print(f'\nsaving submissions results to {out_path}')
        os.makedirs(os.path.dirname(out_path), exist_ok=True)
        mmcv.dump(submissions, out_path)
        return out_path

    def evaluate(self, results, logger=None, **kwargs):
        '''Evaluate prediction results.

        Args:
            results (list[Tensor]): List of prediction results.
            logger (logger): logger to print evaluation results.

        Returns:
            dict: Evaluation results.
        '''
        # NOTE(review): an unused read of self.meta['output_format'] was
        # removed here; the evaluator does not consume it.
        self.evaluator = VectorEvaluate(self.ann_file)
        print('len of the results', len(results))
        result_path = self.format_results(results, denormalize=True, prefix=self.work_dir)
        result_dict = self.evaluator.evaluate(result_path, logger=logger)
        return result_dict

    def __len__(self):
        """Return the length of data infos.

        Returns:
            int: Length of data infos.
        """
        return len(self.samples)

    def _rand_another(self, idx):
        """Randomly get another item.

        Args:
            idx (int): index of the rejected sample (unused).

        Returns:
            int: Another index of item.
        """
        # BUG FIX: previously the bound method ``self.__len__`` (not its
        # value) was passed to np.random.choice, which raises a ValueError
        # at runtime.
        return np.random.choice(len(self))

    def __getitem__(self, idx):
        """Get item from infos according to the given index.

        Returns:
            dict: Data dictionary of the corresponding index.
        """
        input_dict = self.get_sample(idx)
        data = self.pipeline(input_dict)
        return data
from .loading import LoadMultiViewImagesFromFiles
from .formating import FormatBundleMap
from .transform import ResizeMultiViewImages, PadMultiViewImages, Normalize3D
from .vectorize import VectorizeMap
from .poly_bbox import PolygonizeLocalMapBbox
# Public names exported by the pipelines package (Argoverse pipeline stages).
__all__ = [
'LoadMultiViewImagesFromFiles',
'FormatBundleMap', 'Normalize3D', 'ResizeMultiViewImages', 'PadMultiViewImages',
'VectorizeMap', 'PolygonizeLocalMapBbox'
]
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment