"references/vscode:/vscode.git/clone" did not exist on "68b128d5a9ccfe05d64fc1a93e686f8489fd63b9"
Unverified Commit 579b0799 authored by Wenwei Zhang, committed by GitHub

Bump to V0.6.0 (#118)



* Add gitlab CI back

* clean isort

* Update gitlab CI version

* Update mmcv install

* fix unit test bug

* waymo

* Use new flake8

* Update mmdet3d/core/evaluation/waymo_utils/prediction_kitti_to_waymo.py, tools/data_converter/waymo_converter.py files

* Add baseline configs for waymo

* fix linting

* yapf reformat

* update waymo results

* Update waymo model zoo and docs

* Bump v0.6.0

* Fix a minor bug when converting waymo data

* Fix cmds in the waymo doc

* Fix setup.cfg to pass isort test

* Fix waymo configs

* Update model zoo link & doc

* update version date

* clean ci
Co-authored-by: wangtai <wangtai@sensetime.com>
Co-authored-by: Tai-Wang <tab_wang@outlook.com>
parent 62ce67c0
......@@ -251,7 +251,6 @@ class DataBaseSampler(object):
            file_path = os.path.join(
                self.data_root,
                info['path']) if self.data_root else info['path']
            results = dict(pts_filename=file_path)
            s_points = self.points_loader(results)['points']
            s_points[:, :3] += info['box3d_lidar'][:3]
......
This diff is collapsed.
......@@ -48,8 +48,8 @@ class Base3DDetector(BaseDetector):
        Note this setting will change the expected inputs. When
-        `return_loss=True`, img and img_metas are single-nested (i.e.
-        torch.Tensor and list[dict]), and when `resturn_loss=False`, img and
-        img_metas should be double nested (i.e. list[torch.Tensor],
+        `return_loss=True`, img and img_metas are single-nested (i.e.
+        torch.Tensor and list[dict]), and when `return_loss=False`, img
+        and img_metas should be double nested (i.e. list[torch.Tensor],
        list[list[dict]]), with the outer list indicating test time
        augmentations.
        """
......
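The nesting convention above is easier to see at a call site. Below is a runnable toy stub (not the real detector class) that only asserts the two input shapes the docstring describes; names and tensors are hypothetical:

```python
import torch


def fake_detector(return_loss=True, img=None, img_metas=None):
    # stand-in for Base3DDetector.forward, checking the nesting convention only
    if return_loss:
        assert isinstance(img, torch.Tensor)          # single-nested
        assert isinstance(img_metas[0], dict)         # list[dict]
        return {'loss': img.sum() * 0}
    assert isinstance(img, list)                      # list[torch.Tensor]
    assert isinstance(img_metas[0], list)             # list[list[dict]]
    return [dict(boxes_3d=None)]


img = torch.zeros(1, 3, 224, 224)
metas = [dict(flip=False)]
fake_detector(return_loss=True, img=img, img_metas=metas)       # train-style inputs
fake_detector(return_loss=False, img=[img], img_metas=[metas])  # test-time-aug inputs
```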
......@@ -7,8 +7,8 @@ class TwoStage3DDetector(Base3DDetector, TwoStageDetector):
    """Base class of two-stage 3D detector.
    It inherits original :class:`TwoStageDetector` and
-    :class:`Base3DDetector`. This class could serve as a base class for all
-    two-stage 3D detectors.
+    :class:`Base3DDetector`. This class could serve as a base class
+    for all two-stage 3D detectors.
    """
    def __init__(self, **kwargs):
......
......@@ -93,7 +93,7 @@ class Base3DRoIHead(nn.Module, metaclass=ABCMeta):
    def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
        """Test with augmentations.
-        If rescale is False, then returned bboxes and masks will fit the scale
-        of imgs[0].
+        If rescale is False, then returned bboxes and masks will fit the
+        scale of imgs[0].
        """
        pass
......@@ -7,8 +7,8 @@ from . import furthest_point_sample_ext
class FurthestPointSampling(Function):
    """Furthest Point Sampling.
-    Uses iterative furthest point sampling to select a set of features whose
-    corresponding points have the furthest distance.
+    Uses iterative furthest point sampling to select a set of features
+    whose corresponding points have the furthest distance.
    """
    @staticmethod
......
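The op itself is a compiled CUDA extension; for reference, here is a naive CPU sketch of the same iterative furthest-point-sampling idea (illustrative only — the names and the O(N·S) loop are not the kernel's actual implementation):

```python
import torch


def furthest_point_sample_cpu(xyz, num_samples):
    """Greedy reference: repeatedly pick the point furthest from the chosen set."""
    n = xyz.shape[0]
    selected = torch.zeros(num_samples, dtype=torch.long)
    min_dist = torch.full((n,), float('inf'))  # distance to nearest selected point
    current = 0  # start from an arbitrary point, index 0 here
    for i in range(num_samples):
        selected[i] = current
        dist = ((xyz - xyz[current]) ** 2).sum(dim=1)
        min_dist = torch.minimum(min_dist, dist)
        current = int(torch.argmax(min_dist))  # furthest from everything chosen so far
    return selected


points = torch.rand(1024, 3)
idx = furthest_point_sample_cpu(points, 16)  # indices of 16 well-spread points
```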
......@@ -5,8 +5,9 @@ import torch
def scatter_nd(indices, updates, shape):
    """PyTorch edition of TensorFlow's scatter_nd.
-    this function don't contain except handle code. so use this carefully when
-    indice repeats, don't support repeat add which is supported in tensorflow.
+    This function contains no exception handling, so use it carefully
+    when indices repeat; accumulating values at repeated indices (as
+    TensorFlow's scatter_nd does) is not supported.
    """
    ret = torch.zeros(*shape, dtype=updates.dtype, device=updates.device)
    ndim = indices.shape[-1]
......
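To make the duplicate-index caveat concrete, here is a sketch that completes the truncated body with a plain indexed assignment (a hypothetical completion — the upstream function may differ in details):

```python
import torch


def scatter_nd(indices, updates, shape):
    # hypothetical completion of the truncated body shown above
    ret = torch.zeros(*shape, dtype=updates.dtype, device=updates.device)
    ndim = indices.shape[-1]
    flat = indices.view(-1, ndim)
    # plain indexed assignment: duplicate indices overwrite (which write
    # wins is not guaranteed); they do not accumulate
    ret[[flat[:, i] for i in range(ndim)]] = updates.view(-1, *shape[ndim:])
    return ret


indices = torch.tensor([[0], [2], [2]])  # index 2 appears twice
updates = torch.tensor([1., 2., 3.])
print(scatter_nd(indices, updates, [4]))  # typically tensor([1., 0., 3., 0.])
# TensorFlow's tf.scatter_nd would accumulate instead: [1., 0., 5., 0.]
```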
......@@ -7,3 +7,5 @@ plyfile
# by default we also use tensorboard to log results
tensorboard
trimesh>=2.35.39,<2.35.40
scikit-image
waymo-open-dataset-tf-2-1-0==1.2.0
......@@ -8,6 +8,6 @@ line_length = 79
multi_line_output = 0
known_standard_library = setuptools
known_first_party = mmdet,mmdet3d
-known_third_party = load_scannet_data,lyft_dataset_sdk,m2r,matplotlib,mmcv,nuimages,numba,numpy,nuscenes,pandas,plyfile,pycocotools,pyquaternion,pytest,recommonmark,scannet_utils,scipy,seaborn,shapely,skimage,terminaltables,torch,trimesh
+known_third_party = load_scannet_data,lyft_dataset_sdk,m2r,matplotlib,mmcv,nuimages,numba,numpy,nuscenes,pandas,plyfile,pycocotools,pyquaternion,pytest,recommonmark,scannet_utils,scipy,seaborn,shapely,skimage,tensorflow,terminaltables,torch,trimesh,waymo_open_dataset
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY
......@@ -44,8 +44,8 @@ def _get_config_module(fname):
def _get_model_cfg(fname):
    """Grab configs necessary to create a model.
-    These are deep copied to allow for safe modification of parameters without
-    influencing other tests.
+    These are deep copied to allow for safe modification of parameters
+    without influencing other tests.
    """
    config = _get_config_module(fname)
    model = copy.deepcopy(config.model)
......@@ -56,8 +56,8 @@ def _get_model_cfg(fname):
def _get_detector_cfg(fname):
    """Grab configs necessary to create a detector.
-    These are deep copied to allow for safe modification of parameters without
-    influencing other tests.
+    These are deep copied to allow for safe modification of parameters
+    without influencing other tests.
    """
    import mmcv
    config = _get_config_module(fname)
......
......@@ -37,8 +37,8 @@ def _get_config_module(fname):
def _get_detector_cfg(fname):
    """Grab configs necessary to create a detector.
-    These are deep copied to allow for safe modification of parameters without
-    influencing other tests.
+    These are deep copied to allow for safe modification of parameters
+    without influencing other tests.
    """
    import mmcv
    config = _get_config_module(fname)
......
......@@ -46,8 +46,8 @@ def _get_config_module(fname):
def _get_head_cfg(fname):
    """Grab configs necessary to create a bbox_head.
-    These are deep copied to allow for safe modification of parameters without
-    influencing other tests.
+    These are deep copied to allow for safe modification of parameters
+    without influencing other tests.
    """
    import mmcv
    config = _get_config_module(fname)
......@@ -64,8 +64,8 @@ def _get_head_cfg(fname):
def _get_rpn_head_cfg(fname):
    """Grab configs necessary to create a rpn_head.
-    These are deep copied to allow for safe modification of parameters without
-    influencing other tests.
+    These are deep copied to allow for safe modification of parameters
+    without influencing other tests.
    """
    import mmcv
    config = _get_config_module(fname)
......@@ -82,8 +82,8 @@ def _get_rpn_head_cfg(fname):
def _get_roi_head_cfg(fname):
    """Grab configs necessary to create a roi_head.
-    These are deep copied to allow for safe modification of parameters without
-    influencing other tests.
+    These are deep copied to allow for safe modification of parameters
+    without influencing other tests.
    """
    import mmcv
    config = _get_config_module(fname)
......@@ -100,8 +100,8 @@ def _get_roi_head_cfg(fname):
def _get_pts_bbox_head_cfg(fname):
    """Grab configs necessary to create a pts_bbox_head.
-    These are deep copied to allow for safe modification of parameters without
-    influencing other tests.
+    These are deep copied to allow for safe modification of parameters
+    without influencing other tests.
    """
    import mmcv
    config = _get_config_module(fname)
......@@ -118,8 +118,8 @@ def _get_pts_bbox_head_cfg(fname):
def _get_vote_head_cfg(fname):
    """Grab configs necessary to create a vote_head.
-    These are deep copied to allow for safe modification of parameters without
-    influencing other tests.
+    These are deep copied to allow for safe modification of parameters
+    without influencing other tests.
    """
    import mmcv
    config = _get_config_module(fname)
......@@ -136,8 +136,8 @@ def _get_vote_head_cfg(fname):
def _get_parta2_bbox_head_cfg(fname):
    """Grab configs necessary to create a parta2_bbox_head.
-    These are deep copied to allow for safe modification of parameters without
-    influencing other tests.
+    These are deep copied to allow for safe modification of parameters
+    without influencing other tests.
    """
    config = _get_config_module(fname)
    model = copy.deepcopy(config.model)
......
......@@ -5,6 +5,7 @@ from tools.data_converter import indoor_converter as indoor
from tools.data_converter import kitti_converter as kitti
from tools.data_converter import lyft_converter as lyft_converter
from tools.data_converter import nuscenes_converter as nuscenes_converter
from tools.data_converter import waymo_converter as waymo
from tools.data_converter.create_gt_database import create_groundtruth_database
......@@ -133,6 +134,48 @@ def sunrgbd_data_prep(root_path, info_prefix, out_dir, workers):
        root_path, info_prefix, out_dir, workers=workers)


def waymo_data_prep(root_path,
                    info_prefix,
                    version,
                    out_dir,
                    workers,
                    max_sweeps=5):
    """Prepare info files for the Waymo dataset.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        version (str): Dataset version (not used by this converter).
        out_dir (str): Output directory of the generated info files.
        workers (int): Number of threads to be used.
        max_sweeps (int): Number of input consecutive frames. Default: 5.
            Pose information of these frames is stored for later use.
    """
    splits = ['training', 'validation', 'testing']
    for i, split in enumerate(splits):
        load_dir = osp.join(root_path, 'waymo_format', split)
        if split == 'validation':
            save_dir = osp.join(out_dir, 'kitti_format', 'training')
        else:
            save_dir = osp.join(out_dir, 'kitti_format', split)
        converter = waymo.Waymo2KITTI(
            load_dir,
            save_dir,
            prefix=str(i),
            workers=workers,
            test_mode=(split == 'testing'))
        converter.convert()
    # Generate waymo infos
    out_dir = osp.join(out_dir, 'kitti_format')
    kitti.create_waymo_info_file(out_dir, info_prefix, max_sweeps=max_sweeps)
    create_groundtruth_database(
        'WaymoDataset',
        out_dir,
        info_prefix,
        f'{out_dir}/{info_prefix}_infos_train.pkl',
        relative_path=False,
        with_mask=False)
parser = argparse.ArgumentParser(description='Data converter arg parser')
parser.add_argument('dataset', metavar='kitti', help='name of the dataset')
parser.add_argument(
......@@ -213,6 +256,14 @@ if __name__ == '__main__':
            dataset_name='LyftDataset',
            out_dir=args.out_dir,
            max_sweeps=args.max_sweeps)
    elif args.dataset == 'waymo':
        waymo_data_prep(
            root_path=args.root_path,
            info_prefix=args.extra_tag,
            version=args.version,
            out_dir=args.out_dir,
            workers=args.workers,
            max_sweeps=args.max_sweeps)
    elif args.dataset == 'scannet':
        scannet_data_prep(
            root_path=args.root_path,
......
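For orientation, a hypothetical direct call of the new branch (the supported entry point is the `create_data.py` CLI with the arguments defined by the parser above; paths and tag below are placeholders):

```python
# assumes the repo root is on PYTHONPATH so tools/ is importable
from tools.create_data import waymo_data_prep

waymo_data_prep(
    root_path='./data/waymo',  # expects ./data/waymo/waymo_format/{training,validation,testing}
    info_prefix='waymo',
    version='v1.2',
    out_dir='./data/waymo',    # KITTI-format output lands in ./data/waymo/kitti_format
    workers=8,
    max_sweeps=5)
```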
......@@ -183,6 +183,31 @@ def create_groundtruth_database(dataset_class_name,
                with_bbox_3d=True,
                with_label_3d=True)
        ])
    elif dataset_class_name == 'WaymoDataset':
        file_client_args = dict(backend='disk')
        dataset_cfg.update(
            test_mode=False,
            split='training',
            modality=dict(
                use_lidar=True,
                use_depth=False,
                use_lidar_intensity=True,
                use_camera=False,
            ),
            pipeline=[
                dict(
                    type='LoadPointsFromFile',
                    load_dim=6,  # Waymo points carry 6 values each
                    use_dim=5,  # only the first five are used downstream
                    file_client_args=file_client_args),
                dict(
                    type='LoadAnnotations3D',
                    with_bbox_3d=True,
                    with_label_3d=True,
                    file_client_args=file_client_args)
            ])

    dataset = build_dataset(dataset_cfg)
    if database_save_path is None:
......
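The `load_dim=6, use_dim=5` pair reflects the per-point layout the converter below writes (x, y, z, intensity, elongation, timestamp — the code further down confirms the last value is the timestamp). Conceptually the loader does something like this sketch (synthetic data, not the loader's actual code):

```python
import numpy as np

# synthetic stand-in for one converted Waymo .bin file: 6 float32 values per point
raw = np.zeros((100, 6), dtype=np.float32)
# load_dim=6 reads all six values; use_dim=5 keeps the first five:
points = raw[:, :5]  # x, y, z, intensity, elongation; per-point timestamp dropped
```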
+import mmcv
import numpy as np
-import pickle
-from mmcv import track_iter_progress
from pathlib import Path

from mmdet3d.core.bbox import box_np_ops
-from .kitti_data_utils import get_kitti_image_info
+from .kitti_data_utils import get_kitti_image_info, get_waymo_image_info


def convert_to_kitti_info_version2(info):
......@@ -43,7 +42,7 @@ def _calculate_num_points_in_gt(data_path,
                                relative_path,
                                remove_outside=True,
                                num_features=4):
-    for info in track_iter_progress(infos):
+    for info in mmcv.track_iter_progress(infos):
        pc_info = info['point_cloud']
        image_info = info['image']
        calib = info['calib']
......@@ -80,7 +79,7 @@ def _calculate_num_points_in_gt(data_path,
def create_kitti_info_file(data_path,
-                           pkl_prefix='kitti_',
+                           pkl_prefix='kitti',
                           save_path=None,
                           relative_path=True):
    """Create info file of KITTI dataset.
......@@ -113,8 +112,7 @@ def create_kitti_info_file(data_path,
    _calculate_num_points_in_gt(data_path, kitti_infos_train, relative_path)
    filename = save_path / f'{pkl_prefix}_infos_train.pkl'
    print(f'Kitti info train file is saved to {filename}')
-    with open(filename, 'wb') as f:
-        pickle.dump(kitti_infos_train, f)
+    mmcv.dump(kitti_infos_train, filename)
    kitti_infos_val = get_kitti_image_info(
        data_path,
        training=True,
......@@ -125,12 +123,10 @@ def create_kitti_info_file(data_path,
    _calculate_num_points_in_gt(data_path, kitti_infos_val, relative_path)
    filename = save_path / f'{pkl_prefix}_infos_val.pkl'
    print(f'Kitti info val file is saved to {filename}')
-    with open(filename, 'wb') as f:
-        pickle.dump(kitti_infos_val, f)
+    mmcv.dump(kitti_infos_val, filename)
    filename = save_path / f'{pkl_prefix}_infos_trainval.pkl'
    print(f'Kitti info trainval file is saved to {filename}')
-    with open(filename, 'wb') as f:
-        pickle.dump(kitti_infos_train + kitti_infos_val, f)
+    mmcv.dump(kitti_infos_train + kitti_infos_val, filename)
    kitti_infos_test = get_kitti_image_info(
        data_path,
......@@ -142,18 +138,109 @@ def create_kitti_info_file(data_path,
        relative_path=relative_path)
    filename = save_path / f'{pkl_prefix}_infos_test.pkl'
    print(f'Kitti info test file is saved to {filename}')
-    with open(filename, 'wb') as f:
-        pickle.dump(kitti_infos_test, f)
+    mmcv.dump(kitti_infos_test, filename)
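The switch from explicit pickle calls to `mmcv.dump` works because mmcv infers the serializer from the file suffix; a quick sanity check:

```python
import mmcv

infos = [{'image_idx': 0}]
mmcv.dump(infos, '/tmp/demo_infos.pkl')  # '.pkl' suffix selects the pickle handler
assert mmcv.load('/tmp/demo_infos.pkl') == infos
```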
def create_waymo_info_file(data_path,
                           pkl_prefix='waymo',
                           save_path=None,
                           relative_path=True,
                           max_sweeps=5):
    """Create info file of Waymo dataset.

    Given the raw data, generate its related info file in pkl format.

    Args:
        data_path (str): Path of the data root.
        pkl_prefix (str): Prefix of the info file to be generated.
        save_path (str | None): Path to save the info file.
        relative_path (bool): Whether to use relative path.
        max_sweeps (int): Max sweeps before the detection frame to be used.
    """
    imageset_folder = Path(data_path) / 'ImageSets'
    train_img_ids = _read_imageset_file(str(imageset_folder / 'train.txt'))
    val_img_ids = _read_imageset_file(str(imageset_folder / 'val.txt'))
    test_img_ids = _read_imageset_file(str(imageset_folder / 'test.txt'))

    print('Generating info. This may take several minutes.')
    if save_path is None:
        save_path = Path(data_path)
    else:
        save_path = Path(save_path)
    waymo_infos_train = get_waymo_image_info(
        data_path,
        training=True,
        velodyne=True,
        calib=True,
        pose=True,
        image_ids=train_img_ids,
        relative_path=relative_path,
        max_sweeps=max_sweeps)
    _calculate_num_points_in_gt(
        data_path,
        waymo_infos_train,
        relative_path,
        num_features=6,
        remove_outside=False)
    filename = save_path / f'{pkl_prefix}_infos_train.pkl'
    print(f'Waymo info train file is saved to {filename}')
    mmcv.dump(waymo_infos_train, filename)
    waymo_infos_val = get_waymo_image_info(
        data_path,
        training=True,
        velodyne=True,
        calib=True,
        pose=True,
        image_ids=val_img_ids,
        relative_path=relative_path,
        max_sweeps=max_sweeps)
    _calculate_num_points_in_gt(
        data_path,
        waymo_infos_val,
        relative_path,
        num_features=6,
        remove_outside=False)
    filename = save_path / f'{pkl_prefix}_infos_val.pkl'
    print(f'Waymo info val file is saved to {filename}')
    mmcv.dump(waymo_infos_val, filename)
    filename = save_path / f'{pkl_prefix}_infos_trainval.pkl'
    print(f'Waymo info trainval file is saved to {filename}')
    mmcv.dump(waymo_infos_train + waymo_infos_val, filename)
    waymo_infos_test = get_waymo_image_info(
        data_path,
        training=False,
        label_info=False,
        velodyne=True,
        calib=True,
        pose=True,
        image_ids=test_img_ids,
        relative_path=relative_path,
        max_sweeps=max_sweeps)
    filename = save_path / f'{pkl_prefix}_infos_test.pkl'
    print(f'Waymo info test file is saved to {filename}')
    mmcv.dump(waymo_infos_test, filename)
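A hypothetical call, noting that the function reads its split lists from `ImageSets/{train,val,test}.txt` under `data_path`:

```python
from tools.data_converter.kitti_converter import create_waymo_info_file

# assumes ./data/waymo/kitti_format/ImageSets/{train,val,test}.txt exist
create_waymo_info_file('./data/waymo/kitti_format', pkl_prefix='waymo', max_sweeps=5)
```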
def _create_reduced_point_cloud(data_path,
                                info_path,
                                save_path=None,
-                                back=False):
-    with open(info_path, 'rb') as f:
-        kitti_infos = pickle.load(f)
-    for info in track_iter_progress(kitti_infos):
+                                back=False,
+                                num_features=4,
+                                front_camera_id=2):
+    """Create reduced point clouds for given info.
+
+    Args:
+        data_path (str): Path of original data.
+        info_path (str): Path of data info.
+        save_path (str | None): Path to save reduced point cloud data.
+            Default: None.
+        back (bool): Whether to flip the points to back.
+        num_features (int): Number of point features. Default: 4.
+        front_camera_id (int): The referenced/front camera ID. Default: 2.
+    """
+    kitti_infos = mmcv.load(info_path)
+    for info in mmcv.track_iter_progress(kitti_infos):
        pc_info = info['point_cloud']
        image_info = info['image']
        calib = info['calib']
......@@ -161,9 +248,13 @@ def _create_reduced_point_cloud(data_path,
        v_path = pc_info['velodyne_path']
        v_path = Path(data_path) / v_path
-        points_v = np.fromfile(
-            str(v_path), dtype=np.float32, count=-1).reshape([-1, 4])
+        points_v = np.fromfile(
+            str(v_path), dtype=np.float32,
+            count=-1).reshape([-1, num_features])
        rect = calib['R0_rect']
-        P2 = calib['P2']
+        if front_camera_id == 2:
+            P2 = calib['P2']
+        else:
+            P2 = calib[f'P{str(front_camera_id)}']
        Trv2c = calib['Tr_velo_to_cam']
        # first remove z < 0 points
        # keep = points_v[:, -1] > 0
......@@ -196,10 +287,10 @@ def create_reduced_point_cloud(data_path,
                               test_info_path=None,
                               save_path=None,
                               with_back=False):
-    """Create reduced point cloud info file.
+    """Create reduced point clouds for training/validation/testing.

    Args:
-        data_path (str): Path of original infos.
+        data_path (str): Path of original data.
        pkl_prefix (str): Prefix of info files.
        train_info_path (str | None): Path of training set info.
            Default: None.
......@@ -207,8 +298,8 @@ def create_reduced_point_cloud(data_path,
            Default: None.
        test_info_path (str | None): Path of test set info.
            Default: None.
-        save_path (str | None): Path to save reduced info.
-        with_back (bool | None): Whether to create backup info.
+        save_path (str | None): Path to save reduced point cloud data.
+        with_back (bool): Whether to flip the points to back.
    """
    if train_info_path is None:
        train_info_path = Path(data_path) / f'{pkl_prefix}_infos_train.pkl'
......@@ -219,7 +310,7 @@ def create_reduced_point_cloud(data_path,
    print('create reduced point cloud for training set')
    _create_reduced_point_cloud(data_path, train_info_path, save_path)
-    print('create reduced point cloud for validatin set')
+    print('create reduced point cloud for validation set')
    _create_reduced_point_cloud(data_path, val_info_path, save_path)
    print('create reduced point cloud for testing set')
    _create_reduced_point_cloud(data_path, test_info_path, save_path)
......
import numpy as np
from collections import OrderedDict
from concurrent import futures as futures
from os import path as osp
from pathlib import Path
from skimage import io


-def get_image_index_str(img_idx):
-    return '{:06d}'.format(img_idx)
+def get_image_index_str(img_idx, use_prefix_id=False):
+    if use_prefix_id:
+        return '{:07d}'.format(img_idx)
+    else:
+        return '{:06d}'.format(img_idx)
def get_kitti_info_path(idx,
......@@ -15,8 +19,9 @@ def get_kitti_info_path(idx,
                        file_tail='.png',
                        training=True,
                        relative_path=True,
-                        exist_check=True):
-    img_idx_str = get_image_index_str(idx)
+                        exist_check=True,
+                        use_prefix_id=False):
+    img_idx_str = get_image_index_str(idx, use_prefix_id)
    img_idx_str += file_tail
    prefix = Path(prefix)
    if training:
......@@ -35,36 +40,52 @@ def get_image_path(idx,
                   prefix,
                   training=True,
                   relative_path=True,
-                   exist_check=True):
-    return get_kitti_info_path(idx, prefix, 'image_2', '.png', training,
-                               relative_path, exist_check)
+                   exist_check=True,
+                   info_type='image_2',
+                   use_prefix_id=False):
+    return get_kitti_info_path(idx, prefix, info_type, '.png', training,
+                               relative_path, exist_check, use_prefix_id)


def get_label_path(idx,
                   prefix,
                   training=True,
                   relative_path=True,
-                   exist_check=True):
-    return get_kitti_info_path(idx, prefix, 'label_2', '.txt', training,
-                               relative_path, exist_check)
+                   exist_check=True,
+                   info_type='label_2',
+                   use_prefix_id=False):
+    return get_kitti_info_path(idx, prefix, info_type, '.txt', training,
+                               relative_path, exist_check, use_prefix_id)


def get_velodyne_path(idx,
                      prefix,
                      training=True,
                      relative_path=True,
-                      exist_check=True):
+                      exist_check=True,
+                      use_prefix_id=False):
    return get_kitti_info_path(idx, prefix, 'velodyne', '.bin', training,
-                               relative_path, exist_check)
+                               relative_path, exist_check, use_prefix_id)


def get_calib_path(idx,
                   prefix,
                   training=True,
                   relative_path=True,
-                   exist_check=True):
+                   exist_check=True,
+                   use_prefix_id=False):
    return get_kitti_info_path(idx, prefix, 'calib', '.txt', training,
-                               relative_path, exist_check)
+                               relative_path, exist_check, use_prefix_id)


+def get_pose_path(idx,
+                  prefix,
+                  training=True,
+                  relative_path=True,
+                  exist_check=True,
+                  use_prefix_id=False):
+    return get_kitti_info_path(idx, prefix, 'pose', '.txt', training,
+                               relative_path, exist_check, use_prefix_id)
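The new `use_prefix_id` switch widens the frame index from six to seven digits, leaving room for the split prefix the Waymo converter prepends (`prefix=str(i)` in `waymo_data_prep`, so 0 = training, 1 = validation, 2 = testing). A quick check of the formatting and, under the assumption that `get_kitti_info_path` joins `<split>/<info_type>/<index><tail>`, of a composed relative path:

```python
from tools.data_converter.kitti_data_utils import (get_image_index_str,
                                                   get_velodyne_path)

assert get_image_index_str(7) == '000007'
assert get_image_index_str(7, use_prefix_id=True) == '0000007'

# hypothetical relative path; exist_check=False so the file need not be present
print(get_velodyne_path(1000042, prefix='./data/waymo/kitti_format',
                        training=True, relative_path=True,
                        exist_check=False, use_prefix_id=True))
# expected: 'training/velodyne/1000042.bin'
```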
def get_label_anno(label_path):
......@@ -126,7 +147,6 @@ def get_kitti_image_info(path,
                         num_worker=8,
                         relative_path=True,
                         with_imageshape=True):
-    # image_infos = []
    """
    KITTI annotation format version 2:
    {
......@@ -241,6 +261,185 @@ def get_kitti_image_info(path,
    return list(image_infos)


def get_waymo_image_info(path,
                         training=True,
                         label_info=True,
                         velodyne=False,
                         calib=False,
                         pose=False,
                         image_ids=7481,
                         extend_matrix=True,
                         num_worker=8,
                         relative_path=True,
                         with_imageshape=True,
                         max_sweeps=5):
    """
    Waymo annotation format, in the style of KITTI:
    {
        [optional]points: [N, 3+] point cloud
        [optional, for kitti]image: {
            image_idx: ...
            image_path: ...
            image_shape: ...
        }
        point_cloud: {
            num_features: 6
            velodyne_path: ...
        }
        [optional, for kitti]calib: {
            R0_rect: ...
            Tr_velo_to_cam0: ...
            P0: ...
        }
        annos: {
            location: [num_gt, 3] array
            dimensions: [num_gt, 3] array
            rotation_y: [num_gt] angle array
            name: [num_gt] ground truth name array
            [optional]difficulty: kitti difficulty
            [optional]group_ids: used for multi-part object
        }
    }
    """
    root_path = Path(path)
    if not isinstance(image_ids, list):
        image_ids = list(range(image_ids))

    def map_func(idx):
        info = {}
        pc_info = {'num_features': 6}
        calib_info = {}
        image_info = {'image_idx': idx}
        annotations = None
        if velodyne:
            pc_info['velodyne_path'] = get_velodyne_path(
                idx, path, training, relative_path, use_prefix_id=True)
            points = np.fromfile(
                Path(path) / pc_info['velodyne_path'], dtype=np.float32)
            points = np.copy(points).reshape(-1, pc_info['num_features'])
            # values of the last dim are all the timestamp
            info['timestamp'] = np.int64(points[0, -1])
        image_info['image_path'] = get_image_path(
            idx,
            path,
            training,
            relative_path,
            info_type='image_0',
            use_prefix_id=True)
        if with_imageshape:
            img_path = image_info['image_path']
            if relative_path:
                img_path = str(root_path / img_path)
            image_info['image_shape'] = np.array(
                io.imread(img_path).shape[:2], dtype=np.int32)
        if label_info:
            label_path = get_label_path(
                idx,
                path,
                training,
                relative_path,
                info_type='label_all',
                use_prefix_id=True)
            if relative_path:
                label_path = str(root_path / label_path)
            annotations = get_label_anno(label_path)
        info['image'] = image_info
        info['point_cloud'] = pc_info
        if calib:
            calib_path = get_calib_path(
                idx, path, training, relative_path=False, use_prefix_id=True)
            with open(calib_path, 'r') as f:
                lines = f.readlines()
            P0 = np.array([float(info) for info in lines[0].split(' ')[1:13]
                           ]).reshape([3, 4])
            P1 = np.array([float(info) for info in lines[1].split(' ')[1:13]
                           ]).reshape([3, 4])
            P2 = np.array([float(info) for info in lines[2].split(' ')[1:13]
                           ]).reshape([3, 4])
            P3 = np.array([float(info) for info in lines[3].split(' ')[1:13]
                           ]).reshape([3, 4])
            P4 = np.array([float(info) for info in lines[4].split(' ')[1:13]
                           ]).reshape([3, 4])
            if extend_matrix:
                P0 = _extend_matrix(P0)
                P1 = _extend_matrix(P1)
                P2 = _extend_matrix(P2)
                P3 = _extend_matrix(P3)
                P4 = _extend_matrix(P4)
            R0_rect = np.array([
                float(info) for info in lines[5].split(' ')[1:10]
            ]).reshape([3, 3])
            if extend_matrix:
                rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
                rect_4x4[3, 3] = 1.
                rect_4x4[:3, :3] = R0_rect
            else:
                rect_4x4 = R0_rect
            Tr_velo_to_cam = np.array([
                float(info) for info in lines[6].split(' ')[1:13]
            ]).reshape([3, 4])
            if extend_matrix:
                Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam)
            calib_info['P0'] = P0
            calib_info['P1'] = P1
            calib_info['P2'] = P2
            calib_info['P3'] = P3
            calib_info['P4'] = P4
            calib_info['R0_rect'] = rect_4x4
            calib_info['Tr_velo_to_cam'] = Tr_velo_to_cam
            info['calib'] = calib_info
        if pose:
            pose_path = get_pose_path(
                idx, path, training, relative_path=False, use_prefix_id=True)
            info['pose'] = np.loadtxt(pose_path)
        if annotations is not None:
            info['annos'] = annotations
            info['annos']['camera_id'] = info['annos'].pop('score')
            add_difficulty_to_annos(info)
        sweeps = []
        prev_idx = idx
        while len(sweeps) < max_sweeps:
            prev_info = {}
            prev_idx -= 1
            prev_info['velodyne_path'] = get_velodyne_path(
                prev_idx,
                path,
                training,
                relative_path,
                exist_check=False,
                use_prefix_id=True)
            if_prev_exists = osp.exists(
                Path(path) / prev_info['velodyne_path'])
            if if_prev_exists:
                prev_points = np.fromfile(
                    Path(path) / prev_info['velodyne_path'], dtype=np.float32)
                prev_points = np.copy(prev_points).reshape(
                    -1, pc_info['num_features'])
                prev_info['timestamp'] = np.int64(prev_points[0, -1])
                prev_pose_path = get_pose_path(
                    prev_idx,
                    path,
                    training,
                    relative_path=False,
                    use_prefix_id=True)
                prev_info['pose'] = np.loadtxt(prev_pose_path)
                sweeps.append(prev_info)
            else:
                break
        info['sweeps'] = sweeps
        return info

    with futures.ThreadPoolExecutor(num_worker) as executor:
        image_infos = executor.map(map_func, image_ids)

    return list(image_infos)
def kitti_anno_to_label_file(annos, folder):
    folder = Path(folder)
    for anno in annos:
......
This diff is collapsed.
import argparse
import torch
import torch.nn as nn
from mmcv.runner import save_checkpoint
......