Unverified Commit cce49ba9 authored by Chengyu Wang's avatar Chengyu Wang Committed by GitHub
Browse files

Add openlane v2 (#121)

parent dbf29e61
from .collection import Collection
from .frame import Frame
\ No newline at end of file
# ==============================================================================
# Binaries and/or source for the following packages or projects
# are presented under one or more of the following open source licenses:
# collection.py The OpenLane-V2 Dataset Authors Apache License, Version 2.0
#
# Contact wanghuijie@pjlab.org.cn if you have any issue.
#
# Copyright (c) 2023 The OpenLane-v2 Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from .frame import Frame
from ..io import io
class Collection:
    r"""
    A collection of frames.
    """

    def __init__(self, data_root : str, meta_root : str, collection : str) -> None:
        r"""
        Load the preprocessed meta data of a collection and wrap each entry in a Frame.

        Parameters
        ----------
        data_root : str
            Root path passed through to each Frame (used to resolve raw data such as images).
        meta_root : str
            Directory containing the preprocessed `{collection}.pkl` file.
        collection : str
            Name of collection.

        Raises
        ------
        FileNotFoundError
            If the pickle file of the collection has not been generated yet.
        """
        try:
            meta = io.pickle_load(f'{meta_root}/{collection}.pkl')
        except FileNotFoundError as e:
            # Chain the original error so the failing path stays visible in the traceback.
            raise FileNotFoundError('Please run the preprocessing first to generate pickle file of the collection.') from e
        self.frames = {k: Frame(data_root, v) for k, v in meta.items()}
        # Ordered list of identifiers, enabling index-based access.
        self.keys = list(self.frames.keys())

    def get_frame_via_identifier(self, identifier : tuple) -> Frame:
        r"""
        Returns a frame with the given identifier (split, segment_id, timestamp).

        Parameters
        ----------
        identifier : tuple
            (split, segment_id, timestamp).

        Returns
        -------
        Frame
            A frame identified by the identifier.
        """
        return self.frames[identifier]

    def get_frame_via_index(self, index : int) -> tuple:
        r"""
        Returns a frame with the given index.

        Parameters
        ----------
        index : int
            Position in the collection; raises IndexError when out of range.

        Returns
        -------
        tuple
            (identifier, Frame) - the identifier of the frame and the frame itself.
        """
        return self.keys[index], self.frames[self.keys[index]]
# ==============================================================================
# Binaries and/or source for the following packages or projects
# are presented under one or more of the following open source licenses:
# frame.py The OpenLane-V2 Dataset Authors Apache License, Version 2.0
#
# Contact wanghuijie@pjlab.org.cn if you have any issue.
#
# Copyright (c) 2023 The OpenLane-v2 Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import cv2
import numpy as np
from ..io import io
class Frame:
    r"""
    A data structure containing meta data of a frame.
    """

    def __init__(self, root_path : str, meta : dict) -> None:
        r"""
        Parameters
        ----------
        root_path : str
            Root path used to resolve relative sensor file paths.
        meta : dict
            Meta data of a frame.
        """
        self.root_path = root_path
        self.meta = meta

    def get_camera_list(self) -> list:
        r"""
        Returns a list of camera names.

        Returns
        -------
        list
            A list of str.
        """
        return list(self.meta['sensor'])

    def get_pose(self) -> dict:
        r"""
        Returns the pose of the ego vehicle.

        Returns
        -------
        dict
            {'rotation': [3, 3], 'translation': [3, ]}.
        """
        return self.meta['pose']

    def get_image_path(self, camera : str) -> str:
        r"""
        Returns the image path given a camera.

        Parameters
        ----------
        camera : str

        Returns
        -------
        str
            Image path.
        """
        relative_path = self.meta["sensor"][camera]["image_path"]
        return f'{self.root_path}/{relative_path}'

    def get_rgb_image(self, camera : str) -> np.ndarray:
        r"""
        Returns the RGB image given a camera.

        Parameters
        ----------
        camera : str

        Returns
        -------
        np.ndarray
            RGB Image.
        """
        # OpenCV loads BGR; convert so callers always see RGB.
        bgr_image = io.cv2_imread(self.get_image_path(camera))
        return cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

    def get_intrinsic(self, camera : str) -> dict:
        r"""
        Returns the intrinsic given a camera.

        Parameters
        ----------
        camera : str

        Returns
        -------
        dict
            {'K': [3, 3], 'distortion': [3, ]}.
        """
        sensor = self.meta['sensor'][camera]
        return sensor['intrinsic']

    def get_extrinsic(self, camera : str) -> dict:
        r"""
        Returns the extrinsic given a camera.

        Parameters
        ----------
        camera : str

        Returns
        -------
        dict
            {'rotation': [3, 3], 'translation': [3, ]}.
        """
        sensor = self.meta['sensor'][camera]
        return sensor['extrinsic']

    def get_annotations(self) -> dict:
        r"""
        Returns annotations of the current frame, or None when absent
        (e.g. for test splits without ground truth).

        Returns
        -------
        dict
            {'lane_centerline': list, 'traffic_element': list, 'topology_lclc': list, 'topology_lcte': list}.
        """
        return self.meta.get('annotation')

    def get_annotations_lane_centerlines(self) -> list:
        r"""
        Returns lane centerline annotations of the current frame.

        Returns
        -------
        list
            [{'id': int, 'points': [n, 3]}], or None when the frame has no annotations.
        """
        annotations = self.get_annotations()
        if annotations is None:
            return None
        return annotations['lane_centerline']

    def get_annotations_traffic_elements(self) -> list:
        r"""
        Returns traffic element annotations of the current frame.

        Returns
        -------
        list
            [{'id': int, 'category': int, 'attribute': int, 'points': [2, 2]}], or None when absent.
        """
        annotations = self.get_annotations()
        if annotations is None:
            return None
        return annotations['traffic_element']

    def get_annotations_topology_lclc(self) -> list:
        r"""
        Returns the adjacent matrix of topology_lclc.

        Returns
        -------
        list
            [#lane_centerline, #lane_centerline], or None when absent.
        """
        annotations = self.get_annotations()
        if annotations is None:
            return None
        return annotations['topology_lclc']

    def get_annotations_topology_lcte(self) -> list:
        r"""
        Returns the adjacent matrix of topology_lcte.

        Returns
        -------
        list
            [#lane_centerline, #traffic_element], or None when absent.
        """
        annotations = self.get_annotations()
        if annotations is None:
            return None
        return annotations['topology_lcte']
from .evaluate import evaluate
\ No newline at end of file
# ==============================================================================
# Binaries and/or source for the following packages or projects
# are presented under one or more of the following open source licenses:
# distance.py The OpenLane-V2 Dataset Authors Apache License, Version 2.0
#
# Contact wanghuijie@pjlab.org.cn if you have any issue.
#
# Copyright (c) 2023 The OpenLane-v2 Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from scipy.spatial.distance import cdist, euclidean
from similaritymeasures import frechet_dist
def pairwise(xs: list, ys: list, distance_function: callable, mask: np.ndarray = None, relax: bool = False) -> np.ndarray:
r"""
Calculate pairwise distance.
Parameters
----------
xs : list
List of data in shape (X, ).
ys : list
List of data in shape (Y, ).
distance_function : callable
Function that computes distance between two instance.
mask : np.ndarray
Boolean mask in shape (X, Y).
relax : bool
Relax the result based on distance to ego vehicle.
Returns
-------
np.ndarray
Float in shape (X, Y),
where array[i][j] denotes distance between instance xs[i] and ys[j].
"""
result = np.ones((len(xs), len(ys)), dtype=np.float64) * 1024
for i, x in enumerate(xs):
ego_distance = min([euclidean(p, np.zeros_like(p)) for p in x])
relaxation_factor = max(0.5, 1 - 5e-3 * ego_distance) if relax else 1.0
for j, y in enumerate(ys):
if mask is None or mask[i][j]:
result[i][j] = distance_function(x, y) * relaxation_factor
return result
def chamfer_distance(gt: np.ndarray, pred: np.ndarray) -> float:
r"""
Calculate Chamfer distance.
Parameters
----------
gt : np.ndarray
Curve of (G, N) shape,
where G is the number of data points,
and N is the number of dimmensions.
pred : np.ndarray
Curve of (P, N) shape,
where P is the number of points,
and N is the number of dimmensions.
Returns
-------
float
Chamfer distance
Notes
-----
Adapted from https://github.com/Mrmoore98/VectorMapNet_code/blob/810ae463377f8e724c90a732361a675bcd7cf53b/plugin/datasets/evaluation/precision_recall/tgfg.py#L139.
"""
assert gt.ndim == pred.ndim == 2 and gt.shape[1] == pred.shape[1]
dist_mat = cdist(pred, gt)
dist_pred = dist_mat.min(-1).mean()
dist_gt = dist_mat.min(0).mean()
return (dist_pred + dist_gt) / 2
def frechet_distance(gt: np.ndarray, pred: np.ndarray) -> float:
    r"""
    Calculate the discrete Frechet distance between two curves.

    Parameters
    ----------
    gt : np.ndarray
        Curve of (G, N) shape,
        where G is the number of data points,
        and N is the number of dimensions.
    pred : np.ndarray
        Curve of (P, N) shape,
        where P is the number of points,
        and N is the number of dimensions.

    Returns
    -------
    float
        Discrete Frechet distance.
    """
    assert gt.ndim == 2 and pred.ndim == 2
    assert gt.shape[1] == pred.shape[1]
    # p=2: compare curve points with the Euclidean norm.
    return frechet_dist(pred, gt, p=2)
def iou_distance(gt: np.ndarray, pred: np.ndarray) -> float:
    r"""
    Calculate IoU distance,
    which is 1 - IoU.

    Parameters
    ----------
    gt : np.ndarray
        Bounding box in form [[x1, y1], [x2, y2]].
    pred : np.ndarray
        Bounding box in form [[x1, y1], [x2, y2]].

    Returns
    -------
    float
        IoU distance in [0, 1];
        1.0 when both boxes are degenerate (zero area).
    """
    assert pred.shape == gt.shape == (2, 2)
    # Intersection rectangle.
    bxmin = max(pred[0][0], gt[0][0])
    bymin = max(pred[0][1], gt[0][1])
    bxmax = min(pred[1][0], gt[1][0])
    bymax = min(pred[1][1], gt[1][1])
    inter = max((bxmax - bxmin), 0) * max((bymax - bymin), 0)
    union = (pred[1][0] - pred[0][0]) * (pred[1][1] - pred[0][1]) + (gt[1][0] - gt[0][0]) * (gt[1][1] - gt[0][1]) - inter
    if union <= 0:
        # Both boxes have zero area: guard against division by zero and
        # treat the pair as completely non-overlapping.
        return 1.0
    return 1 - inter / union
# ==============================================================================
# Binaries and/or source for the following packages or projects
# are presented under one or more of the following open source licenses:
# f_score.py The OpenLane-V2 Dataset Authors Apache License, Version 2.0
#
# Contact wanghuijie@pjlab.org.cn if you have any issue.
#
# Adapted from:
# https://github.com/OpenPerceptionX/OpenLane/tree/0aaf62045e897d2b20ecf1357ae7742634b8f972/eval/LANE_evaluation/lane3d
#
# Copyright (c) 2023 The OpenLane-v2 Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Description: This code is to evaluate 3D lane detection. The optimal matching between ground-truth set and predicted
set of lanes are sought via solving a min cost flow.
Evaluation metrics include:
F-scores
x error close (0 - 40 m)
x error far (0 - 100 m)
z error close (0 - 40 m)
z error far (0 - 100 m)
"""
import numpy as np
from scipy.interpolate import interp1d
from ortools.graph import pywrapgraph
def resample_laneline_in_x(input_lane, steps, out_vis=False):
    """
    Interpolate y and z values of a lane at the given x steps, extrapolating
    beyond the x range of the input lane.

    :param input_lane: N x 2 or N x 3 ndarray, one row per point (x, y, z-optional).
    :param steps: a vector of x positions to sample at
    :param out_vis: whether to also output a visibility indicator, which only
                    depends on the input x range (padded by 5 on each side)
    :return: (y_values, z_values), plus visibility when out_vis is True
    """
    # At least two points are required for interpolation.
    assert input_lane.shape[0] >= 2

    # Visibility range, padded by 5 on each side (computed from the raw x column).
    x_min = np.min(input_lane[:, 0]) - 5
    x_max = np.max(input_lane[:, 0]) + 5

    # Pad a zero z column when the lane is 2D so both interpolators exist.
    if input_lane.shape[1] < 3:
        zero_z = np.zeros([input_lane.shape[0], 1], dtype=np.float32)
        input_lane = np.concatenate([input_lane, zero_z], axis=1)

    y_values = interp1d(input_lane[:, 0], input_lane[:, 1], fill_value="extrapolate")(steps)
    z_values = interp1d(input_lane[:, 0], input_lane[:, 2], fill_value="extrapolate")(steps)

    if not out_vis:
        return y_values, z_values
    visibility = np.logical_and(steps >= x_min, steps <= x_max)
    # The tiny epsilon keeps fully-invisible entries strictly positive.
    return y_values, z_values, visibility.astype(np.float32) + 1e-9
def SolveMinCostFlow(adj_mat, cost_mat):
    """
    Solve an assignment problem with min-cost flow.

    :param adj_mat: adjacency matrix with binary values indicating possible matchings between two sets
    :param cost_mat: cost matrix recording the matching cost of every possible pair of items from two sets
    :return: list of [row_index, col_index, unit_cost] triples, one per matched pair;
             empty list if the solver does not reach an optimal solution
    """
    # Instantiate a SimpleMinCostFlow solver.
    min_cost_flow = pywrapgraph.SimpleMinCostFlow()

    # Define the directed graph for the flow.
    # Node layout: 0 = source, 1..cnt_1 = rows (set A),
    # cnt_1+1..cnt_1+cnt_2 = columns (set B), cnt_1+cnt_2+1 = sink.
    cnt_1, cnt_2 = adj_mat.shape
    cnt_nonzero_row = int(np.sum(np.sum(adj_mat, axis=1) > 0))
    cnt_nonzero_col = int(np.sum(np.sum(adj_mat, axis=0) > 0))

    # prepare directed graph for the flow:
    # source -> every row node, every row -> every column, every column -> sink
    start_nodes = np.zeros(cnt_1, dtype=np.int32).tolist() +\
                  np.repeat(np.array(range(1, cnt_1+1)), cnt_2).tolist() + \
                  [i for i in range(cnt_1+1, cnt_1 + cnt_2 + 1)]
    end_nodes = [i for i in range(1, cnt_1+1)] + \
                np.repeat(np.array([i for i in range(cnt_1+1, cnt_1 + cnt_2 + 1)]).reshape([1, -1]), cnt_1, axis=0).flatten().tolist() + \
                [cnt_1 + cnt_2 + 1 for i in range(cnt_2)]
    # Row-column arcs only have capacity where adj_mat permits a match.
    capacities = np.ones(cnt_1, dtype=np.int32).tolist() + adj_mat.flatten().astype(np.int32).tolist() + np.ones(cnt_2, dtype=np.int32).tolist()
    # Only row-column arcs carry a matching cost; source/sink arcs are free.
    costs = (np.zeros(cnt_1, dtype=np.int32).tolist() + cost_mat.flatten().astype(np.int32).tolist() + np.zeros(cnt_2, dtype=np.int32).tolist())

    # Define an array of supplies at each node:
    # push as many flow units as there are matchable rows/columns (whichever is smaller).
    supplies = [min(cnt_nonzero_row, cnt_nonzero_col)] + np.zeros(cnt_1 + cnt_2, dtype=np.int32).tolist() + [-min(cnt_nonzero_row, cnt_nonzero_col)]
    # supplies = [min(cnt_1, cnt_2)] + np.zeros(cnt_1 + cnt_2, dtype=np.int).tolist() + [-min(cnt_1, cnt_2)]
    source = 0
    sink = cnt_1 + cnt_2 + 1

    # Add each arc.
    for i in range(len(start_nodes)):
        min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i], end_nodes[i],
                                                    capacities[i], costs[i])

    # Add node supplies.
    for i in range(len(supplies)):
        min_cost_flow.SetNodeSupply(i, supplies[i])

    match_results = []
    # Find the minimum cost flow between source and sink.
    if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:
        # print('Total cost = ', min_cost_flow.OptimalCost())
        # print()
        for arc in range(min_cost_flow.NumArcs()):
            # Can ignore arcs leading out of source or into sink.
            if min_cost_flow.Tail(arc)!=source and min_cost_flow.Head(arc)!=sink:
                # Arcs in the solution have a flow value of 1. Their start and end nodes
                # give an assignment of worker to task.
                if min_cost_flow.Flow(arc) > 0:
                    # print('set A item %d assigned to set B item %d. Cost = %d' % (
                    #     min_cost_flow.Tail(arc)-1,
                    #     min_cost_flow.Head(arc)-cnt_1-1,
                    #     min_cost_flow.UnitCost(arc)))
                    # Shift node ids back to 0-based indices into each set.
                    match_results.append([min_cost_flow.Tail(arc)-1,
                                          min_cost_flow.Head(arc)-cnt_1-1,
                                          min_cost_flow.UnitCost(arc)])
    else:
        print('There was an issue with the min cost flow input.')

    return match_results
class LaneEval(object):
    """
    Evaluator for 3D lane centerlines: resamples lanes at fixed x positions,
    matches predictions to ground truth via min-cost flow, and reports an F-score.
    """

    def __init__(self):
        # x positions (100 samples over [-50, 50)) at which lanes are resampled.
        self.x_samples = np.linspace(-50, 50, num=100, endpoint=False)
        # Point-wise distance threshold for counting a sample as matched.
        self.dist_th = 1.5
        # Fraction of visible points that must match for a lane to count.
        self.ratio_th = 0.75

    def bench(self, pred_lanes, pred_category, gt_lanes, gt_category):
        """
        Matching predicted lanes and ground-truth lanes in their IPM projection, ignoring z attributes.
        x error, y_error, and z error are all considered, although the matching does not rely on z
        The input of prediction and ground-truth lanes are in ground coordinate, x-right, y-forward, z-up
        The fundamental assumption is: 1. there are no two points from different lanes with identical x, y
                                          but different z's
                                       2. there are no two points from a single lane having identical x, y
                                          but different z's
        If the interest area is within the current drivable road, the above assumptions are almost always valid.

        :param pred_lanes: N X 2 or N X 3 lists depending on 2D or 3D
        :param pred_category: list of category labels, parallel to pred_lanes
        :param gt_lanes: N X 2 or N X 3 lists depending on 2D or 3D
        :param gt_category: list of category labels, parallel to gt_lanes
        :return: (r_lane, p_lane, c_lane, cnt_gt, cnt_pred, match_num)
        """
        r_lane, p_lane, c_lane = 0., 0., 0.
        gt_lanes = [lane for lane in gt_lanes if lane.shape[0] > 1]
        # only consider those pred lanes overlapping with sampling range
        pred_category = [pred_category[k] for k, lane in enumerate(pred_lanes)
                         if lane[0, 0] < self.x_samples[-1] and lane[-1, 0] > self.x_samples[0]]
        pred_lanes = [lane for lane in pred_lanes if lane[0, 0] < self.x_samples[-1] and lane[-1, 0] > self.x_samples[0]]
        pred_category = [pred_category[k] for k, lane in enumerate(pred_lanes) if lane.shape[0] > 1]
        pred_lanes = [lane for lane in pred_lanes if lane.shape[0] > 1]
        # only consider those gt lanes overlapping with sampling range
        # NOTE(review): gt_lanes was already shape-filtered above, while gt_category was not;
        # if any gt lane had <= 1 point, the indices here misalign with the original
        # gt_category. Harmless when all categories are identical (as in bench_one_submit)
        # - TODO confirm before feeding heterogeneous categories.
        gt_category = [gt_category[k] for k, lane in enumerate(gt_lanes)
                       if lane[0, 0] < self.x_samples[-1] and lane[-1, 0] > self.x_samples[0]]
        gt_lanes = [lane for lane in gt_lanes if lane[0, 0] < self.x_samples[-1] and lane[-1, 0] > self.x_samples[0]]
        gt_category = [gt_category[k] for k, lane in enumerate(gt_lanes) if lane.shape[0] > 1]
        gt_lanes = [lane for lane in gt_lanes if lane.shape[0] > 1]

        cnt_gt = len(gt_lanes)
        cnt_pred = len(pred_lanes)

        # Per-lane visibility at each of the 100 sample positions.
        gt_visibility_mat = np.zeros((cnt_gt, 100))
        pred_visibility_mat = np.zeros((cnt_pred, 100))

        # resample gt and pred at x_samples
        for i in range(cnt_gt):
            min_x = np.min(np.array(gt_lanes[i])[:, 0])
            max_x = np.max(np.array(gt_lanes[i])[:, 0])
            y_values, z_values, visibility_vec = resample_laneline_in_x(np.array(gt_lanes[i]), self.x_samples, out_vis=True)
            # After resampling, a lane becomes its (y, z) values at the fixed x samples.
            gt_lanes[i] = np.vstack([y_values, z_values]).T
            gt_visibility_mat[i, :] = np.logical_and(self.x_samples >= min_x, self.x_samples <= max_x)
            gt_visibility_mat[i, :] = np.logical_and(gt_visibility_mat[i, :], visibility_vec)

        for i in range(cnt_pred):
            # # ATTENTION: ensure y mono increase before interpolation: but it can reduce size
            # pred_lanes[i] = make_lane_y_mono_inc(np.array(pred_lanes[i]))
            # pred_lane = prune_3d_lane_by_range(np.array(pred_lanes[i]), self.x_min, self.x_max)
            min_x = np.min(np.array(pred_lanes[i])[:, 0])
            max_x = np.max(np.array(pred_lanes[i])[:, 0])
            y_values, z_values, visibility_vec = resample_laneline_in_x(np.array(pred_lanes[i]), self.x_samples, out_vis=True)
            pred_lanes[i] = np.vstack([y_values, z_values]).T
            pred_visibility_mat[i, :] = np.logical_and(self.x_samples >= min_x, self.x_samples <= max_x)
            pred_visibility_mat[i, :] = np.logical_and(pred_visibility_mat[i, :], visibility_vec)
            # pred_visibility_mat[i, :] = np.logical_and(x_values >= self.x_min, x_values <= self.x_max)

        # at least two-points for both gt and pred
        gt_lanes = [gt_lanes[k] for k in range(cnt_gt) if np.sum(gt_visibility_mat[k, :]) > 1]
        gt_category = [gt_category[k] for k in range(cnt_gt) if np.sum(gt_visibility_mat[k, :]) > 1]
        gt_visibility_mat = gt_visibility_mat[np.sum(gt_visibility_mat, axis=-1) > 1, :]
        cnt_gt = len(gt_lanes)

        pred_lanes = [pred_lanes[k] for k in range(cnt_pred) if np.sum(pred_visibility_mat[k, :]) > 1]
        pred_category = [pred_category[k] for k in range(cnt_pred) if np.sum(pred_visibility_mat[k, :]) > 1]
        pred_visibility_mat = pred_visibility_mat[np.sum(pred_visibility_mat, axis=-1) > 1, :]
        cnt_pred = len(pred_lanes)

        adj_mat = np.zeros((cnt_gt, cnt_pred), dtype=int)
        cost_mat = np.zeros((cnt_gt, cnt_pred), dtype=int)
        # 1000 acts as a large default cost for pairs never evaluated below.
        cost_mat.fill(1000)
        num_match_mat = np.zeros((cnt_gt, cnt_pred), dtype=float)
        # compute curve to curve distance
        for i in range(cnt_gt):
            for j in range(cnt_pred):
                y_dist = np.abs(gt_lanes[i][:, 0] - pred_lanes[j][:, 0])
                z_dist = np.abs(gt_lanes[i][:, 1] - pred_lanes[j][:, 1])

                # apply visibility to penalize different partial matching accordingly
                both_visible_indices = np.logical_and(gt_visibility_mat[i, :] >= 0.5, pred_visibility_mat[j, :] >= 0.5)
                both_invisible_indices = np.logical_and(gt_visibility_mat[i, :] < 0.5, pred_visibility_mat[j, :] < 0.5)
                other_indices = np.logical_not(np.logical_or(both_visible_indices, both_invisible_indices))

                euclidean_dist = np.sqrt(y_dist ** 2 + z_dist ** 2)
                # Samples invisible on both sides cost nothing; samples visible on
                # only one side are charged exactly the threshold.
                euclidean_dist[both_invisible_indices] = 0
                euclidean_dist[other_indices] = self.dist_th

                # if np.average(euclidean_dist) < 2*self.dist_th: # don't prune here to encourage finding perfect match
                num_match_mat[i, j] = np.sum(euclidean_dist < self.dist_th) - np.sum(both_invisible_indices)
                adj_mat[i, j] = 1
                # ATTENTION: use the sum as int type to meet the requirements of min cost flow optimization (int type)
                # using num_match_mat as cost does not work?
                # make sure cost is not set to 0 when it's smaller than 1
                cost_ = np.sum(euclidean_dist)
                if cost_<1 and cost_>0:
                    cost_ = 1
                else:
                    cost_ = (cost_).astype(int)
                cost_mat[i, j] = cost_
                # cost_mat[i, j] = np.sum(euclidean_dist)
                # cost_mat[i, j] = num_match_mat[i, j]

        # solve bipartite matching vis min cost flow solver
        match_results = SolveMinCostFlow(adj_mat, cost_mat)
        match_results = np.array(match_results)

        # only a match with avg cost < self.dist_th is consider valid one
        match_gt_ids = []
        match_pred_ids = []
        match_num = 0
        if match_results.shape[0] > 0:
            for i in range(len(match_results)):
                if match_results[i, 2] < self.dist_th * self.x_samples.shape[0]:
                    match_num += 1
                    gt_i = match_results[i, 0]
                    pred_i = match_results[i, 1]
                    # consider match when the matched points is above a ratio
                    if num_match_mat[gt_i, pred_i] / np.sum(gt_visibility_mat[gt_i, :]) >= self.ratio_th:
                        r_lane += 1
                        match_gt_ids.append(gt_i)
                    if num_match_mat[gt_i, pred_i] / np.sum(pred_visibility_mat[pred_i, :]) >= self.ratio_th:
                        p_lane += 1
                        match_pred_ids.append(pred_i)
                        if pred_category != []:
                            # presumably 20/21 is a pair of interchangeable categories - TODO confirm
                            if pred_category[pred_i] == gt_category[gt_i] or (pred_category[pred_i]==20 and gt_category[gt_i]==21):
                                c_lane += 1    # category matched num
        return r_lane, p_lane, c_lane, cnt_gt, cnt_pred, match_num

    def bench_one_submit(self, gts, preds):
        """
        Evaluate a whole submission: run bench() per frame token and aggregate
        the counts into a single F-score.

        :param gts: dict mapping token -> ground-truth dict with 'lane_centerline'
        :param preds: dict mapping token -> prediction dict with 'lane_centerline'
        :return: F-score (float)
        """
        laneline_stats = []
        # The error accumulators below are never filled here; kept as placeholders
        # from the original OpenLane evaluation code.
        laneline_x_error_close = []
        laneline_x_error_far = []
        laneline_z_error_close = []
        laneline_z_error_far = []
        for token, pred in preds.items():
            pred_lanelines = pred['lane_centerline']
            pred_lanes = [lane['points'] for i, lane in enumerate(pred_lanelines)]
            # All categories are fixed to 1: category accuracy is not evaluated here.
            pred_category = [np.int8(1) for i, lane in enumerate(pred_lanelines)]
            gt = gts[token]

            # extrinsic
            # evaluate lanelines
            # cam_extrinsics = np.array(gt['extrinsic'])
            # # Re-calculate extrinsic matrix based on ground coordinate
            # R_vg = np.array([[0, 1, 0],
            #                  [-1, 0, 0],
            #                  [0, 0, 1]], dtype=float)
            # R_gc = np.array([[1, 0, 0],
            #                  [0, 0, 1],
            #                  [0, -1, 0]], dtype=float)
            # cam_extrinsics[:3, :3] = np.matmul(np.matmul(
            #     np.matmul(np.linalg.inv(R_vg), cam_extrinsics[:3, :3]),
            #     R_vg), R_gc)
            # cam_extrinsics[0:2, 3] = 0.0

            gt_lanes_packed = gt['lane_centerline']
            gt_lanes, gt_category = [], []
            for j, gt_lane_packed in enumerate(gt_lanes_packed):
                # A GT lane can be either 2D or 3D
                # if a GT lane is 3D, the height is intact from 3D GT, so keep it intact here too
                lane = gt_lane_packed['points']

                # extrinsic
                # lane = np.vstack((lane, np.ones((1, lane.shape[1]))))
                # cam_representation = np.linalg.inv(
                #     np.array([[0, 0, 1, 0],
                #               [-1, 0, 0, 0],
                #               [0, -1, 0, 0],
                #               [0, 0, 0, 1]], dtype=float))
                # lane = np.matmul(cam_extrinsics, np.matmul(cam_representation, lane))
                # lane = lane[0:3, :].T

                gt_lanes.append(lane)
                gt_category.append(np.int8(1))

            # N to N matching of lanelines
            r_lane, p_lane, c_lane, cnt_gt, cnt_pred, match_num = self.bench(pred_lanes,
                                                                            pred_category,
                                                                            gt_lanes,
                                                                            gt_category,
                                                                            )
            laneline_stats.append(np.array([r_lane, p_lane, c_lane, cnt_gt, cnt_pred, match_num]))
            # consider x_error z_error only for the matched lanes
            # if r_lane > 0 and p_lane > 0:

        output_stats = []
        laneline_stats = np.array(laneline_stats)
        laneline_x_error_close = np.array(laneline_x_error_close)
        laneline_x_error_far = np.array(laneline_x_error_far)
        laneline_z_error_close = np.array(laneline_z_error_close)
        laneline_z_error_far = np.array(laneline_z_error_far)

        # Column layout of laneline_stats: [r_lane, p_lane, c_lane, cnt_gt, cnt_pred, match_num].
        if np.sum(laneline_stats[:, 3])!= 0:
            R_lane = np.sum(laneline_stats[:, 0]) / (np.sum(laneline_stats[:, 3]))
        else:
            R_lane = np.sum(laneline_stats[:, 0]) / (np.sum(laneline_stats[:, 3]) + 1e-6)   # recall = TP / (TP+FN)
        if np.sum(laneline_stats[:, 4]) != 0:
            P_lane = np.sum(laneline_stats[:, 1]) / (np.sum(laneline_stats[:, 4]))
        else:
            P_lane = np.sum(laneline_stats[:, 1]) / (np.sum(laneline_stats[:, 4]) + 1e-6)   # precision = TP / (TP+FP)
        if np.sum(laneline_stats[:, 5]) != 0:
            C_lane = np.sum(laneline_stats[:, 2]) / (np.sum(laneline_stats[:, 5]))
        else:
            C_lane = np.sum(laneline_stats[:, 2]) / (np.sum(laneline_stats[:, 5]) + 1e-6)   # category_accuracy
        if R_lane + P_lane != 0:
            F_lane = 2 * R_lane * P_lane / (R_lane + P_lane)
        else:
            F_lane = 2 * R_lane * P_lane / (R_lane + P_lane + 1e-6)
        output_stats.append(F_lane)

        return output_stats[0]
# Module-level singleton used as the F-score evaluation entry point.
f1 = LaneEval()
from .io import io
\ No newline at end of file
# ==============================================================================
# Binaries and/or source for the following packages or projects
# are presented under one or more of the following open source licenses:
# io.py The OpenLane-V2 Dataset Authors Apache License, Version 2.0
#
# Contact wanghuijie@pjlab.org.cn if you have any issue.
#
# Copyright (c) 2023 The OpenLane-v2 Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import cv2
import json
import pickle
import numpy as np
class IO:
    r"""
    Thin wrapper around file-system io used across openlanev2;
    swap out these methods to support a different file system.
    """

    def __init__(self) -> None:
        pass

    def os_listdir(self, path : str) -> list:
        r"""
        List the entries of a directory.

        Parameters
        ----------
        path : str

        Returns
        -------
        list
        """
        return os.listdir(path)

    def cv2_imread(self, path : str) -> np.ndarray:
        r"""
        Read an image from disk (BGR, as OpenCV returns it).

        Parameters
        ----------
        path : str

        Returns
        -------
        np.ndarray
        """
        return cv2.imread(path)

    def json_load(self, path : str) -> dict:
        r"""
        Load a JSON file.

        Parameters
        ----------
        path : str

        Returns
        -------
        dict
        """
        with open(path, 'r') as f:
            return json.load(f)

    def pickle_dump(self, path : str, obj : object) -> None:
        r"""
        Serialize an object to a pickle file.

        Parameters
        ----------
        path : str
        obj : object
        """
        with open(path, 'wb') as f:
            pickle.dump(obj, f)

    def pickle_load(self, path : str) -> object:
        r"""
        Deserialize an object from a pickle file.

        Parameters
        ----------
        path : str

        Returns
        -------
        object
        """
        with open(path, 'rb') as f:
            return pickle.load(f)

# Shared singleton imported by the rest of the package.
io = IO()
from .collect import collect
from .check import check_results
\ No newline at end of file
# ==============================================================================
# Binaries and/or source for the following packages or projects
# are presented under one or more of the following open source licenses:
# check.py The OpenLane-V2 Dataset Authors Apache License, Version 2.0
#
# Contact wanghuijie@pjlab.org.cn if you have any issue.
#
# Copyright (c) 2023 The OpenLane-v2 Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from iso3166 import countries
from functools import reduce
def check_results(results : dict) -> bool:
    r"""
    Check format of results.

    Parameters
    ----------
    results : dict
        Dict storing predicted results.

    Returns
    -------
    bool
        Whether the submission carries all the meta keys required for a valid
        submission. Format errors in the predictions themselves raise instead.

    Raises
    ------
    Exception
        If the format of the predictions is invalid.
    """
    valid = True

    if not isinstance(results, dict):
        raise Exception('Type of result should be dict')

    # Meta information describing the submission; missing keys only mark the
    # submission invalid, wrong types raise.
    for key in ['method', 'e-mail', 'institution / company', 'country / region']:
        if key in results:
            if not isinstance(results[key], str):
                raise Exception(f'Type of value in key [{key}] should be str')
            if key == 'country / region':
                try:
                    countries.get(results[key])
                except Exception:
                    raise Exception(f'Please specify a valid [{key}] according to ISO3166')
        else:
            valid = False
            print(f'\n*** Missing key [{key}] for a valid submission ***\n')

    for key in ['authors']:
        if key in results:
            if not isinstance(results[key], list):
                raise Exception(f'Type of value in key [{key}] should be list')
            if len(results[key]) > 10:
                raise Exception('The number of authors should not exceed 10')
        else:
            valid = False
            print(f'\n*** Missing key [{key}] for a valid submission ***\n')

    for key in ['results']:
        if key not in results:
            raise Exception(f'Miss key [{key}].')
        if not isinstance(results[key], dict):
            raise Exception(f'Type of value in key [{key}] should be dict')

    for token, predictions in results['results'].items():
        if not isinstance(predictions, dict):
            raise Exception(f'Type of value in key [results/{token}] should be dict')
        predictions = predictions['predictions']
        if not isinstance(predictions, dict):
            raise Exception(f'Type of value in key [results/{token}/predictions] should be dict')

        ids = {}
        for key in ['lane_centerline', 'traffic_element']:
            if key not in predictions:
                raise Exception(f'Miss key [results/{token}/predictions/{key}].')
            if not isinstance(predictions[key], list):
                raise Exception(f'Type of value in key [results/{token}/predictions/{key}] should be list')
            for instance in predictions[key]:
                for k in ['id', 'points', 'confidence']:
                    if k not in instance:
                        # Fixed: interpolate the missing field name (was a literal 'k').
                        raise Exception(f'Miss key [results/{token}/predictions/{key}/{k}].')
                if key == 'traffic_element':
                    if 'attribute' not in instance:
                        # Fixed: name the actually-missing 'attribute' field (was a literal 'k').
                        raise Exception(f'Miss key [results/{token}/predictions/{key}/attribute].')
                points = instance['points']
                if not isinstance(points, np.ndarray):
                    raise Exception(f'Type of value in key [results/{token}/predictions/{key}/{instance["id"]}] should be np.ndarray')
                points = np.array(points)
                if key == 'lane_centerline' and not (points.ndim == 2 and points.shape[1] == 3):
                    raise Exception(f'Shape of points in instance [results/{token}/predictions/{key}/{instance["id"]}] should be (#points, 3) but not {points.shape}')
                if key == 'traffic_element' and not (points.ndim == 2 and points.shape == (2, 2)):
                    raise Exception(f'Shape of points in instance [results/{token}/predictions/{key}/{instance["id"]}] should be (2, 2) but not {points.shape}')
            ids[key] = [instance['id'] for instance in predictions[key]]
        # IDs must be unique across lane centerlines and traffic elements combined.
        ids_check = reduce(lambda x, y: x + y, ids.values(), [])
        if len(set(ids_check)) != len(ids_check):
            raise Exception(f'IDs are not unique in [results/{token}/predictions]')

        if 'topology_lclc' not in predictions:
            raise Exception(f'Miss key [results/{token}/predictions/topology_lclc].')
        topology_lclc = predictions['topology_lclc']
        if not isinstance(topology_lclc, np.ndarray):
            raise Exception(f'Type of value in key [results/{token}/predictions/topology_lclc] should be np.ndarray')
        topology_lclc = np.array(topology_lclc)
        if not (topology_lclc.ndim == 2 and topology_lclc.shape[0] == len(ids['lane_centerline']) and topology_lclc.shape[1] == len(ids['lane_centerline'])):
            raise Exception(f'Shape of adjacent matrix of [results/{token}/predictions/topology_lclc] should be (#lane_centerline, #lane_centerline) but not {topology_lclc.shape}')

        if 'topology_lcte' not in predictions:
            raise Exception(f'Miss key [results/{token}/predictions/topology_lcte].')
        topology_lcte = predictions['topology_lcte']
        if not isinstance(topology_lcte, np.ndarray):
            raise Exception(f'Type of value in key [results/{token}/predictions/topology_lcte] should be np.ndarray')
        topology_lcte = np.array(topology_lcte)
        if not (topology_lcte.ndim == 2 and topology_lcte.shape[0] == len(ids['lane_centerline']) and topology_lcte.shape[1] == len(ids['traffic_element'])):
            raise Exception(f'Shape of adjacent matrix of [results/{token}/predictions/topology_lcte] should be (#lane_centerline, #traffic_element) but not {topology_lcte.shape}')

    return valid
# ==============================================================================
# Binaries and/or source for the following packages or projects
# are presented under one or more of the following open source licenses:
# collect.py The OpenLane-V2 Dataset Authors Apache License, Version 2.0
#
# Contact wanghuijie@pjlab.org.cn if you have any issue.
#
# Copyright (c) 2023 The OpenLane-v2 Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from tqdm import tqdm
from ..io import io
def collect(root_path : str, data_dict : dict, collection : str, point_interval : int = 1) -> None:
    r"""
    Load meta data of frames listed in data_dict,
    and store them in a single .pkl named after the collection.

    Parameters
    ----------
    root_path : str
    data_dict : dict
        A dict contains ids of data to be preprocessed.
    collection : str
        Name of the collection.
    point_interval : int
        Interval for subsampling points of lane centerlines,
        not subsampling as default.
    """
    # Flatten the nested split -> segment -> timestamp structure into tuples,
    # dropping any file extension from the timestamp.
    identifiers = []
    for split, segment_ids in data_dict.items():
        for segment_id, timestamps in segment_ids.items():
            for timestamp in timestamps:
                identifiers.append((split, segment_id, timestamp.split('.')[0]))

    meta = {}
    for split, segment_id, timestamp in tqdm(identifiers, desc=f'collecting {collection}', ncols=80):
        meta[(split, segment_id, timestamp)] = io.json_load(f'{root_path}/{split}/{segment_id}/info/{timestamp}.json')

    # Convert nested lists into numpy arrays, frame by frame.
    for frame in meta.values():
        frame['pose'] = {k: np.array(v, dtype=np.float64) for k, v in frame['pose'].items()}
        for sensor in frame['sensor'].values():
            for para in ('intrinsic', 'extrinsic'):
                sensor[para] = {k: np.array(v, dtype=np.float64) for k, v in sensor[para].items()}
        if 'annotation' not in frame:
            continue
        annotation = frame['annotation']
        for lane_centerline in annotation['lane_centerline']:
            # optionally subsample centerline points by point_interval
            lane_centerline['points'] = np.array(lane_centerline['points'][::point_interval], dtype=np.float32)
        for traffic_element in annotation['traffic_element']:
            traffic_element['points'] = np.array(traffic_element['points'], dtype=np.float32)
        annotation['topology_lclc'] = np.array(annotation['topology_lclc'], dtype=np.int8)
        annotation['topology_lcte'] = np.array(annotation['topology_lcte'], dtype=np.int8)

    io.pickle_dump(f'{root_path}/{collection}.pkl', meta)
# ==============================================================================
# Binaries and/or source for the following packages or projects
# are presented under one or more of the following open source licenses:
# utils.py The OpenLane-V2 Dataset Authors Apache License, Version 2.0
#
# Contact wanghuijie@pjlab.org.cn if you have any issue.
#
# Copyright (c) 2023 The OpenLane-v2 Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Integer label for each traffic-element attribute name.
# The names suggest traffic-light states (red/green/yellow) and directional
# signs (turns, u-turns, etc.); 0 is the catch-all 'unknown'.
TRAFFIC_ELEMENT_ATTRIBUTE = {
    'unknown': 0,
    'red': 1,
    'green': 2,
    'yellow': 3,
    'go_straight': 4,
    'turn_left': 5,
    'turn_right': 6,
    'no_left_turn': 7,
    'no_right_turn': 8,
    'u_turn': 9,
    'no_u_turn': 10,
    'slight_left': 11,
    'slight_right': 12,
}
def format_metric(metric):
    """Pretty-print a metric dict to stdout.

    For each top-level entry, print its 'score' on one line, then every
    sub-entry whose key does not contain 'score' on an indented line.
    """
    for name, entry in metric.items():
        print(f'{name} - {entry["score"]}')
        sub_entries = [(k, v) for k, v in entry.items() if 'score' not in k]
        for k, v in sub_entries:
            print(f'  {k} - {v}')
from .bev import draw_annotation_bev
from .pv import draw_annotation_pv
from .utils import assign_attribute, assign_topology
\ No newline at end of file
# ==============================================================================
# Binaries and/or source for the following packages or projects
# are presented under one or more of the following open source licenses:
# bev.py The OpenLane-V2 Dataset Authors Apache License, Version 2.0
#
# Contact wanghuijie@pjlab.org.cn if you have any issue.
#
# Copyright (c) 2023 The OpenLane-v2 Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import cv2
import numpy as np
from .utils import THICKNESS, COLOR_DEFAULT, COLOR_DICT, interp_arc
# Pixels per unit of world distance when rasterizing the bird's-eye-view image.
BEV_SCALE = 10
# BEV extent as [x_min, x_max, y_min, y_max] — presumably meters in the ego
# frame (image size and point offsets are derived from these bounds).
BEV_RANGE = [-50, 50, -25, 25]
def _draw_lane_centerline(image, lane_centerline, with_attribute):
    """Rasterize one lane centerline onto the BEV image.

    When with_attribute is set and the lane has non-trivial attributes, one
    slightly offset polyline is drawn per attribute color; otherwise a single
    default-colored polyline is drawn.
    """
    # Map world (x, y) into BEV pixel coordinates, then densify the polyline.
    xy = np.array(lane_centerline['points'])[:, :2]
    xy = BEV_SCALE * (np.array([BEV_RANGE[1], BEV_RANGE[3]]) - xy)
    xy = interp_arc(xy)
    if xy is None:
        return
    attributes = set(lane_centerline['attributes']) - {0}
    if with_attribute and attributes:
        colors = [COLOR_DICT[a] for a in attributes]
    else:
        colors = [COLOR_DEFAULT]
    for offset_idx, color in enumerate(colors):
        shift = offset_idx * THICKNESS * 1.5
        for start, end in zip(xy[:-1], xy[1:]):
            # note the (y, x) order expected by cv2 for this BEV layout
            p1 = (int(start[1] + shift), int(start[0] + shift))
            p2 = (int(end[1] + shift), int(end[0] + shift))
            cv2.line(image, pt1=p1, pt2=p2, color=color, thickness=THICKNESS, lineType=cv2.LINE_AA)
def _draw_vertex(image, lane_centerline):
    """Mark the two endpoints of a lane centerline with filled circles."""
    offset = np.array([BEV_RANGE[1], BEV_RANGE[3]])
    pts = BEV_SCALE * (offset - np.array(lane_centerline['points'])[:, :2])
    radius = int(THICKNESS * 1.5)
    for endpoint in (pts[0], pts[-1]):
        cv2.circle(image, (int(endpoint[1]), int(endpoint[0])), radius, COLOR_DEFAULT, -1)
def draw_annotation_bev(annotation, with_attribute):
    """Render all lane centerlines of one frame onto a gray BEV canvas.

    Centerlines are drawn first, then their endpoint vertices on top;
    returns the resulting image array.
    """
    height = BEV_SCALE * (BEV_RANGE[1] - BEV_RANGE[0])
    width = BEV_SCALE * (BEV_RANGE[3] - BEV_RANGE[2])
    image = np.full((height, width, 3), 191, dtype=np.int32)
    for lane_centerline in annotation['lane_centerline']:
        _draw_lane_centerline(image, lane_centerline, with_attribute)
    for lane_centerline in annotation['lane_centerline']:
        _draw_vertex(image, lane_centerline)
    return image
# ==============================================================================
# Binaries and/or source for the following packages or projects
# are presented under one or more of the following open source licenses:
# pv.py The OpenLane-V2 Dataset Authors Apache License, Version 2.0
#
# Contact wanghuijie@pjlab.org.cn if you have any issue.
#
# Copyright (c) 2023 The OpenLane-v2 Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import cv2
import numpy as np
from .utils import THICKNESS, COLOR_DEFAULT, COLOR_DICT, interp_arc
def _draw_traffic_element(image, traffic_element):
    """Draw the 2D bounding box of a traffic element, colored by its attribute."""
    corner_a = traffic_element['points'][0]
    corner_b = traffic_element['points'][1]
    cv2.rectangle(
        image,
        (int(corner_a[0]), int(corner_a[1])),
        (int(corner_b[0]), int(corner_b[1])),
        color=COLOR_DICT[traffic_element['attribute']],
        thickness=THICKNESS,
        lineType=cv2.LINE_AA,
    )
def _project(points, intrinsic, extrinsic):
if points is None:
return points
points_in_cam_cor = np.linalg.pinv(np.array(extrinsic['rotation'])) \
@ (points.T - np.array(extrinsic['translation']).reshape(3, -1))
points_in_cam_cor = points_in_cam_cor[:, points_in_cam_cor[2, :] > 0]
if points_in_cam_cor.shape[1] > 1:
points_on_image_cor = np.array(intrinsic['K']) @ points_in_cam_cor
points_on_image_cor = points_on_image_cor / (points_on_image_cor[-1, :].reshape(1, -1))
points_on_image_cor = points_on_image_cor[:2, :].T
else:
points_on_image_cor = None
return points_on_image_cor
def _draw_lane_centerline(image, lane_centerline, intrinsic, extrinsic, with_attribute):
    """Project a lane centerline into the camera and draw it as a polyline.

    With attributes enabled and present, one slightly offset polyline is drawn
    per attribute color; otherwise a single default-colored polyline is drawn.
    """
    points = _project(interp_arc(lane_centerline['points']), intrinsic, extrinsic)
    if points is None:
        return
    attributes = set(lane_centerline['attributes']) - {0}
    if with_attribute and attributes:
        colors = [COLOR_DICT[a] for a in attributes]
    else:
        colors = [COLOR_DEFAULT]
    for offset_idx, color in enumerate(colors):
        shift = offset_idx * THICKNESS * 1.5
        for start, end in zip(points[:-1], points[1:]):
            pt1 = (int(start[0] + shift), int(start[1] + shift))
            pt2 = (int(end[0] + shift), int(end[1] + shift))
            try:
                cv2.line(image, pt1=pt1, pt2=pt2, color=color, thickness=THICKNESS, lineType=cv2.LINE_AA)
            except Exception:
                # best-effort drawing: abandon this centerline if cv2 rejects
                # the (possibly out-of-range) projected coordinates
                return
def _draw_topology(image, topology, intrinsic, extrinsic):
    """Draw a curved link from a traffic element's box to its lane centerline.

    The curve starts at the bottom-center of the traffic-element bounding box,
    ends at the middle vertex of the projected centerline, and is colored by
    the traffic element's attribute.
    """
    # Bottom-center of the traffic-element box: mean x of the two corners,
    # y of the second (bottom) corner.
    # NOTE: the original code averaged corner [0][0] with itself, which
    # degenerates to the left edge; the midpoint of the two corners' x
    # coordinates is clearly intended.
    coord_from = [
        (topology['traffic_element'][0][0] + topology['traffic_element'][1][0]) / 2,
        topology['traffic_element'][1][1],
    ]
    points = _project(interp_arc(topology['lane_centerline']), intrinsic, extrinsic)
    if points is None:
        return
    # End at the middle vertex of the projected centerline.
    coord_to = points[len(points) // 2]
    color = COLOR_DICT[topology['attribute']]
    # Fit a parabola through start, a raised midpoint, and end to get an arc.
    mid = ((coord_to[0] + coord_from[0]) / 2, (coord_to[1] + coord_from[1]) / 2 - 50,)
    curve = np.array([coord_from, mid, coord_to])
    pts_fit = np.polyfit(curve[:, 0], curve[:, 1], 2)
    xs = np.linspace(curve[0][0], curve[-1][0], 1000)
    ys = pts_fit[0] * xs**2 + pts_fit[1] * xs + pts_fit[2]
    curve = np.int_([np.array([np.transpose(np.vstack([xs, ys]))])])
    cv2.polylines(image, curve, isClosed=False, color=color, thickness=THICKNESS//3, lineType=cv2.LINE_AA)
def draw_annotation_pv(camera, image, annotation, intrinsic, extrinsic, with_attribute, with_topology):
    """Overlay frame annotations onto a camera image and return it.

    Lane centerlines are drawn for every camera; traffic-element boxes and
    (optionally) topology links are drawn only for the front-facing cameras.
    """
    for lane_centerline in annotation['lane_centerline']:
        _draw_lane_centerline(image, lane_centerline, intrinsic, extrinsic, with_attribute)
    is_front_camera = camera in ('ring_front_center', 'CAM_FRONT')
    if is_front_camera:
        for traffic_element in annotation['traffic_element']:
            _draw_traffic_element(image, traffic_element)
        if with_topology:
            for topology in annotation['topology']:
                _draw_topology(image, topology, intrinsic, extrinsic)
    return image
# ==============================================================================
# Binaries and/or source for the following packages or projects
# are presented under one or more of the following open source licenses:
# utils.py The OpenLane-V2 Dataset Authors Apache License, Version 2.0
#
# Contact wanghuijie@pjlab.org.cn if you have any issue.
#
# Copyright (c) 2023 The OpenLane-v2 Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
# Line/marker thickness in pixels for all drawing helpers.
THICKNESS = 4
# Fallback color used when no attribute-specific color applies
# (channel order depends on the image consumer — presumably BGR for cv2).
COLOR_DEFAULT = (0, 0, 255)
# Color per traffic-element attribute id; keys 0-12 are indexed via
# COLOR_DICT[...['attribute']] in the drawing helpers.
COLOR_DICT = {
    0: COLOR_DEFAULT,
    1: (255, 0, 0),
    2: (0, 255, 0),
    3: (255, 255, 0),
    4: (255, 0, 255),
    5: (0, 128, 128),
    6: (0, 128, 0),
    7: (128, 0, 0),
    8: (128, 0, 128),
    9: (128, 128, 0),
    10: (0, 0, 128),
    11: (64, 64, 64),
    12: (192, 192, 192),
}
def interp_arc(points, t=1000):
    r'''
    Linearly interpolate equally-spaced points along a polyline, either in 2d or 3d.

    Parameters
    ----------
    points : array_like
        Array or list of shape (N,2) or (N,3), representing 2d or 3d-coordinates.
    t : int
        Number of points that will be uniformly interpolated and returned.

    Returns
    -------
    array_like
        Numpy array of shape (t,2) or (t,3), or None when fewer than two
        distinct consecutive points remain after deduplication.

    Notes
    -----
    Adapted from https://github.com/johnwlambert/argoverse2-api/blob/main/src/av2/geometry/interpolate.py#L120
    '''
    # Accept plain Python lists as the docstring promises: the body relies on
    # ndarray-only operations (`.tolist()` on rows, `.dtype`), so normalize
    # the input up front. No-op for inputs that are already arrays.
    points = np.asarray(points)

    # filter consecutive points with same coordinate
    deduped = []
    for point in points:
        point = point.tolist()
        if deduped == [] or point != deduped[-1]:
            deduped.append(point)
    if len(deduped) <= 1:
        # a single (possibly repeated) point defines no arc
        return None
    points = np.array(deduped, dtype=points.dtype)
    assert points.ndim == 2

    # the number of points on the curve itself
    n, _ = points.shape

    # equally spaced in arclength -- the number of points that will be uniformly interpolated
    eq_spaced_points = np.linspace(0, 1, t)

    # Compute the chordal arclength of each segment.
    # Compute differences between each x coord, to get the dx's
    # Do the same to get dy's. Then the hypotenuse length is computed as a norm.
    chordlen = np.linalg.norm(np.diff(points, axis=0), axis=1)  # type: ignore
    # Normalize the arclengths to a unit total
    chordlen = chordlen / np.sum(chordlen)
    # cumulative arclength
    cumarc = np.zeros(len(chordlen) + 1)
    cumarc[1:] = np.cumsum(chordlen)

    # which interval did each point fall in, in terms of eq_spaced_points? (bin index)
    tbins = np.digitize(eq_spaced_points, bins=cumarc).astype(int)  # type: ignore

    # catch any problems at the ends: clamp bin indices into [1, n-1]
    tbins[np.where((tbins <= 0) | (eq_spaced_points <= 0))] = 1  # type: ignore
    tbins[np.where((tbins >= n) | (eq_spaced_points >= 1))] = n - 1

    # fractional position of each sample inside its segment
    s = np.divide((eq_spaced_points - cumarc[tbins - 1]), chordlen[tbins - 1])
    anchors = points[tbins - 1, :]
    # broadcast to scale each row of `points` by a different row of s
    offsets = (points[tbins, :] - points[tbins - 1, :]) * s.reshape(-1, 1)
    points_interp = anchors + offsets
    return points_interp
def assign_attribute(annotation):
    """Attach to each lane centerline the attribute set of its linked traffic
    elements, as given by the lane-to-traffic-element topology matrix.

    Mutates and returns the annotation dict.
    """
    lcte = np.array(annotation['topology_lcte'], dtype=bool)
    for i, lane in enumerate(annotation['lane_centerline']):
        row = lcte[i]
        lane['attributes'] = {te['attribute'] for j, te in enumerate(annotation['traffic_element']) if row[j]}
    return annotation
def assign_topology(annotation):
    """Expand the lane/traffic-element adjacency matrix into an explicit list
    of links, each carrying the lane points, the traffic-element points, and
    the traffic element's attribute.

    Mutates and returns the annotation dict (sets annotation['topology']).
    """
    lcte = np.array(annotation['topology_lcte'], dtype=bool)
    links = []
    n_lanes, n_elements = lcte.shape
    for i in range(n_lanes):
        for j in range(n_elements):
            if not lcte[i][j]:
                continue
            links.append({
                'lane_centerline': annotation['lane_centerline'][i]['points'],
                'traffic_element': annotation['traffic_element'][j]['points'],
                'attribute': annotation['traffic_element'][j]['attribute'],
            })
    annotation['topology'] = links
    return annotation
from .core import *
from .datasets import *
from .models import *
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment