Commit a8863510 authored by Yizhou Wang

v1.0: first commit

parent 16d8dda7
dataset_cfg = dict(
base_root="/mnt/disk2/CRUW/CRUW_MINI",
data_root="/mnt/disk2/CRUW/CRUW_MINI/sequences",
anno_root="/mnt/disk2/CRUW/CRUW_MINI/annotations",
train=dict(
seqs=[
'2019_04_09_BMS1000_PL_NORMAL',
'2019_04_09_CMS1002_PL_NORMAL',
'2019_04_09_PMS1000_PL_NORMAL',
'2019_04_09_PMS3001_PL_NORMAL',
'2019_05_29_MLMS006_CR_BLUR',
'2019_05_29_PBMS007_PL_BLUR',
'2019_09_29_ONRD001_CS_NORMAL',
'2019_09_29_ONRD002_CS_NORMAL',
'2019_09_29_ONRD004_HW_NORMAL',
'2019_10_13_ONRD048_CS_NIGHT'
],
),
valid=dict(
seqs=[],
),
test=dict(
seqs=[
'2019_04_09_BMS1000_PL_NORMAL',
],
),
demo=dict(
seqs=[],
),
)
model_cfg = dict(
type='CDC',
name='rodnet-cdc-win16-wobg',
max_dets=20,
peak_thres=0.3,
ols_thres=0.3,
)
confmap_cfg = dict(
confmap_sigmas={
'pedestrian': 15,
'cyclist': 20,
'car': 30,
# 'van': 40,
# 'truck': 50,
},
confmap_sigmas_interval={
'pedestrian': [5, 15],
'cyclist': [8, 20],
'car': [10, 30],
# 'van': [15, 40],
# 'truck': [20, 50],
},
confmap_length={
'pedestrian': 1,
'cyclist': 2,
'car': 3,
# 'van': 4,
# 'truck': 5,
}
)
train_cfg = dict(
n_epoch=50,
batch_size=4,
lr=0.00001,
lr_step=5, # lr is divided by 10 every lr_step epochs
win_size=16,
train_step=1,
train_stride=8,
log_step=100,
save_step=1000,
)
test_cfg = dict(
test_step=1,
test_stride=8,
rr_min=1.0, # min radar range
rr_max=20.0, # max radar range
ra_min=-60.0, # min radar angle
ra_max=60.0, # max radar angle
)
from .object_class import get_class_id, get_class_name
from .generate import generate_confmap
from .ops import normalize_confmap, add_noise_channel
import numpy as np
import math
from rodnet.core.object_class import get_class_id
def generate_confmap(n_obj, obj_info, dataset, config_dict, gaussian_thres=36):
"""
Generate the confidence map for a radar frame.
:param n_obj: number of objects in this frame
:param obj_info: object metadata for this frame (center indices and category names)
:param dataset: dataset object
:param config_dict: rodnet configurations
:param gaussian_thres: threshold for gaussian distribution in confmaps
:return: generated confmap
"""
n_class = dataset.object_cfg.n_class
classes = dataset.object_cfg.classes
radar_configs = dataset.sensor_cfg.radar_cfg
confmap_sigmas = config_dict['confmap_cfg']['confmap_sigmas']
confmap_sigmas_interval = config_dict['confmap_cfg']['confmap_sigmas_interval']
confmap_length = config_dict['confmap_cfg']['confmap_length']
range_grid = dataset.range_grid
angle_grid = dataset.angle_grid
confmap = np.zeros((n_class, radar_configs['ramap_rsize'], radar_configs['ramap_asize']), dtype=float)
for objid in range(n_obj):
rng_idx = obj_info['center_ids'][objid][0]
agl_idx = obj_info['center_ids'][objid][1]
class_name = obj_info['categories'][objid]
if class_name not in classes:
# print("not recognized class: %s" % class_name)
continue
class_id = get_class_id(class_name, classes)
sigma = 2 * np.arctan(confmap_length[class_name] / (2 * range_grid[rng_idx])) * confmap_sigmas[class_name]
sigma_interval = confmap_sigmas_interval[class_name]
if sigma > sigma_interval[1]:
sigma = sigma_interval[1]
if sigma < sigma_interval[0]:
sigma = sigma_interval[0]
for i in range(radar_configs['ramap_rsize']):
for j in range(radar_configs['ramap_asize']):
distant = (((rng_idx - i) * 2) ** 2 + (agl_idx - j) ** 2) / sigma ** 2
if distant < gaussian_thres: # threshold for confidence maps
value = np.exp(- distant / 2) / (2 * math.pi)
confmap[class_id, i, j] = value if value > confmap[class_id, i, j] else confmap[class_id, i, j]
return confmap
import numpy as np
import math
def normalize_confmap(confmap):
conf_min = np.min(confmap)
conf_max = np.max(confmap)
if conf_max - conf_min != 0:
confmap_norm = (confmap - conf_min) / (conf_max - conf_min)
else:
confmap_norm = confmap
return confmap_norm
def add_noise_channel(confmap, dataset, config_dict):
n_class = dataset.object_cfg.n_class
radar_configs = dataset.sensor_cfg.radar_cfg
confmap_new = np.zeros((n_class + 1, radar_configs['ramap_rsize'], radar_configs['ramap_asize']), dtype=float)
confmap_new[:n_class, :, :] = confmap
conf_max = np.max(confmap, axis=0)
confmap_new[n_class, :, :] = 1.0 - conf_max
return confmap_new
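# A minimal sketch of the confmap generation pipeline above, run against a mocked
# dataset object; the attribute names follow what generate_confmap accesses, but the
# sizes, grids, and object positions are made-up illustrative values.
import numpy as np
from types import SimpleNamespace

mock_dataset = SimpleNamespace(
    object_cfg=SimpleNamespace(n_class=3, classes=['pedestrian', 'cyclist', 'car']),
    sensor_cfg=SimpleNamespace(radar_cfg={'ramap_rsize': 128, 'ramap_asize': 128}),
    range_grid=np.linspace(1.0, 25.0, 128),
    angle_grid=np.linspace(-60.0, 60.0, 128),
)
mock_config = dict(confmap_cfg=dict(
    confmap_sigmas={'pedestrian': 15, 'cyclist': 20, 'car': 30},
    confmap_sigmas_interval={'pedestrian': [5, 15], 'cyclist': [8, 20], 'car': [10, 30]},
    confmap_length={'pedestrian': 1, 'cyclist': 2, 'car': 3},
))
obj_info = {'center_ids': [(40, 64), (80, 30)], 'categories': ['pedestrian', 'car']}

confmap = generate_confmap(2, obj_info, mock_dataset, mock_config)
confmap = normalize_confmap(confmap)
confmap = add_noise_channel(confmap, mock_dataset, mock_config)
print(confmap.shape)  # (4, 128, 128): n_class + 1 channels after adding the noise channel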
def rotate(origin, point, angle):
"""
Rotate a point counterclockwise by a given angle around a given origin.
The angle should be given in radians.
"""
ox, oy = origin
px, py = point
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return qx, qy
def dist_point_segment(point, segment):
x3, y3 = point
(x1, y1), (x2, y2) = segment
px = x2 - x1
py = y2 - y1
norm = px * px + py * py
u = ((x3 - x1) * px + (y3 - y1) * py) / float(norm)
if u > 1:
u = 1
elif u < 0:
u = 0
x = x1 + u * px
y = y1 + u * py
dx = x - x3
dy = y - y3
# Note: If the actual distance does not matter,
# if you only want to compare what this function
# returns to other results of this function, you
# can just return the squared distance instead
# (i.e. remove the sqrt) to gain a little performance
dist = (dx * dx + dy * dy) ** .5
return dist, (x, y)
def rotate_conf_pattern(dx, dy, ori):
dr = (dx * dx + dy * dy) ** 0.5
dtheta = math.atan2(dy, dx)
dtheta -= ori
dx_new = dr * math.cos(dtheta)
dy_new = dr * math.sin(dtheta)
return dx_new, dy_new
# A utility function to calculate area
# of triangle formed by (x1, y1),
# (x2, y2) and (x3, y3)
def area(x1, y1, x2, y2, x3, y3):
return abs((x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2.0)
# A function to check whether point P(x, y)
# lies inside the triangle formed by
# A(x1, y1), B(x2, y2) and C(x3, y3)
def is_inside_triangle(p1, p2, p3, p):
x1, y1 = p1
x2, y2 = p2
x3, y3 = p3
x, y = p
# Calculate area of triangle ABC
A = area(x1, y1, x2, y2, x3, y3)
# Calculate area of triangle PBC
A1 = area(x, y, x2, y2, x3, y3)
# Calculate area of triangle PAC
A2 = area(x1, y1, x, y, x3, y3)
# Calculate area of triangle PAB
A3 = area(x1, y1, x2, y2, x, y)
# Check if sum of A1, A2 and A3
# is same as A
if A == A1 + A2 + A3:
return True
else:
return False
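# Quick numeric checks of the geometry helpers above (illustrative values only).
print(dist_point_segment((0.0, 1.0), ((-1.0, 0.0), (1.0, 0.0))))
# -> (1.0, (0.0, 0.0)): the closest point on the segment is the origin, 1.0 away
print(is_inside_triangle((0, 0), (4, 0), (0, 4), (1, 1)))  # True
print(is_inside_triangle((0, 0), (4, 0), (0, 4), (5, 5)))  # False
print(rotate((0.0, 0.0), (1.0, 0.0), math.pi / 2))
# -> approximately (0.0, 1.0): 90-degree counterclockwise rotation about the origin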
def get_class_id(class_str, classes):
if class_str in classes:
class_id = classes.index(class_str)
else:
if class_str == '':
raise ValueError("No class name found")
else:
class_id = -1000
return class_id
def get_class_name(class_id, classes):
n_class = len(classes)
if 0 <= class_id < n_class:
class_name = classes[class_id]
elif class_id == -1000:
class_name = '__background'
else:
raise ValueError("Class ID is not defined")
return class_name
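# Example of the class-name/class-id mapping above, with an illustrative class list.
classes = ['pedestrian', 'cyclist', 'car']
print(get_class_id('cyclist', classes))    # 1
print(get_class_name(1, classes))          # 'cyclist'
print(get_class_id('van', classes))        # -1000, sentinel for an unlisted class
print(get_class_name(-1000, classes))      # '__background'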
from .postprocess import post_process, post_process_single_frame
from .ols import get_ols_btw_objects, get_ols_btw_pts, ols
from .lnms import lnms
from .output_results import write_dets_results, write_dets_results_single_frame
from .merge_confmaps import ConfmapStack
import numpy as np
from .ols import get_ols_btw_objects
def lnms(obj_dicts_in_class, dataset, config_dict):
"""
Location-based NMS: greedily keep the highest-scoring peak and suppress nearby peaks whose OLS with it exceeds the configured threshold.
:param obj_dicts_in_class: list of detection dicts for one class
:param dataset: CRUW dataset object (provides class names and object sizes)
:param config_dict: rodnet configurations (max_dets and ols_thres come from model_cfg)
:return: detection matrix [max_dets, 4] with (class_id, range_id, angle_id, score)
"""
model_configs = config_dict['model_cfg']
detect_mat = - np.ones((model_configs['max_dets'], 4))
cur_det_id = 0
# sort peaks by confidence score
inds = np.argsort([-d['score'] for d in obj_dicts_in_class], kind='mergesort')
dts = [obj_dicts_in_class[i] for i in inds]
while len(dts) != 0:
if cur_det_id >= model_configs['max_dets']:
break
p_star = dts[0]
detect_mat[cur_det_id, 0] = p_star['class_id']
detect_mat[cur_det_id, 1] = p_star['range_id']
detect_mat[cur_det_id, 2] = p_star['angle_id']
detect_mat[cur_det_id, 3] = p_star['score']
cur_det_id += 1
del dts[0]
# suppress remaining detections that overlap the current peak too much;
# filter into a new list instead of deleting items while iterating over them
dts = [pi for pi in dts if get_ols_btw_objects(p_star, pi, dataset) <= model_configs['ols_thres']]
return detect_mat
import numpy as np
class ConfmapStack:
def __init__(self, confmap_shape):
self.confmap = np.zeros(confmap_shape)
self.count = 0
self.next = None
self.ready = False
def append(self, confmap):
self.confmap = (self.confmap * self.count + confmap) / (self.count + 1)
self.count += 1
def setNext(self, _genconfmap):
self.next = _genconfmap
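# Sketch of how ConfmapStack averages overlapping sliding-window predictions for one
# frame by keeping a running mean; shapes and the number of windows are illustrative.
import numpy as np

frame_buffer = ConfmapStack(confmap_shape=(3, 128, 128))
for _ in range(4):  # e.g. the same frame is covered by 4 overlapping test windows
    frame_buffer.append(np.random.rand(3, 128, 128))
print(frame_buffer.count)           # 4
print(frame_buffer.confmap.shape)   # (3, 128, 128), the mean of the 4 predictions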
import math
from cruw.mapping.coor_transform import pol2cart_ramap
from rodnet.core import get_class_name
def get_ols_btw_objects(obj1, obj2, dataset):
classes = dataset.object_cfg.classes
object_sizes = dataset.object_cfg.sizes
if obj1['class_id'] != obj2['class_id']:
print('Error: Computing OLS between different classes!')
raise TypeError("OLS can only be compute between objects with same class. ")
if obj1['score'] < obj2['score']:
raise TypeError("Confidence score of obj1 should not be smaller than obj2. "
"obj1['score'] = %s, obj2['score'] = %s" % (obj1['score'], obj2['score']))
classid = obj1['class_id']
class_str = get_class_name(classid, classes)
rng1 = obj1['range']
agl1 = obj1['angle']
rng2 = obj2['range']
agl2 = obj2['angle']
x1, y1 = pol2cart_ramap(rng1, agl1)
x2, y2 = pol2cart_ramap(rng2, agl2)
dx = x1 - x2
dy = y1 - y2
s_square = x1 ** 2 + y1 ** 2
kappa = object_sizes[class_str] / 100 # TODO: tune kappa
e = (dx ** 2 + dy ** 2) / 2 / (s_square * kappa)
ols = math.exp(-e)
return ols
def get_ols_btw_pts(pt1, pt2, class_id, dataset):
classes = dataset.object_cfg.classes
object_sizes = dataset.object_cfg.sizes
class_str = get_class_name(class_id, classes)
x1, y1 = pol2cart_ramap(pt1[0], pt1[1])
x2, y2 = pol2cart_ramap(pt2[0], pt2[1])
dx = x1 - x2
dy = y1 - y2
s_square = x1 ** 2 + y1 ** 2
kappa = object_sizes[class_str] / 100 # TODO: tune kappa
e = (dx ** 2 + dy ** 2) / 2 / (s_square * kappa)
ols = math.exp(-e)
return ols
def ols(dist, s, kappa):
e = dist ** 2 / 2 / (s ** 2 * kappa)
return math.exp(-e)
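# Numeric sanity check of the OLS formula used above: ols = exp(-d^2 / (2 * s^2 * kappa)),
# where d is the distance between the two detections, s the distance of the reference
# detection from the radar, and kappa the per-class size constant (illustrative values).
print(ols(dist=0.0, s=5.0, kappa=0.2))   # 1.0, identical locations
print(ols(dist=1.0, s=5.0, kappa=0.2))   # ~0.905
print(ols(dist=5.0, s=5.0, kappa=0.2))   # ~0.082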
import numpy as np
def detect_peaks(image, threshold=0.3):
peaks_row = []
peaks_col = []
height, width = image.shape
for h in range(1, height - 1):
for w in range(2, width - 2):
area = image[h - 1:h + 2, w - 2:w + 3]
center = image[h, w]
flag = np.where(area >= center)
if flag[0].shape[0] == 1 and center > threshold:
peaks_row.append(h)
peaks_col.append(w)
return peaks_row, peaks_col
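# Minimal check of detect_peaks on a synthetic confidence map containing a single
# Gaussian blob (sizes are illustrative); only strict local maxima above the threshold
# within the 3x5 neighborhood survive.
import numpy as np

r = np.arange(32).reshape(-1, 1)
a = np.arange(32).reshape(1, -1)
synthetic = np.exp(-((r - 10) ** 2 + (a - 20) ** 2) / (2 * 3.0 ** 2))
rows, cols = detect_peaks(synthetic, threshold=0.3)
print(list(zip(rows, cols)))  # [(10, 20)]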
from rodnet.core.object_class import get_class_name
def write_dets_results(res, data_id, save_path, dataset):
batch_size, win_size, max_dets, _ = res.shape
classes = dataset.object_cfg.classes
with open(save_path, 'a+') as f:
for b in range(batch_size):
for w in range(win_size):
for d in range(max_dets):
cla_id = int(res[b, w, d, 0])
if cla_id == -1:
continue
row_id = res[b, w, d, 1]
col_id = res[b, w, d, 2]
conf = res[b, w, d, 3]
f.write("%d %s %d %d %s\n" % (data_id + w, get_class_name(cla_id, classes), row_id, col_id, conf))
def write_dets_results_single_frame(res, data_id, save_path, dataset):
max_dets, _ = res.shape
classes = dataset.object_cfg.classes
with open(save_path, 'a+') as f:
for d in range(max_dets):
cla_id = int(res[d, 0])
if cla_id == -1:
continue
row_id = res[d, 1]
col_id = res[d, 2]
conf = res[d, 3]
f.write("%d %s %d %d %s\n" % (data_id, get_class_name(cla_id, classes), row_id, col_id, conf))
import numpy as np
from .ops import detect_peaks
from .lnms import lnms
from rodnet.utils.visualization import visualize_postprocessing
def search_surround(peak_conf, row, col, conf_valu, search_size):
height = peak_conf.shape[0]
width = peak_conf.shape[1]
half_size = int((search_size - 1) / 2)
row_start = max(half_size, row - half_size)
row_end = min(height - half_size - 1, row + half_size)
col_start = max(half_size, col - half_size)
col_end = min(width - half_size - 1, col + half_size)
# print(row_start)
No_bigger = True
for i in range(row_start, row_end + 1):
for j in range(col_start, col_end + 1):
if peak_conf[i, j] > conf_valu:
# current conf is not big enough, skip this peak
No_bigger = False
break
return No_bigger, [row_start, row_end, col_start, col_end]
def peak_mapping(peak_conf, peak_class, list_row, list_col, confmap, search_size, o_class):
for i in range(len(list_col)):
row_id = list_row[i]
col_id = list_col[i]
conf_valu = confmap[row_id, col_id]
flag, indices = search_surround(peak_conf, row_id, col_id, conf_valu, search_size)
if flag:
# clear all detections in search window
n_rows = indices[1] - indices[0] + 1
n_cols = indices[3] - indices[2] + 1
peak_conf[indices[0]:indices[1] + 1, indices[2]:indices[3] + 1] = np.zeros((n_rows, n_cols))
peak_class[indices[0]:indices[1] + 1, indices[2]:indices[3] + 1] = - np.ones((n_rows, n_cols))
# write the detected objects to matrix
peak_conf[row_id, col_id] = conf_valu
peak_class[row_id, col_id] = o_class  # NOTE: assumes o_class is a numeric class id; the original class_ids[o_class] lookup referenced an undefined global
return peak_conf, peak_class
def find_greatest_points(peak_conf, peak_class, max_dets):
detect_mat = - np.ones((max_dets, 4))
height = peak_conf.shape[0]
width = peak_conf.shape[1]
peak_flatten = peak_conf.flatten()
indic = np.argsort(peak_flatten)
ind_len = indic.shape[0]
if ind_len >= max_dets:
choos_ind = np.flip(indic[-max_dets:ind_len])
else:
choos_ind = np.flip(indic)
for count, ele_ind in enumerate(choos_ind):
row = ele_ind // width
col = ele_ind % width
if peak_conf[row, col] > 0:
detect_mat[count, 0] = peak_class[row, col]
detect_mat[count, 1] = row
detect_mat[count, 2] = col
detect_mat[count, 3] = peak_conf[row, col]
return detect_mat
def post_process(confmaps, dataset, config_dict):
"""
Post-processing for RODNet: peak detection followed by location-based NMS
:param confmaps: predicted confidence maps [B, n_class, win_size, ramap_r, ramap_a]
:param dataset: CRUW dataset object
:param config_dict: rodnet configurations (peak_thres and max_dets come from model_cfg)
:return: detections [B, win_size, max_dets, 4]
"""
n_class = dataset.object_cfg.n_class
model_configs = config_dict['model_cfg']
rng_grid = dataset.range_grid
agl_grid = dataset.angle_grid
max_dets = model_configs['max_dets']
peak_thres = model_configs['peak_thres']
batch_size, class_size, win_size, height, width = confmaps.shape
if class_size != n_class:
raise TypeError("Wrong class number setting. ")
res_final = - np.ones((batch_size, win_size, max_dets, 4))
for b in range(batch_size):
for w in range(win_size):
detect_mat = []
for c in range(class_size):
obj_dicts_in_class = []
confmap = np.squeeze(confmaps[b, c, w, :, :])
rowids, colids = detect_peaks(confmap, threshold=peak_thres)
for ridx, aidx in zip(rowids, colids):
rng = rng_grid[ridx]
agl = agl_grid[aidx]
conf = confmap[ridx, aidx]
obj_dict = {'frame_id': None, 'range': rng, 'angle': agl, 'range_id': ridx, 'angle_id': aidx,
'class_id': c, 'score': conf}
obj_dicts_in_class.append(obj_dict)
detect_mat_in_class = lnms(obj_dicts_in_class, dataset, config_dict)
detect_mat.append(detect_mat_in_class)
detect_mat = np.array(detect_mat)
detect_mat = np.reshape(detect_mat, (class_size * max_dets, 4))
detect_mat = detect_mat[detect_mat[:, 3].argsort(kind='mergesort')[::-1]]
res_final[b, w, :, :] = detect_mat[:max_dets]
return res_final
def post_process_single_frame(confmaps, dataset, config_dict):
"""
Post-processing for RODNet
:param confmaps: predicted confidence maps [n_class, ramap_r, ramap_a]
:param dataset: CRUW dataset object
:param config_dict: rodnet configurations (peak_thres and max_dets come from model_cfg)
:return: detections [max_dets, 4]
"""
n_class = dataset.object_cfg.n_class
rng_grid = dataset.range_grid
agl_grid = dataset.angle_grid
model_configs = config_dict['model_cfg']
max_dets = model_configs['max_dets']
peak_thres = model_configs['peak_thres']
class_size, height, width = confmaps.shape
if class_size != n_class:
raise TypeError("Wrong class number setting. ")
res_final = - np.ones((max_dets, 4))
detect_mat = []
for c in range(class_size):
obj_dicts_in_class = []
confmap = confmaps[c, :, :]
rowids, colids = detect_peaks(confmap, threshold=peak_thres)
for ridx, aidx in zip(rowids, colids):
rng = rng_grid[ridx]
agl = agl_grid[aidx]
conf = confmap[ridx, aidx]
obj_dict = dict(
frame_id=None,
range=rng,
angle=agl,
range_id=ridx,
angle_id=aidx,
class_id=c,
score=conf,
)
obj_dicts_in_class.append(obj_dict)
detect_mat_in_class = lnms(obj_dicts_in_class, dataset, config_dict)
detect_mat.append(detect_mat_in_class)
detect_mat = np.array(detect_mat)
detect_mat = np.reshape(detect_mat, (class_size * max_dets, 4))
detect_mat = detect_mat[detect_mat[:, 3].argsort(kind='mergesort')[::-1]]
res_final[:, :] = detect_mat[:max_dets]
return res_final
if __name__ == "__main__":
input_test = np.random.random_sample((1, 3, 16, 122, 91))
res_final = post_process(input_test, dataset, config_dict)  # a CRUW dataset object and config_dict must be loaded beforehand
for b in range(1):
for w in range(16):
confmaps = np.squeeze(input_test[b, :, w, :, :])
visualize_postprocessing(confmaps, res_final[b, w, :, :])
from .chirp_ops import chirp_amp
import numpy as np
def chirp_amp(chirp, radar_data_type):
"""
Calculate amplitude of a chirp
:param chirp: radar data of one chirp (w x h x 2) or (2 x w x h)
:param radar_data_type: current available types include 'RI', 'RISEP', 'AP', 'APSEP'
:return: amplitude map for the input chirp (w x h)
"""
c0, c1, c2 = chirp.shape
if radar_data_type == 'RI' or radar_data_type == 'RISEP':
if c0 == 2:
chirp_abs = np.sqrt(chirp[0, :, :] ** 2 + chirp[1, :, :] ** 2)
elif c2 == 2:
chirp_abs = np.sqrt(chirp[:, :, 0] ** 2 + chirp[:, :, 1] ** 2)
else:
raise ValueError
elif radar_data_type == 'AP' or radar_data_type == 'APSEP':
if c0 == 2:
chirp_abs = chirp[0, :, :]
elif c2 == 2:
chirp_abs = chirp[:, :, 0]
else:
raise ValueError
else:
raise ValueError
return chirp_abs
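# Quick check of chirp_amp with a random real/imaginary chirp in channel-first layout
# (2 x H x W), matching the 'RISEP' branch above; the sizes are illustrative.
import numpy as np

chirp = np.random.randn(2, 128, 128).astype(np.float32)
amp = chirp_amp(chirp, radar_data_type='RISEP')
print(amp.shape)  # (128, 128)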
import numpy as np
import random
import ctypes
import torch
from multiprocessing import Array, Process
class CRDataLoader():
def __init__(self, dataset, shuffle=False, num_parallel_batch=2, noise_channel=False):
# parameters settings
self.dataset = dataset
self.config_dict = self.dataset.config_dict
self.n_class = self.config_dict['class_cfg']['n_class']
self.batch_size = self.config_dict['train_cfg']['batch_size']
self.radar_configs = self.config_dict['dataset_cfg']['radar_cfg']
self.model_configs = self.config_dict['model_cfg']
self.ramap_rsize = self.radar_configs['ramap_rsize']
self.ramap_asize = self.radar_configs['ramap_asize']
self.n_chirps = self.dataset.n_chirps
if noise_channel:
self.n_class = self.n_class + 1
self.length = len(dataset) // self.batch_size + (1 if len(dataset) % self.batch_size != 0 else 0)
self.loading_seq = [i for i in range(len(dataset))]
if shuffle:
random.shuffle(self.loading_seq)
self.restart = False
assert isinstance(num_parallel_batch, int) and num_parallel_batch > 0
self.win_size = dataset.win_size
n_shradar = num_parallel_batch * self.batch_size * 2 * dataset.win_size * self.n_chirps * self.ramap_rsize \
* self.ramap_asize
self.shradar = Array(ctypes.c_double, n_shradar)
n_shconf = num_parallel_batch * self.batch_size * self.n_class * dataset.win_size * self.ramap_rsize \
* self.ramap_asize
self.shconf = Array(ctypes.c_double, n_shconf)
self.num_parallel_batch = num_parallel_batch
def __len__(self):
return self.length
def __iter__(self):
data_dict_stack = [None, None]
procs = [None, None]
random.shuffle(self.loading_seq)
cur_loading_seq = self.loading_seq[:self.batch_size]
data_dict_stack[0] = self.dataset.getBatch(cur_loading_seq)
procs[0] = Process(target=self.getBatchArray,
args=(self.shradar, self.shconf, data_dict_stack[0], cur_loading_seq, 0))
procs[0].start()
index_num = self.num_parallel_batch - 1
for i in range(self.__len__()):
index_num = (index_num + 1) % self.num_parallel_batch
procs[index_num].join()
procs[index_num] = None
if i < self.length - self.num_parallel_batch:
cur_loading_seq = self.loading_seq[
self.batch_size * (i + self.num_parallel_batch - 1): self.batch_size * (
i + self.num_parallel_batch)]
else:
cur_loading_seq = self.loading_seq[self.batch_size * (i + self.num_parallel_batch - 1):]
if i < self.length - self.num_parallel_batch + 1:
stack_id_next = (index_num + 1) % self.num_parallel_batch
data_dict_stack[stack_id_next] = self.dataset.getBatch(cur_loading_seq)
procs[stack_id_next] = Process(target=self.getBatchArray,
args=(self.shradar, self.shconf, data_dict_stack[stack_id_next],
cur_loading_seq, (index_num + 1) % self.num_parallel_batch))
procs[stack_id_next].start()
shradarnp = np.frombuffer(self.shradar.get_obj())
if self.n_chirps == 1:
shradarnp = shradarnp.reshape(self.num_parallel_batch, self.batch_size, 2, self.win_size,
self.ramap_rsize, self.ramap_asize)
else:
shradarnp = shradarnp.reshape(self.num_parallel_batch, self.batch_size, 2, self.win_size,
self.n_chirps, self.ramap_rsize, self.ramap_asize)
shconfnp = np.frombuffer(self.shconf.get_obj())
shconfnp = shconfnp.reshape(self.num_parallel_batch, self.batch_size, self.n_class, self.win_size,
self.ramap_rsize, self.ramap_asize)
if i < self.length - 1:
data_length = self.batch_size
else:
data_length = len(self.dataset) - self.batch_size * i
data_dict_return = dict(
status=data_dict_stack[index_num]['status'],
image_paths=data_dict_stack[index_num]['image_paths'],
radar_data=torch.from_numpy(shradarnp[index_num, :data_length, :, :, :, :]),
anno=dict(
obj_infos=data_dict_stack[index_num]['anno']['obj_infos'],
confmaps=torch.from_numpy(shconfnp[index_num, :data_length, :, :, :, :]),
)
)
yield data_dict_return
def __getitem__(self, index):
if self.restart:
random.shuffle(self.loading_seq)
if index == self.length - 1:
self.restart = True
results = self.dataset.getBatch(self.loading_seq[self.batch_size * index:])
else:
results = self.dataset.getBatch(self.loading_seq[self.batch_size * index: self.batch_size * (index + 1)])
results = list(results)
for i in range(2):
results[i] = torch.from_numpy(results[i])
return results
def getBatchArray(self, shradar, shconf, data_dict, loading_seq, index):
shradarnp = np.frombuffer(shradar.get_obj())
if self.n_chirps == 1:
shradarnp = shradarnp.reshape(self.num_parallel_batch, self.batch_size, 2, self.win_size,
self.ramap_rsize, self.ramap_asize)
else:
shradarnp = shradarnp.reshape(self.num_parallel_batch, self.batch_size, 2, self.win_size, self.n_chirps,
self.ramap_rsize, self.ramap_asize)
shconfnp = np.frombuffer(shconf.get_obj())
shconfnp = shconfnp.reshape(self.num_parallel_batch, self.batch_size, self.n_class, self.win_size,
self.ramap_rsize, self.ramap_asize)
shradarnp[index, :len(loading_seq), :, :, :, :] = data_dict['radar_data']
shconfnp[index, :len(loading_seq), :, :, :, :] = data_dict['anno']['confmaps']
# def getBatchObjInfo(self, index):
# if index == self.length - 1:
# results = self.dataset.getBatchObjInfo(self.loading_seq[self.batch_size * index:])
# else:
# results = self.dataset.getBatchObjInfo(
# self.loading_seq[self.batch_size * index: self.batch_size * (index + 1)])
# return results
import os
import time
import random
import pickle
import numpy as np
from tqdm import tqdm
from torch.utils import data
from .loaders import list_pkl_filenames
class CRDataset(data.Dataset):
"""
Pytorch Dataloader for CR Dataset
:param data_dir: directory of the prepared data details (pickle files)
:param dataset: CRUW dataset object
:param config_dict: rodnet configurations
:param split: dataset split: train, valid, test, or demo
:param is_random_chirp: whether to randomly pick the loaded chirp
:param subset: load a single sequence only (optional)
:param noise_channel: whether to append a noise channel to the ground-truth confmaps
"""
def __init__(self, data_dir, dataset, config_dict, split, is_random_chirp=True, subset=None, noise_channel=False):
# parameters settings
self.data_dir = data_dir
self.dataset = dataset
self.config_dict = config_dict
self.n_class = dataset.object_cfg.n_class
self.win_size = config_dict['train_cfg']['win_size']
self.split = split
if split == 'train' or split == 'valid':
self.step = config_dict['train_cfg']['train_step']
self.stride = config_dict['train_cfg']['train_stride']
else:
self.step = config_dict['test_cfg']['test_step']
self.stride = config_dict['test_cfg']['test_stride']
self.is_random_chirp = is_random_chirp
self.n_chirps = 1
self.noise_channel = noise_channel
# Dataloader for MNet
if 'mnet_cfg' in self.config_dict['model_cfg']:
in_chirps, out_channels = self.config_dict['model_cfg']['mnet_cfg']
self.n_chirps = in_chirps
n_radar_chirps = self.config_dict['dataset_cfg']['radar_cfg']['n_chirps']
self.chirp_ids = []
for c in range(in_chirps):
self.chirp_ids.append(int(n_radar_chirps / in_chirps * c))
# dataset initialization
self.image_paths = []
self.radar_paths = []
self.obj_infos = []
self.confmaps = []
self.n_data = 0
self.index_mapping = []
if subset is not None:
self.data_files = [subset + '.pkl']
else:
self.data_files = list_pkl_filenames(config_dict['dataset_cfg'], split)
self.seq_names = [name.split('.')[0] for name in self.data_files]
self.n_seq = len(self.seq_names)
split_folder = split
for seq_id, data_file in enumerate(tqdm(self.data_files)):
data_file_path = os.path.join(data_dir, split_folder, data_file)
data_details = pickle.load(open(data_file_path, 'rb'))
if split == 'train' or split == 'valid':
assert data_details['anno'] is not None
n_frame = data_details['n_frame']
self.image_paths.append(data_details['image_paths'])
self.radar_paths.append(data_details['radar_paths'])
n_data_in_seq = (n_frame - (self.win_size * self.step - 1)) // self.stride + (
1 if (n_frame - (self.win_size * self.step - 1)) % self.stride > 0 else 0)
self.n_data += n_data_in_seq
for data_id in range(n_data_in_seq):
self.index_mapping.append([seq_id, data_id * self.stride])
if data_details['anno'] is not None:
self.obj_infos.append(data_details['anno']['metadata'])
self.confmaps.append(data_details['anno']['confmaps'])
def __len__(self):
"""Total number of data/label pairs"""
return self.n_data
def __getitem__(self, index):
seq_id, data_id = self.index_mapping[index]
seq_name = self.seq_names[seq_id]
image_paths = self.image_paths[seq_id]
radar_paths = self.radar_paths[seq_id]
if len(self.confmaps) != 0:
this_seq_obj_info = self.obj_infos[seq_id]
this_seq_confmap = self.confmaps[seq_id]
data_dict = dict(
status=True,
seq_names=seq_name,
image_paths=[]
)
if self.is_random_chirp:
chirp_id = random.randint(0, self.dataset.sensor_cfg.radar_cfg['n_chirps'] - 1)
else:
chirp_id = 0
# Dataloader for MNet
if 'mnet_cfg' in self.config_dict['model_cfg']:
chirp_id = self.chirp_ids
radar_configs = self.dataset.sensor_cfg.radar_cfg
ramap_rsize = radar_configs['ramap_rsize']
ramap_asize = radar_configs['ramap_asize']
# Load radar data
try:
if radar_configs['data_type'] == 'RI' or radar_configs['data_type'] == 'AP': # drop this format
radar_npy_win = np.zeros((self.win_size, ramap_rsize, ramap_asize, 2), dtype=np.float32)
for idx, frameid in enumerate(
range(data_id, data_id + self.win_size * self.step, self.step)):
radar_npy_win[idx, :, :, :] = np.load(radar_paths[frameid])
data_dict['image_paths'].append(image_paths[frameid])
elif radar_configs['data_type'] == 'RISEP' or radar_configs['data_type'] == 'APSEP':
if isinstance(chirp_id, int):
radar_npy_win = np.zeros((self.win_size, ramap_rsize, ramap_asize, 2), dtype=np.float32)
for idx, frameid in enumerate(
range(data_id, data_id + self.win_size * self.step, self.step)):
radar_npy_win[idx, :, :, :] = np.load(radar_paths[frameid][chirp_id])
data_dict['image_paths'].append(image_paths[frameid])
elif isinstance(chirp_id, list):
radar_npy_win = np.zeros((self.win_size, self.n_chirps, ramap_rsize, ramap_asize, 2),
dtype=np.float32)
for idx, frameid in enumerate(
range(data_id, data_id + self.win_size * self.step, self.step)):
for cid, c in enumerate(chirp_id):
npy_path = radar_paths[frameid][c]
radar_npy_win[idx, cid, :, :, :] = np.load(npy_path)
data_dict['image_paths'].append(image_paths[frameid])
else:
raise TypeError
else:
raise ValueError
except Exception:
# in case loading the npy file fails
data_dict['status'] = False
if not os.path.exists('./tmp'):
os.makedirs('./tmp')
log_name = 'loadnpyfail-' + time.strftime("%Y%m%d-%H%M%S") + '.txt'
with open(os.path.join('./tmp', log_name), 'w') as f_log:
f_log.write('npy path: ' + radar_paths[frameid][chirp_id] + \
'\nframe indices: %d:%d:%d' % (data_id, data_id + self.win_size * self.step, self.step))
# radar_npy_win = np.transpose(radar_npy_win, (3, 0, 1, 2))
#
# data_dict['radar_data'] = radar_npy_win
#
# if len(self.confmaps) != 0:
# confmap_gt = this_seq_confmap[data_id:data_id + self.win_size * self.step:self.step]
# confmap_gt = np.transpose(confmap_gt, (1, 0, 2, 3))
# obj_info = this_seq_obj_info[data_id:data_id + self.win_size * self.step:self.step]
#
# data_dict['anno'] = dict(
# obj_infos=obj_info,
# confmaps=confmap_gt,
# )
# else:
# data_dict['anno'] = None
return data_dict
# Dataloader for MNet
if 'mnet_cfg' in self.config_dict['model_cfg']:
radar_npy_win = np.transpose(radar_npy_win, (4, 0, 1, 2, 3))
assert radar_npy_win.shape == (
2, self.win_size, self.n_chirps, radar_configs['ramap_rsize'], radar_configs['ramap_asize'])
else:
radar_npy_win = np.transpose(radar_npy_win, (3, 0, 1, 2))
assert radar_npy_win.shape == (2, self.win_size, radar_configs['ramap_rsize'], radar_configs['ramap_asize'])
data_dict['radar_data'] = radar_npy_win
# Load annotations
if len(self.confmaps) != 0:
confmap_gt = this_seq_confmap[data_id:data_id + self.win_size * self.step:self.step]
confmap_gt = np.transpose(confmap_gt, (1, 0, 2, 3))
obj_info = this_seq_obj_info[data_id:data_id + self.win_size * self.step:self.step]
if self.noise_channel:
assert confmap_gt.shape == \
(self.n_class + 1, self.win_size, radar_configs['ramap_rsize'], radar_configs['ramap_asize'])
else:
confmap_gt = confmap_gt[:self.n_class]
assert confmap_gt.shape == \
(self.n_class, self.win_size, radar_configs['ramap_rsize'], radar_configs['ramap_asize'])
data_dict['anno'] = dict(
obj_infos=obj_info,
confmaps=confmap_gt,
)
else:
data_dict['anno'] = None
return data_dict
if __name__ == "__main__":
dataset = CRDataset('./data/data_details', stride=16)  # NOTE: stale example; the constructor now takes (data_dir, dataset, config_dict, split, ...)
print(len(dataset))
for i in range(len(dataset)):
continue
import os
import time
import random
import pickle
import numpy as np
from tqdm import tqdm
from torch.utils import data
from .collate_functions import _cr_collate_npy
from .loaders import list_pkl_filenames
class CRDatasetSM(data.Dataset):
"""
Pytorch Dataloader for CR Dataset
:param data_root: directory of the prepared data details (pickle files)
:param config_dict: rodnet configurations
:param split: dataset split: train, valid, test, or demo
:param is_random: whether to randomly pick the loaded chirp
:param subset: load a single sequence only (optional)
:param noise_channel: whether to append a noise channel to the ground-truth confmaps
"""
def __init__(self, data_root, config_dict, split, is_random=True, subset=None, noise_channel=False):
# parameters settings
self.data_root = data_root
self.config_dict = config_dict
self.n_class = config_dict['class_cfg']['n_class']
self.win_size = config_dict['train_cfg']['win_size']
self.split = split
if split == 'train' or split == 'valid':
self.step = config_dict['train_cfg']['train_step']
self.stride = config_dict['train_cfg']['train_stride']
else:
self.step = config_dict['test_cfg']['test_step']
self.stride = config_dict['test_cfg']['test_stride']
self.is_random = is_random
self.n_chirps = 1
self.noise_channel = noise_channel
# Dataloader for MNet
if 'mnet_cfg' in self.config_dict['model_cfg']:
in_chirps, out_channels = self.config_dict['model_cfg']['mnet_cfg']
self.n_chirps = in_chirps
n_radar_chirps = self.config_dict['dataset_cfg']['radar_cfg']['n_chirps']
self.chirp_ids = []
for c in range(in_chirps):
self.chirp_ids.append(int(n_radar_chirps / in_chirps * c))
# dataset initialization
self.image_paths = []
self.radar_paths = []
self.obj_infos = []
self.n_data = 0
self.index_mapping = []
if subset is not None:
self.data_files = [subset + '.pkl']
else:
self.data_files = list_pkl_filenames(config_dict['dataset_cfg'], split)
self.seq_names = [name.split('.')[0] for name in self.data_files]
self.n_seq = len(self.seq_names)
for seq_id, data_file in enumerate(tqdm(self.data_files)):
data_file_path = os.path.join(data_root, split, data_file)
data_details = pickle.load(open(data_file_path, 'rb'))
if split == 'train' or split == 'valid':
assert data_details['anno'] is not None
n_frame = data_details['n_frame']
self.image_paths.append(data_details['image_paths'])
self.radar_paths.append(data_details['radar_paths'])
n_data_in_seq = (n_frame - (self.win_size * self.step - 1)) // self.stride + (
1 if (n_frame - (self.win_size * self.step - 1)) % self.stride > 0 else 0)
self.n_data += n_data_in_seq
for data_id in range(n_data_in_seq):
self.index_mapping.append([seq_id, data_id * self.stride])
if data_details['anno'] is not None:
self.obj_infos.append(data_details['anno']['obj_infos'])
def __len__(self):
"""Total number of data/label pairs"""
return self.n_data
def __getitem__(self, index):
seq_id, data_id = self.index_mapping[index]
image_paths = self.image_paths[seq_id]
radar_paths = self.radar_paths[seq_id]
this_data_file = os.path.join(self.data_root, self.split, self.data_files[seq_id])
this_data_details = pickle.load(open(this_data_file, 'rb'))
if this_data_details['anno'] is not None:
this_seq_obj_info = this_data_details['anno']['obj_infos']
this_seq_confmap = this_data_details['anno']['confmaps']
data_dict = dict(
status=True,
image_paths=[]
)
if self.is_random:
chirp_id = random.randint(0, self.config_dict['dataset_cfg']['radar_cfg']['n_chirps'] - 1)
else:
chirp_id = 0
# Dataloader for MNet
if 'mnet_cfg' in self.config_dict['model_cfg']:
chirp_id = self.chirp_ids
radar_configs = self.config_dict['dataset_cfg']['radar_cfg']
ramap_rsize = radar_configs['ramap_rsize']
ramap_asize = radar_configs['ramap_asize']
# Load radar data
try:
if radar_configs['data_type'] == 'RI' or radar_configs['data_type'] == 'AP': # drop this format
radar_npy_win = np.load(radar_paths[chirp_id]) \
[data_id:data_id + self.win_size * self.step:self.step, :, :, :]
for idx, frameid in enumerate(
range(data_id, data_id + self.win_size * self.step, self.step)):
data_dict['image_paths'].append(image_paths[frameid])
elif radar_configs['data_type'] == 'RISEP' or radar_configs['data_type'] == 'APSEP':
if isinstance(chirp_id, int):
radar_npy_win = np.zeros((self.win_size, ramap_rsize, ramap_asize, 2), dtype=np.float32)
for idx, frameid in enumerate(
range(data_id, data_id + self.win_size * self.step, self.step)):
radar_npy_win[idx, :, :, :] = np.load(radar_paths[frameid][chirp_id])
data_dict['image_paths'].append(image_paths[frameid])
elif isinstance(chirp_id, list):
radar_npy_win = np.zeros((self.win_size, self.n_chirps, ramap_rsize, ramap_asize, 2),
dtype=np.float32)
for idx, frameid in enumerate(
range(data_id, data_id + self.win_size * self.step, self.step)):
for cid, c in enumerate(chirp_id):
npy_path = radar_paths[frameid][c]
radar_npy_win[idx, cid, :, :, :] = np.load(npy_path)
data_dict['image_paths'].append(image_paths[frameid])
else:
raise TypeError
else:
raise ValueError
except Exception:
# in case loading the npy file fails
data_dict['status'] = False
if not os.path.exists('./tmp'):
os.makedirs('./tmp')
log_name = 'loadnpyfail-' + time.strftime("%Y%m%d-%H%M%S") + '.txt'
with open(os.path.join('./tmp', log_name), 'w') as f_log:
f_log.write('npy path: ' + radar_paths[frameid][chirp_id] + \
'\nframe indices: %d:%d:%d' % (data_id, data_id + self.win_size * self.step, self.step))
# if 'mnet_cfg' in self.config_dict['model_cfg']:
# radar_npy_win = np.transpose(radar_npy_win, (4, 0, 1, 2, 3))
# else:
# radar_npy_win = np.transpose(radar_npy_win, (3, 0, 1, 2))
#
# data_dict['radar_data'] = radar_npy_win
#
# if len(self.confmaps) != 0:
# confmap_gt = this_seq_confmap[data_id:data_id + self.win_size * self.step:self.step]
# confmap_gt = np.transpose(confmap_gt, (1, 0, 2, 3))
# obj_info = this_seq_obj_info[data_id:data_id + self.win_size * self.step:self.step]
#
# data_dict['anno'] = dict(
# obj_infos=obj_info,
# confmaps=confmap_gt,
# )
# else:
# data_dict['anno'] = None
return data_dict
# Dataloader for MNet
if 'mnet_cfg' in self.config_dict['model_cfg']:
radar_npy_win = np.transpose(radar_npy_win, (4, 0, 1, 2, 3))
assert radar_npy_win.shape == (
2, self.win_size, self.n_chirps, radar_configs['ramap_rsize'], radar_configs['ramap_asize'])
else:
radar_npy_win = np.transpose(radar_npy_win, (3, 0, 1, 2))
assert radar_npy_win.shape == (2, self.win_size, radar_configs['ramap_rsize'], radar_configs['ramap_asize'])
data_dict['radar_data'] = radar_npy_win
# Load annotations
if this_data_details['anno'] is not None:
confmap_gt = this_seq_confmap[data_id:data_id + self.win_size * self.step:self.step]
confmap_gt = np.transpose(confmap_gt, (1, 0, 2, 3))
obj_info = this_seq_obj_info[data_id:data_id + self.win_size * self.step:self.step]
if self.noise_channel:
assert confmap_gt.shape == \
(self.n_class + 1, self.win_size, radar_configs['ramap_rsize'], radar_configs['ramap_asize'])
else:
confmap_gt = confmap_gt[:self.n_class]
assert confmap_gt.shape == \
(self.n_class, self.win_size, radar_configs['ramap_rsize'], radar_configs['ramap_asize'])
data_dict['anno'] = dict(
obj_infos=obj_info,
confmaps=confmap_gt,
)
else:
data_dict['anno'] = None
return data_dict
def getBatch(self, indexes):
"""
Get data batch with the given indices
TODO: change return data format
:param indexes:
:return:
"""
data_dicts = []
for index in indexes:
data_dict = self.__getitem__(index)
data_dicts.append(data_dict)
data_dict_batch = _cr_collate_npy(data_dicts)
return data_dict_batch
# def getObjInfo(self, index):
# seq_id, data_id = self.index_mapping[index]
# this_data_file = os.path.join(self.data_root, self.split, self.data_files[seq_id])
# this_data_details = pickle.load(open(this_data_file, 'rb'))
# obj_info = this_data_details['anno']['obj_infos'][data_id:data_id + self.win_size * self.step:self.step]
# return obj_info
#
# def getBatchObjInfo(self, indexes):
# bObj_info = []
# for index in indexes:
# result = self.getObjInfo(index)
# bObj_info.append(result)
# return bObj_info
if __name__ == "__main__":
dataset = CRDatasetSM('./data/data_details', stride=16)  # NOTE: stale example; the constructor now takes (data_root, config_dict, split, ...)
print(len(dataset))
for i in range(len(dataset)):
continue