import pickle

import os
import copy
import numpy as np
from skimage import io
import torch
import SharedArray
import torch.distributed as dist

from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils import box_utils, common_utils, box2d_utils, calibration_kitti
from pcdet.datasets.kitti.kitti_object_eval_python import kitti_common

class DataBaseSampler(object):
    def __init__(self, root_path, sampler_cfg, class_names, logger=None):
        self.root_path = root_path
        self.class_names = class_names
        self.sampler_cfg = sampler_cfg
        self.aug_with_img = sampler_cfg.get('AUG_WITH_IMAGE', False)
        self.joint_sample = True
        self.keep_raw = False
        self.box_iou_thres = 0.5
        self.img_aug_type = 'by_depth'
        self.aug_use_type = 'annotation'
        self.point_refine = True
        self.img_root_path = 'training/image_2'

        self.logger = logger
        self.db_infos = {}
        for class_name in class_names:
            self.db_infos[class_name] = []

        self.use_shared_memory = sampler_cfg.get('USE_SHARED_MEMORY', False)

        for db_info_path in sampler_cfg.DB_INFO_PATH:
            db_info_path = self.root_path.resolve() / db_info_path
            with open(str(db_info_path), 'rb') as f:
                infos = pickle.load(f)
                for cur_class in class_names:
                    self.db_infos[cur_class].extend(infos[cur_class])

        for func_name, val in sampler_cfg.PREPARE.items():
            self.db_infos = getattr(self, func_name)(self.db_infos, val)

        self.gt_database_data_key = self.load_db_to_shared_memory() if self.use_shared_memory else None

        self.sample_groups = {}
        self.sample_class_num = {}
        self.limit_whole_scene = sampler_cfg.get('LIMIT_WHOLE_SCENE', False)

        for x in sampler_cfg.SAMPLE_GROUPS:
            class_name, sample_num = x.split(':')
            if class_name not in class_names:
                continue
            self.sample_class_num[class_name] = sample_num
            self.sample_groups[class_name] = {
                'sample_num': sample_num,
                'pointer': len(self.db_infos[class_name]),
                'indices': np.arange(len(self.db_infos[class_name]))
            }
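        # Example: a SAMPLE_GROUPS entry such as 'Car:15' requests 15 sampled
        # 'Car' objects per scene; with LIMIT_WHOLE_SCENE enabled, __call__
        # subtracts the number of existing GT boxes of that class before sampling.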

    def __getstate__(self):
        d = dict(self.__dict__)
        del d['logger']
        return d

    def __setstate__(self, d):
        self.__dict__.update(d)

    def __del__(self):
        if self.use_shared_memory:
            self.logger.info('Deleting GT database from shared memory')
            cur_rank, num_gpus = common_utils.get_dist_info()
            sa_key = self.sampler_cfg.DB_DATA_PATH[0]
            if cur_rank % num_gpus == 0 and os.path.exists(f"/dev/shm/{sa_key}"):
                SharedArray.delete(f"shm://{sa_key}")

            if num_gpus > 1:
                dist.barrier()
            self.logger.info('GT database has been removed from shared memory')

    def load_db_to_shared_memory(self):
        self.logger.info('Loading GT database to shared memory')
        cur_rank, world_size, num_gpus = common_utils.get_dist_info(return_gpu_per_machine=True)

        assert len(self.sampler_cfg.DB_DATA_PATH) == 1, 'Currently only a single DB_DATA file is supported'
        db_data_path = self.root_path.resolve() / self.sampler_cfg.DB_DATA_PATH[0]
        sa_key = self.sampler_cfg.DB_DATA_PATH[0]

        if cur_rank % num_gpus == 0 and not os.path.exists(f"/dev/shm/{sa_key}"):
            gt_database_data = np.load(db_data_path)
            common_utils.sa_create(f"shm://{sa_key}", gt_database_data)

        if num_gpus > 1:
            dist.barrier()
        self.logger.info('GT database has been saved to shared memory')
        return sa_key
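    # Hedged usage sketch: a consumer (e.g. a dataloader worker) can attach to the
    # shared GT database read-only via the key returned above, mirroring the use
    # in add_sampled_boxes_to_scene() below:
    #
    #   gt_database_data = SharedArray.attach(f"shm://{sa_key}")
    #   gt_database_data.setflags(write=0)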

    def filter_by_difficulty(self, db_infos, removed_difficulty):
        new_db_infos = {}
        for key, dinfos in db_infos.items():
            pre_len = len(dinfos)
            new_db_infos[key] = [
                info for info in dinfos
                if info['difficulty'] not in removed_difficulty
            ]
            if self.logger is not None:
                self.logger.info('Database filter by difficulty %s: %d => %d' % (key, pre_len, len(new_db_infos[key])))
        return new_db_infos

    def filter_by_min_points(self, db_infos, min_gt_points_list):
        for name_num in min_gt_points_list:
            name, min_num = name_num.split(':')
            min_num = int(min_num)
            if min_num > 0 and name in db_infos.keys():
                filtered_infos = []
                for info in db_infos[name]:
                    if info['num_points_in_gt'] >= min_num:
                        filtered_infos.append(info)

                if self.logger is not None:
                    self.logger.info('Database filter by min points %s: %d => %d' %
                                     (name, len(db_infos[name]), len(filtered_infos)))
                db_infos[name] = filtered_infos

        return db_infos
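    # Example: PREPARE entries are 'Class:count' strings, e.g.
    # filter_by_min_points: ['Car:5', 'Pedestrian:5'] keeps only database objects
    # that contain at least that many LiDAR points.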

    def sample_with_fixed_number(self, class_name, sample_group):
        """
        Args:
            class_name:
            sample_group:
        Returns:

        """
        sample_num, pointer, indices = int(sample_group['sample_num']), sample_group['pointer'], sample_group['indices']
        if pointer >= len(self.db_infos[class_name]):
            indices = np.random.permutation(len(self.db_infos[class_name]))
            pointer = 0

        sampled_dict = [self.db_infos[class_name][idx] for idx in indices[pointer: pointer + sample_num]]
        pointer += sample_num
        sample_group['pointer'] = pointer
        sample_group['indices'] = indices
        return sampled_dict
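    # Worked example: with 100 'Car' infos and sample_num=15, the pointer is
    # initialized past the end in __init__, so the first call reshuffles the
    # indices and returns entries 0:15; later calls return 15:30, 30:45, ...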

    @staticmethod
    def put_boxes_on_road_planes(gt_boxes, road_planes, calib):
        """
        Only validate in KITTIDataset
        Args:
            gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
            road_planes: [a, b, c, d]
            calib:

        Returns:
        """
        a, b, c, d = road_planes
        center_cam = calib.lidar_to_rect(gt_boxes[:, 0:3])
        cur_height_cam = (-d - a * center_cam[:, 0] - c * center_cam[:, 2]) / b
        center_cam[:, 1] = cur_height_cam
        cur_lidar_height = calib.rect_to_lidar(center_cam)[:, 2]
        mv_height = gt_boxes[:, 2] - gt_boxes[:, 5] / 2 - cur_lidar_height
        gt_boxes[:, 2] -= mv_height  # lidar view
        return gt_boxes, mv_height
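    # Worked example: for a near-flat KITTI plane [a, b, c, d] = [0, -1, 0, 1.65]
    # in the rect camera frame, a*x + b*y + c*z + d = 0 solves to
    # y = (-d - a*x - c*z) / b = 1.65, i.e. each box center is snapped to 1.65 m
    # below the camera origin, matching cur_height_cam above.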

    def copy_paste_to_image_kitti(self, data_dict, crop_feat, gt_number, point_idxes=None):
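        """
        Paste cropped object patches onto the scene image; when img_aug_type
        contains 'depth', patches are pasted far-to-near so closer objects
        occlude farther ones. With joint_sample and the 'annotation' use type,
        scene points whose 2D projection conflicts with the pasted layout are
        dropped; the 'projection' use type instead records overlap/depth masks.
        """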
        image = data_dict['images']
        boxes3d = data_dict['gt_boxes']
        boxes2d = data_dict['gt_boxes2d']
        corners_lidar = box_utils.boxes_to_corners_3d(boxes3d)
        if 'depth' in self.img_aug_type:
            # paste far-to-near (descending LiDAR x, i.e. depth) so nearer
            # objects are drawn last and occlude farther ones
            paste_order = boxes3d[:, 0].argsort()
            paste_order = paste_order[::-1]
        else:
            paste_order = np.arange(len(boxes3d), dtype=int)

        if 'reverse' in self.img_aug_type:
            paste_order = paste_order[::-1]

        paste_mask = -255 * np.ones(image.shape[:2], dtype=int)
        fg_mask = np.zeros(image.shape[:2], dtype=int)
        overlap_mask = np.zeros(image.shape[:2], dtype=int)
        depth_mask = np.zeros((*image.shape[:2], 2), dtype=float)
        points_2d, depth_2d = data_dict['calib'].lidar_to_img(data_dict['points'][:, :3])
        points_2d[:, 0] = np.clip(points_2d[:, 0], a_min=0, a_max=image.shape[1] - 1)
        points_2d[:, 1] = np.clip(points_2d[:, 1], a_min=0, a_max=image.shape[0] - 1)
        points_2d = points_2d.astype(int)
        for _order in paste_order:
            _box2d = boxes2d[_order]
            image[_box2d[1]:_box2d[3], _box2d[0]:_box2d[2]] = crop_feat[_order]
            overlap_mask[_box2d[1]:_box2d[3], _box2d[0]:_box2d[2]] += \
                (paste_mask[_box2d[1]:_box2d[3], _box2d[0]:_box2d[2]] > 0).astype(int)
            paste_mask[_box2d[1]:_box2d[3], _box2d[0]:_box2d[2]] = _order

            if 'cover' in self.aug_use_type:
                # HxWx2 for min and max depth of each box region
                depth_mask[_box2d[1]:_box2d[3], _box2d[0]:_box2d[2], 0] = corners_lidar[_order, :, 0].min()
                depth_mask[_box2d[1]:_box2d[3], _box2d[0]:_box2d[2], 1] = corners_lidar[_order, :, 0].max()

            # foreground area of the original point cloud in the image plane
            if _order < gt_number:
                fg_mask[_box2d[1]:_box2d[3], _box2d[0]:_box2d[2]] = 1

        data_dict['images'] = image

        if not self.joint_sample:
            return data_dict

        new_mask = paste_mask[points_2d[:, 1], points_2d[:, 0]] == (point_idxes + gt_number)
        if self.keep_raw:
            raw_mask = point_idxes == -1
        else:
            raw_fg = (fg_mask == 1) & (paste_mask >= 0) & (paste_mask < gt_number)
            raw_bg = (fg_mask == 0) & (paste_mask < 0)
            raw_mask = raw_fg[points_2d[:, 1], points_2d[:, 0]] | raw_bg[points_2d[:, 1], points_2d[:, 0]]
        keep_mask = new_mask | raw_mask
        data_dict['points_2d'] = points_2d

        if 'annotation' in self.aug_use_type:
            data_dict['points'] = data_dict['points'][keep_mask]
            data_dict['points_2d'] = data_dict['points_2d'][keep_mask]
        elif 'projection' in self.aug_use_type:
            overlap_mask[overlap_mask >= 1] = 1
            data_dict['overlap_mask'] = overlap_mask
            if 'cover' in self.aug_use_type:
                data_dict['depth_mask'] = depth_mask

        return data_dict

    def collect_image_crops_kitti(self, info, data_dict, obj_points, sampled_gt_boxes, sampled_gt_boxes2d, idx):
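        """
        Load the image crop for one sampled object and, with point_refine,
        re-project its points and 3D/2D boxes through the current scene's
        calibration so that samples taken from other frames stay consistent
        with this frame's calibration.
        """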
        calib_file = kitti_common.get_calib_path(int(info['image_idx']), self.root_path, relative_path=False)
        sampled_calib = calibration_kitti.Calibration(calib_file)
        points_2d, depth_2d = sampled_calib.lidar_to_img(obj_points[:, :3])

        if self.point_refine:
            # align calibration metrics for points
            points_rect = data_dict['calib'].img_to_rect(points_2d[:, 0], points_2d[:, 1], depth_2d)
            points_lidar = data_dict['calib'].rect_to_lidar(points_rect)
            obj_points[:, :3] = points_lidar
            # align calibration metrics for boxes
            box3d_raw = sampled_gt_boxes[idx].reshape(1, -1)
            box3d_coords = box_utils.boxes_to_corners_3d(box3d_raw)[0]
            box3d_box, box3d_depth = sampled_calib.lidar_to_img(box3d_coords)
            box3d_coord_rect = data_dict['calib'].img_to_rect(box3d_box[:, 0], box3d_box[:, 1], box3d_depth)
            box3d_rect = box_utils.corners_rect_to_camera(box3d_coord_rect).reshape(1, -1)
            box3d_lidar = box_utils.boxes3d_kitti_camera_to_lidar(box3d_rect, data_dict['calib'])
            box2d = box_utils.boxes3d_kitti_camera_to_imageboxes(box3d_rect, data_dict['calib'],
                                                                 data_dict['images'].shape[:2])
            sampled_gt_boxes[idx] = box3d_lidar[0]
            sampled_gt_boxes2d[idx] = box2d[0]

        obj_idx = idx * np.ones(len(obj_points), dtype=int)

        # copy crops from images
        img_path = self.root_path / self.img_root_path / (info['image_idx'] + '.png')
        raw_image = io.imread(img_path)
        raw_image = raw_image.astype(np.float32)
        raw_center = info['bbox'].reshape(2, 2).mean(0)
        new_box = sampled_gt_boxes2d[idx].astype(int)
        new_shape = np.array([new_box[2] - new_box[0], new_box[3] - new_box[1]])
        raw_box = np.concatenate([raw_center - new_shape / 2, raw_center + new_shape / 2]).astype(int)
        raw_box[0::2] = np.clip(raw_box[0::2], a_min=0, a_max=raw_image.shape[1])
        raw_box[1::2] = np.clip(raw_box[1::2], a_min=0, a_max=raw_image.shape[0])
        if (raw_box[2] - raw_box[0]) != new_shape[0] or (raw_box[3] - raw_box[1]) != new_shape[1]:
            # the crop was clipped at the image border: shrink the paste box to match
            new_center = new_box.reshape(2, 2).mean(0)
            new_shape = np.array([raw_box[2] - raw_box[0], raw_box[3] - raw_box[1]])
            new_box = np.concatenate([new_center - new_shape / 2, new_center + new_shape / 2]).astype(int)

        img_crop2d = raw_image[raw_box[1]:raw_box[3], raw_box[0]:raw_box[2]] / 255

        return new_box, img_crop2d, obj_points, obj_idx

    def sample_gt_boxes_2d_kitti(self, data_dict, sampled_boxes, iou1, iou2):
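        """
        Project the sampled 3D boxes into the image plane and keep only samples
        whose 2D IoU with both existing and other sampled boxes stays below
        box_iou_thres (and whose BEV IoU, via iou1/iou2, is zero).
        """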
        # filter out boxes whose 2D IoU exceeds the threshold
        mv_height = None
        if self.sampler_cfg.get('USE_ROAD_PLANE', False):
            sampled_boxes, mv_height = self.put_boxes_on_road_planes(
                sampled_boxes, data_dict['road_plane'], data_dict['calib']
            )

        # sampled_boxes2d = np.stack([x['bbox'] for x in sampled_dict], axis=0).astype(np.float32)
        boxes3d_camera = box_utils.boxes3d_lidar_to_kitti_camera(sampled_boxes, data_dict['calib'])
        sampled_boxes2d = box_utils.boxes3d_kitti_camera_to_imageboxes(boxes3d_camera, data_dict['calib'],
                                                                        data_dict['images'].shape[:2])
        sampled_boxes2d = torch.Tensor(sampled_boxes2d)
        existed_boxes2d = torch.Tensor(data_dict['gt_boxes2d'])
        iou2d1 = box2d_utils.pairwise_iou(sampled_boxes2d, existed_boxes2d).cpu().numpy()
        iou2d2 = box2d_utils.pairwise_iou(sampled_boxes2d, sampled_boxes2d).cpu().numpy()
        # ignore self-IoU on the diagonal; fall back to self-IoU when the scene has no boxes yet
        iou2d2[range(sampled_boxes2d.shape[0]), range(sampled_boxes2d.shape[0])] = 0
        iou2d1 = iou2d1 if iou2d1.shape[1] > 0 else iou2d2

        valid_mask = ((iou2d1.max(axis=1) < self.box_iou_thres) &
                      (iou2d2.max(axis=1) < self.box_iou_thres) &
                      ((iou1.max(axis=1) + iou2.max(axis=1)) == 0)).nonzero()[0]

        sampled_boxes2d = sampled_boxes2d[valid_mask].cpu().numpy()
        return sampled_boxes2d, mv_height, valid_mask

    def add_sampled_boxes_to_scene(self, data_dict, sampled_gt_boxes, total_valid_sampled_dict, mv_height=None, sampled_gt_boxes2d=None):
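        """
        Load the points of each valid sampled object, place them (and their
        boxes) into the scene, remove original points inside the enlarged
        sampled boxes, and, when aug_with_img is set, paste the matching image
        crops as well.
        """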
        gt_boxes_mask = data_dict['gt_boxes_mask']
        gt_boxes = data_dict['gt_boxes'][gt_boxes_mask]
        gt_names = data_dict['gt_names'][gt_boxes_mask]
        points = data_dict['points']
        if self.sampler_cfg.get('USE_ROAD_PLANE', False) and not self.aug_with_img:
            sampled_gt_boxes, mv_height = self.put_boxes_on_road_planes(
                sampled_gt_boxes, data_dict['road_plane'], data_dict['calib']
            )
            data_dict.pop('calib')
            data_dict.pop('road_plane')

        obj_points_list = []
        # convert sampled 3D boxes to image plane
        if self.aug_with_img:
            obj_index_list, crop_boxes2d = [], []
            gt_number = gt_boxes_mask.sum().astype(int)
            gt_boxes2d = data_dict['gt_boxes2d'][gt_boxes_mask].astype(int)
            gt_crops2d = [data_dict['images'][_x[1]:_x[3], _x[0]:_x[2]] for _x in gt_boxes2d]

        if self.use_shared_memory:
            gt_database_data = SharedArray.attach(f"shm://{self.gt_database_data_key}")
            gt_database_data.setflags(write=0)
        else:
            gt_database_data = None

        for idx, info in enumerate(total_valid_sampled_dict):
            if self.use_shared_memory:
                start_offset, end_offset = info['global_data_offset']
                obj_points = copy.deepcopy(gt_database_data[start_offset:end_offset])
            else:
                file_path = self.root_path / info['path']
                obj_points = np.fromfile(str(file_path), dtype=np.float32).reshape(
                    [-1, self.sampler_cfg.NUM_POINT_FEATURES])

            obj_points[:, :3] += info['box3d_lidar'][:3]

            if self.sampler_cfg.get('USE_ROAD_PLANE', False):
                # lower the object onto the road plane by its computed height offset
                obj_points[:, 2] -= mv_height[idx]

            if self.aug_with_img:
                new_box, img_crop2d, obj_points, obj_idx = self.collect_image_crops_kitti(
                    info, data_dict, obj_points, sampled_gt_boxes, sampled_gt_boxes2d, idx)
                crop_boxes2d.append(new_box)
                gt_crops2d.append(img_crop2d)
                obj_index_list.append(obj_idx)

            obj_points_list.append(obj_points)

        obj_points = np.concatenate(obj_points_list, axis=0)
        sampled_gt_names = np.array([x['name'] for x in total_valid_sampled_dict])

        large_sampled_gt_boxes = box_utils.enlarge_box3d(
            sampled_gt_boxes[:, 0:7], extra_width=self.sampler_cfg.REMOVE_EXTRA_WIDTH
        )
        points = box_utils.remove_points_in_boxes3d(points, large_sampled_gt_boxes)
        points = np.concatenate([obj_points, points], axis=0)
        gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0)
        gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes], axis=0)
        data_dict['gt_boxes'] = gt_boxes
        data_dict['gt_names'] = gt_names
        data_dict['points'] = points
        if self.aug_with_img:
            obj_points_idx = np.concatenate(obj_index_list, axis=0)
            point_idxes = -1 * np.ones(len(points), dtype=int)
            point_idxes = np.concatenate([obj_points_idx, point_idxes], axis=0)

            data_dict['gt_boxes2d'] = np.concatenate([gt_boxes2d, np.array(crop_boxes2d)], axis=0)
            data_dict = self.copy_paste_to_image_kitti(data_dict, gt_crops2d, gt_number, point_idxes)
            if self.sampler_cfg.get('USE_ROAD_PLANE', False):
                data_dict.pop('road_plane')

        return data_dict

    def __call__(self, data_dict):
        """
        Args:
            data_dict:
                gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]

        Returns:

        """
        gt_boxes = data_dict['gt_boxes']
        gt_names = data_dict['gt_names'].astype(str)
        existed_boxes = gt_boxes
        total_valid_sampled_dict = []
        sampled_mv_height = []
        sampled_gt_boxes2d = []
        for class_name, sample_group in self.sample_groups.items():
            if self.limit_whole_scene:
                num_gt = np.sum(class_name == gt_names)
                sample_group['sample_num'] = str(int(self.sample_class_num[class_name]) - num_gt)
            if int(sample_group['sample_num']) > 0:
                sampled_dict = self.sample_with_fixed_number(class_name, sample_group)

                sampled_boxes = np.stack([x['box3d_lidar'] for x in sampled_dict], axis=0).astype(np.float32)

                if self.sampler_cfg.get('DATABASE_WITH_FAKELIDAR', False):
                    sampled_boxes = box_utils.boxes3d_kitti_fakelidar_to_lidar(sampled_boxes)

                iou1 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], existed_boxes[:, 0:7])
                iou2 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], sampled_boxes[:, 0:7])
                iou2[range(sampled_boxes.shape[0]), range(sampled_boxes.shape[0])] = 0
                iou1 = iou1 if iou1.shape[1] > 0 else iou2
                valid_mask = ((iou1.max(axis=1) + iou2.max(axis=1)) == 0).nonzero()[0]

                if self.aug_with_img:
                    sampled_boxes2d, mv_height, valid_mask = self.sample_gt_boxes_2d_kitti(data_dict, sampled_boxes, iou1, iou2)
                    sampled_gt_boxes2d.append(sampled_boxes2d)
                    if self.sampler_cfg.get('USE_ROAD_PLANE', False):
                        mv_height = mv_height[valid_mask]
                        sampled_mv_height = np.concatenate((sampled_mv_height, mv_height), axis=0)

                valid_sampled_dict = [sampled_dict[x] for x in valid_mask]
                valid_sampled_boxes = sampled_boxes[valid_mask]

                existed_boxes = np.concatenate((existed_boxes, valid_sampled_boxes), axis=0)
                total_valid_sampled_dict.extend(valid_sampled_dict)

        sampled_gt_boxes = existed_boxes[gt_boxes.shape[0]:, :]

        if self.aug_with_img:
            if len(sampled_gt_boxes2d) > 0:
                sampled_gt_boxes2d = np.concatenate(sampled_gt_boxes2d, axis=0)

        if len(total_valid_sampled_dict) > 0:
            data_dict = self.add_sampled_boxes_to_scene(data_dict,
                                                        sampled_gt_boxes,
                                                        total_valid_sampled_dict,
                                                        sampled_mv_height,
                                                        sampled_gt_boxes2d)

        data_dict.pop('gt_boxes_mask')
        return data_dict
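
# Hedged usage sketch (illustrative, not part of the module): constructing the
# sampler with config keys mirroring those accessed above (DB_INFO_PATH, PREPARE,
# SAMPLE_GROUPS, NUM_POINT_FEATURES, REMOVE_EXTRA_WIDTH, LIMIT_WHOLE_SCENE);
# the exact values below are assumptions, not project defaults:
#
#   from pathlib import Path
#   from easydict import EasyDict
#
#   sampler_cfg = EasyDict({
#       'DB_INFO_PATH': ['kitti_dbinfos_train.pkl'],
#       'PREPARE': {'filter_by_min_points': ['Car:5'],
#                   'filter_by_difficulty': [-1]},
#       'SAMPLE_GROUPS': ['Car:15'],
#       'NUM_POINT_FEATURES': 4,
#       'REMOVE_EXTRA_WIDTH': [0.0, 0.0, 0.0],
#       'LIMIT_WHOLE_SCENE': True,
#   })
#   sampler = DataBaseSampler(Path('data/kitti'), sampler_cfg, ['Car'], logger)
#   data_dict = sampler(data_dict)  # adds sampled 'Car' objects to the scene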