"examples/tensorflow/vscode:/vscode.git/clone" did not exist on "eae6ce2a4f44e81eda3b876ef50fc045926912f6"
Unverified commit b2e5ad6b, authored by Xiang Xu and committed by GitHub

[Fix] Deprecating old type alias due to new version of numpy (#2339)

* update type alias

* fix UT
parent 4f2532e5
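
For context on why this change is needed: NumPy 1.20 deprecated the aliases `np.int`, `np.bool`, `np.float`, etc. (they were only aliases for the Python builtins), and NumPy 1.24 removed them, so any remaining use now raises `AttributeError`. A minimal sketch of the migration this commit applies throughout the codebase (the array here is illustrative):

```python
import numpy as np

mask = np.array([1, 0, 1])

# Before (deprecated in NumPy 1.20, removed in 1.24):
#   labels = mask.astype(np.int)   # AttributeError: module 'numpy' has no attribute 'int'
#   flags = mask.astype(np.bool)   # likewise removed

# After: pin an explicit width for integers; use the builtin for booleans.
labels = mask.astype(np.int64)  # fixed 64-bit width, independent of the platform C long
flags = mask.astype(bool)       # the plain Python builtin is the recommended replacement
```

Note that `np.int` was an alias for the builtin `int`, which NumPy maps to the platform default integer, so replacing it with `np.int64` also makes the dtype consistent across platforms.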
@@ -50,5 +50,5 @@ def export(anno_path, out_filename):
     data_label[:, 0:3] -= xyz_min
     np.save(f'{out_filename}_point.npy', data_label[:, :6].astype(np.float32))
-    np.save(f'{out_filename}_sem_label.npy', data_label[:, 6].astype(np.int))
-    np.save(f'{out_filename}_ins_label.npy', data_label[:, 7].astype(np.int))
+    np.save(f'{out_filename}_sem_label.npy', data_label[:, 6].astype(np.int64))
+    np.save(f'{out_filename}_ins_label.npy', data_label[:, 7].astype(np.int64))
@@ -86,8 +86,8 @@ def export(anno_path, out_filename):
     data_label[:, 0:3] -= xyz_min
     np.save(f'{out_filename}_point.npy', data_label[:, :6].astype(np.float32))
-    np.save(f'{out_filename}_sem_label.npy', data_label[:, 6].astype(np.int))
-    np.save(f'{out_filename}_ins_label.npy', data_label[:, 7].astype(np.int))
+    np.save(f'{out_filename}_sem_label.npy', data_label[:, 6].astype(np.int64))
+    np.save(f'{out_filename}_ins_label.npy', data_label[:, 7].astype(np.int64))
 ```
...
@@ -89,8 +89,8 @@ def export(anno_path, out_filename):
     data_label[:, 0:3] -= xyz_min
     np.save(f'{out_filename}_point.npy', data_label[:, :6].astype(np.float32))
-    np.save(f'{out_filename}_sem_label.npy', data_label[:, 6].astype(np.int))
-    np.save(f'{out_filename}_ins_label.npy', data_label[:, 7].astype(np.int))
+    np.save(f'{out_filename}_sem_label.npy', data_label[:, 6].astype(np.int64))
+    np.save(f'{out_filename}_ins_label.npy', data_label[:, 7].astype(np.int64))
 ```
...
@@ -75,7 +75,7 @@ class S3DISDataset(Det3DDataset):
         seg_valid_cat_ids = self.METAINFO['seg_valid_class_ids']
         neg_label = len(seg_valid_cat_ids)
         seg_label_mapping = np.ones(
-            seg_max_cat_id + 1, dtype=np.int) * neg_label
+            seg_max_cat_id + 1, dtype=np.int64) * neg_label
         for cls_idx, cat_id in enumerate(seg_valid_cat_ids):
             seg_label_mapping[cat_id] = cls_idx
         self.seg_label_mapping = seg_label_mapping
...
@@ -80,7 +80,7 @@ class ScanNetDataset(Det3DDataset):
         seg_valid_cat_ids = self.METAINFO['seg_valid_class_ids']
         neg_label = len(seg_valid_cat_ids)
         seg_label_mapping = np.ones(
-            seg_max_cat_id + 1, dtype=np.int) * neg_label
+            seg_max_cat_id + 1, dtype=np.int64) * neg_label
         for cls_idx, cat_id in enumerate(seg_valid_cat_ids):
             seg_label_mapping[cat_id] = cls_idx
         self.seg_label_mapping = seg_label_mapping
...
@@ -203,7 +203,7 @@ class Seg3DDataset(BaseDataset):
         seg_valid_cat_ids = self.METAINFO['seg_valid_class_ids']
         neg_label = len(seg_valid_cat_ids)
         seg_label_mapping = np.ones(
-            seg_max_cat_id + 1, dtype=np.int) * neg_label
+            seg_max_cat_id + 1, dtype=np.int64) * neg_label
         for cls_idx, cat_id in enumerate(seg_valid_cat_ids):
             seg_label_mapping[cat_id] = cls_idx
         return seg_label_mapping
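
The three hunks above all build the same lookup table: each valid category id maps to a contiguous class index, and every other id maps to a negative/ignore label. A minimal sketch of that behavior with illustrative ids (not the datasets' real category ids):

```python
import numpy as np

seg_valid_cat_ids = [3, 5, 9]  # illustrative, not the real S3DIS/ScanNet ids
seg_max_cat_id = 10
neg_label = len(seg_valid_cat_ids)

# Every raw id defaults to the ignore label, valid ids get 0..N-1.
seg_label_mapping = np.ones(seg_max_cat_id + 1, dtype=np.int64) * neg_label
for cls_idx, cat_id in enumerate(seg_valid_cat_ids):
    seg_label_mapping[cat_id] = cls_idx

print(seg_label_mapping)  # [3 3 3 0 3 1 3 3 3 2 3]: raw id 5 -> class 1, others -> 3
```

The table is then applied by plain array indexing, e.g. `seg_label_mapping[raw_labels]`, which is why it must keep an integer dtype.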
...
@@ -878,7 +878,7 @@ class ObjectRangeFilter(BaseTransform):
         # using mask to index gt_labels_3d will cause bug when
         # len(gt_labels_3d) == 1, where mask=1 will be interpreted
         # as gt_labels_3d[1] and cause out of index error
-        gt_labels_3d = gt_labels_3d[mask.numpy().astype(np.bool)]
+        gt_labels_3d = gt_labels_3d[mask.numpy().astype(bool)]
         # limit rad to [-pi, pi]
         gt_bboxes_3d.limit_yaw(offset=0.5, period=2 * np.pi)
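
The comment in this hunk is the reason the cast must produce `bool` rather than an integer dtype: NumPy treats an integer index array as fancy indexing (element lookup), not as a mask. A minimal illustration with hypothetical values:

```python
import numpy as np

gt_labels_3d = np.array([7])  # a single ground-truth label
mask = np.array([1])          # keep-flag for the one box, as an integer

# gt_labels_3d[mask] raises IndexError: it looks up element 1 of a length-1 array.
kept = gt_labels_3d[mask.astype(bool)]  # boolean masking -> array([7])
```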
@@ -981,7 +981,7 @@ class ObjectNameFilter(BaseTransform):
         """
         gt_labels_3d = input_dict['gt_labels_3d']
         gt_bboxes_mask = np.array([n in self.labels for n in gt_labels_3d],
-                                  dtype=np.bool_)
+                                  dtype=bool)
         input_dict['gt_bboxes_3d'] = input_dict['gt_bboxes_3d'][gt_bboxes_mask]
         input_dict['gt_labels_3d'] = input_dict['gt_labels_3d'][gt_bboxes_mask]
@@ -1954,7 +1954,7 @@ class RandomCrop3D(RandomCrop):
     - gt_bboxes (np.float32) (optional)
     - gt_bboxes_labels (np.int64) (optional)
     - gt_masks (BitmapMasks | PolygonMasks) (optional)
-    - gt_ignore_flags (np.bool) (optional)
+    - gt_ignore_flags (bool) (optional)
     - gt_seg_map (np.uint8) (optional)
     Modified Keys:
...
@@ -29,7 +29,7 @@ def aggregate_predictions(masks, labels, scores, valid_class_ids):
             # match pred_instance['filename'] from assign_instances_for_scan
             file_name = f'{id}_{i}'
            info[file_name] = dict()
-            info[file_name]['mask'] = (mask == i).astype(np.int)
+            info[file_name]['mask'] = (mask == i).astype(np.int64)
             info[file_name]['label_id'] = valid_class_ids[label[i]]
             info[file_name]['conf'] = score[i]
         infos.append(info)
...
@@ -59,7 +59,7 @@ def evaluate_matches(matches, class_labels, options):
             cur_true = np.ones(len(gt_instances))
             cur_score = np.ones(len(gt_instances)) * (-float('inf'))
-            cur_match = np.zeros(len(gt_instances), dtype=np.bool)
+            cur_match = np.zeros(len(gt_instances), dtype=bool)
             # collect matches
             for (gti, gt) in enumerate(gt_instances):
                 found_match = False
...
@@ -88,8 +88,8 @@ def seg_eval(gt_labels, seg_preds, label2cat, ignore_index, logger=None):
     hist_list = []
     for i in range(len(gt_labels)):
-        gt_seg = gt_labels[i].astype(np.int)
-        pred_seg = seg_preds[i].astype(np.int)
+        gt_seg = gt_labels[i].astype(np.int64)
+        pred_seg = seg_preds[i].astype(np.int64)
         # filter out ignored points
         pred_seg[gt_seg == ignore_index] = -1
...
@@ -87,7 +87,7 @@ class SegMetric(BaseMetric):
         ignore_index = self.dataset_meta['ignore_index']
         # need to map network output to original label idx
         cat2label = np.zeros(len(self.dataset_meta['label2cat'])).astype(
-            np.int)
+            np.int64)
         for original_label, output_idx in self.dataset_meta['label2cat'].items(
         ):
             if output_idx != ignore_index:
@@ -95,7 +95,7 @@ class SegMetric(BaseMetric):
         for i, (eval_ann, result) in enumerate(results):
             sample_idx = eval_ann['point_cloud']['lidar_idx']
-            pred_sem_mask = result['semantic_mask'].numpy().astype(np.int)
+            pred_sem_mask = result['semantic_mask'].numpy().astype(np.int64)
             pred_label = cat2label[pred_sem_mask]
             curr_file = f'{submission_prefix}/{sample_idx}.txt'
             np.savetxt(curr_file, pred_label, fmt='%d')
...
@@ -21,9 +21,9 @@ def test_instance_seg_eval():
     pred_instance_labels = []
     pred_instance_scores = []
     for n_points, gt_labels in zip(n_points_list, gt_labels_list):
-        gt_instance_mask = np.ones(n_points, dtype=np.int) * -1
-        gt_semantic_mask = np.ones(n_points, dtype=np.int) * -1
-        pred_instance_mask = np.ones(n_points, dtype=np.int) * -1
+        gt_instance_mask = np.ones(n_points, dtype=np.int64) * -1
+        gt_semantic_mask = np.ones(n_points, dtype=np.int64) * -1
+        pred_instance_mask = np.ones(n_points, dtype=np.int64) * -1
         labels = []
         scores = []
         for i, gt_label in enumerate(gt_labels):
...
@@ -16,8 +16,8 @@ class TestInstanceSegMetric(unittest.TestCase):
         n_points = 3300
         gt_labels = [0, 0, 0, 0, 0, 0, 14, 14, 2, 1]
-        gt_instance_mask = np.ones(n_points, dtype=np.int) * -1
-        gt_semantic_mask = np.ones(n_points, dtype=np.int) * -1
+        gt_instance_mask = np.ones(n_points, dtype=np.int64) * -1
+        gt_semantic_mask = np.ones(n_points, dtype=np.int64) * -1
         for i, gt_label in enumerate(gt_labels):
             begin = i * 300
             end = begin + 300
@@ -31,7 +31,7 @@ class TestInstanceSegMetric(unittest.TestCase):
         results_dict = dict()
         n_points = 3300
         gt_labels = [0, 0, 0, 0, 0, 0, 14, 14, 2, 1]
-        pred_instance_mask = np.ones(n_points, dtype=np.int) * -1
+        pred_instance_mask = np.ones(n_points, dtype=np.int64) * -1
         labels = []
         scores = []
         for i, gt_label in enumerate(gt_labels):
...
@@ -72,12 +72,12 @@ def test_points_in_convex_polygon_jit():
                         [[1.0, 0.0], [1.0, 1.0], [0.5, 1.0], [0.0, 1.0]],
                         [[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]]])
     res = points_in_convex_polygon_jit(points, polygons)
-    expected_res = np.array([[1, 0, 1], [0, 0, 0], [0, 1, 0]]).astype(np.bool)
+    expected_res = np.array([[1, 0, 1], [0, 0, 0], [0, 1, 0]]).astype(bool)
     assert np.allclose(res, expected_res)
     polygons = np.array([[[0.0, 0.0], [0.0, 1.0], [0.5, 0.5], [1.0, 0.0]],
                         [[0.0, 1.0], [1.0, 1.0], [1.0, 0.5], [1.0, 0.0]],
                         [[1.0, 0.0], [0.0, -1.0], [-1.0, 0.0], [0.0, 1.1]]])
     res = points_in_convex_polygon_jit(points, polygons, clockwise=True)
-    expected_res = np.array([[1, 0, 1], [0, 0, 1], [0, 1, 0]]).astype(np.bool)
+    expected_res = np.array([[1, 0, 1], [0, 0, 1], [0, 1, 0]]).astype(bool)
     assert np.allclose(res, expected_res)
@@ -594,9 +594,9 @@ def add_difficulty_to_annos(info):
     occlusion = annos['occluded']
     truncation = annos['truncated']
    diff = []
-    easy_mask = np.ones((len(dims), ), dtype=np.bool)
-    moderate_mask = np.ones((len(dims), ), dtype=np.bool)
-    hard_mask = np.ones((len(dims), ), dtype=np.bool)
+    easy_mask = np.ones((len(dims), ), dtype=bool)
+    moderate_mask = np.ones((len(dims), ), dtype=bool)
+    hard_mask = np.ones((len(dims), ), dtype=bool)
     i = 0
     for h, o, t in zip(height, occlusion, truncation):
         if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]:
...
@@ -78,8 +78,10 @@ class S3DISData(object):
             f'{self.split}_{sample_idx}_sem_label.npy')
         points = np.load(pts_filename).astype(np.float32)
-        pts_instance_mask = np.load(pts_instance_mask_path).astype(np.int)
-        pts_semantic_mask = np.load(pts_semantic_mask_path).astype(np.int)
+        pts_instance_mask = np.load(pts_instance_mask_path).astype(
+            np.int64)
+        pts_semantic_mask = np.load(pts_semantic_mask_path).astype(
+            np.int64)
         mmengine.mkdir_or_exist(osp.join(self.root_dir, 'points'))
         mmengine.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask'))
@@ -180,8 +182,8 @@ class S3DISSegData(object):
             12])  # used for seg task
         self.ignore_index = len(self.cat_ids)
-        self.cat_id2class = np.ones((self.all_ids.shape[0],), dtype=np.int) * \
-            self.ignore_index
+        self.cat_id2class = np.ones(
+            (self.all_ids.shape[0], ), dtype=np.int64) * self.ignore_index
         for i, cat_id in enumerate(self.cat_ids):
             self.cat_id2class[cat_id] = i
...
@@ -232,8 +232,8 @@ class ScanNetSegData(object):
             ])  # used for seg task
         self.ignore_index = len(self.cat_ids)
-        self.cat_id2class = np.ones((self.all_ids.shape[0],), dtype=np.int) * \
-            self.ignore_index
+        self.cat_id2class = np.ones(
+            (self.all_ids.shape[0], ), dtype=np.int64) * self.ignore_index
         for i, cat_id in enumerate(self.cat_ids):
             self.cat_id2class[cat_id] = i
...