Unverified Commit 4f88f1a5 authored by VVsssssk's avatar VVsssssk Committed by GitHub
Browse files

[Fix] Replace mmcv's functions and modules imported with mmengine's and sync...

[Fix] Replace mmcv's functions and modules imported with mmengine's and sync the latest mmengine (#1765)

* replace mmcv to mmengine

* fix

* fix comments
parent 0e157c31
......@@ -5,7 +5,7 @@ import time
import torch
from mmcv import Config
from mmcv.parallel import MMDataParallel
from mmcv.runner import load_checkpoint, wrap_fp16_model
from mmengine.runner import load_checkpoint
from mmdet3d.datasets import build_dataset
from mmdet3d.models import build_detector
......@@ -56,9 +56,6 @@ def main():
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_module(model)
......
......@@ -3,9 +3,10 @@ import pickle
from os import path as osp
import mmcv
import mmengine
import numpy as np
from mmcv import track_iter_progress
from mmcv.ops import roi_align
from mmengine import track_iter_progress
from pycocotools import mask as maskUtils
from pycocotools.coco import COCO
......@@ -223,7 +224,7 @@ def create_groundtruth_database(dataset_class_name,
if db_info_save_path is None:
db_info_save_path = osp.join(data_path,
f'{info_prefix}_dbinfos_train.pkl')
mmcv.mkdir_or_exist(database_save_path)
mmengine.mkdir_or_exist(database_save_path)
all_db_infos = dict()
if with_mask:
coco = COCO(osp.join(data_path, mask_anno_path))
......@@ -585,7 +586,7 @@ class GTDatabaseCreater:
if self.db_info_save_path is None:
self.db_info_save_path = osp.join(
self.data_path, f'{self.info_prefix}_dbinfos_train.pkl')
mmcv.mkdir_or_exist(self.database_save_path)
mmengine.mkdir_or_exist(self.database_save_path)
if self.with_mask:
self.coco = COCO(osp.join(self.data_path, self.mask_anno_path))
imgIds = self.coco.getImgIds()
......@@ -599,9 +600,9 @@ class GTDatabaseCreater:
dataset.pre_pipeline(input_dict)
return input_dict
multi_db_infos = mmcv.track_parallel_progress(
self.create_single, ((loop_dataset(i)
for i in range(len(dataset))), len(dataset)),
multi_db_infos = mmengine.track_parallel_progress(
self.create_single,
((loop_dataset(i) for i in range(len(dataset))), len(dataset)),
self.num_worker)
print('Make global unique group id')
group_counter_offset = 0
......
......@@ -109,8 +109,8 @@ class _NumPointsInGTCalculater:
return info
def calculate(self, infos):
ret_infos = mmcv.track_parallel_progress(self.calculate_single, infos,
self.num_worker)
ret_infos = mmengine.track_parallel_progress(self.calculate_single,
infos, self.num_worker)
for i, ret_info in enumerate(ret_infos):
infos[i] = ret_info
......@@ -120,7 +120,7 @@ def _calculate_num_points_in_gt(data_path,
relative_path,
remove_outside=True,
num_features=4):
for info in mmcv.track_iter_progress(infos):
for info in mmengine.track_iter_progress(infos):
pc_info = info['point_cloud']
image_info = info['image']
calib = info['calib']
......@@ -325,7 +325,7 @@ def _create_reduced_point_cloud(data_path,
"""
kitti_infos = mmengine.load(info_path)
for info in mmcv.track_iter_progress(kitti_infos):
for info in mmengine.track_iter_progress(kitti_infos):
pc_info = info['point_cloud']
image_info = info['image']
calib = info['calib']
......@@ -428,7 +428,7 @@ def export_2d_annotation(root_path, info_path, mono3d=True):
coco_ann_id = 0
coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids)
from os import path as osp
for info in mmcv.track_iter_progress(kitti_infos):
for info in mmengine.track_iter_progress(kitti_infos):
coco_infos = get_2d_boxes(info, occluded=[0, 1, 2, 3], mono3d=mono3d)
(height, width,
_) = mmcv.imread(osp.join(root_path,
......
......@@ -4,7 +4,7 @@ from concurrent import futures as futures
from os import path as osp
from pathlib import Path
import mmcv
import mmengine
import numpy as np
from PIL import Image
from skimage import io
......@@ -280,7 +280,7 @@ def get_kitti_image_info(path,
plane_path = get_plane_path(idx, path, training, relative_path)
if relative_path:
plane_path = str(root_path / plane_path)
lines = mmcv.list_from_file(plane_path)
lines = mmengine.list_from_file(plane_path)
info['plane'] = np.array([float(i) for i in lines[3].split()])
if annotations is not None:
......@@ -501,8 +501,9 @@ class WaymoInfoGatherer:
def gather(self, image_ids):
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
image_infos = mmcv.track_parallel_progress(self.gather_single,
image_ids, self.num_worker)
image_infos = mmengine.track_parallel_progress(self.gather_single,
image_ids,
self.num_worker)
return list(image_infos)
......
......@@ -114,7 +114,7 @@ def _fill_trainval_infos(lyft,
train_lyft_infos = []
val_lyft_infos = []
for sample in mmcv.track_iter_progress(lyft.sample):
for sample in mmengine.track_iter_progress(lyft.sample):
lidar_token = sample['data']['LIDAR_TOP']
sd_rec = lyft.get('sample_data', sample['data']['LIDAR_TOP'])
cs_record = lyft.get('calibrated_sensor',
......@@ -127,7 +127,7 @@ def _fill_trainval_infos(lyft,
lidar_path = abs_lidar_path.split(f'{os.getcwd()}/')[-1]
# relative path
mmcv.check_file_exist(lidar_path)
mmengine.check_file_exist(lidar_path)
info = {
'lidar_path': lidar_path,
......@@ -247,7 +247,7 @@ def export_2d_annotation(root_path, info_path, version):
]
coco_ann_id = 0
coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids)
for info in mmcv.track_iter_progress(lyft_infos):
for info in mmengine.track_iter_progress(lyft_infos):
for cam in camera_types:
cam_info = info['cams'][cam]
coco_infos = get_2d_boxes(
......
......@@ -160,7 +160,7 @@ def export_nuim_to_coco(nuim, data_root, out_dir, extra_tag, version, nproc):
images = []
print('Process image meta information...')
for sample_info in mmcv.track_iter_progress(nuim.sample_data):
for sample_info in mmengine.track_iter_progress(nuim.sample_data):
if sample_info['is_key_frame']:
img_idx = len(images)
images.append(
......@@ -172,8 +172,8 @@ def export_nuim_to_coco(nuim, data_root, out_dir, extra_tag, version, nproc):
height=sample_info['height']))
seg_root = f'{out_dir}semantic_masks'
mmcv.mkdir_or_exist(seg_root)
mmcv.mkdir_or_exist(osp.join(data_root, 'calibrated'))
mmengine.mkdir_or_exist(seg_root)
mmengine.mkdir_or_exist(osp.join(data_root, 'calibrated'))
global process_img_anno
......@@ -185,11 +185,11 @@ def export_nuim_to_coco(nuim, data_root, out_dir, extra_tag, version, nproc):
print('Process img annotations...')
if nproc > 1:
outputs = mmcv.track_parallel_progress(
outputs = mmengine.track_parallel_progress(
process_img_anno, images, nproc=nproc)
else:
outputs = []
for img_info in mmcv.track_iter_progress(images):
for img_info in mmengine.track_iter_progress(images):
outputs.append(process_img_anno(img_info))
# Determine the index of object annotation
......@@ -208,7 +208,7 @@ def export_nuim_to_coco(nuim, data_root, out_dir, extra_tag, version, nproc):
coco_format_json = dict(
images=images, annotations=annotations, categories=categories)
mmcv.mkdir_or_exist(out_dir)
mmengine.mkdir_or_exist(out_dir)
out_file = osp.join(out_dir, f'{extra_tag}_{version}.json')
print(f'Annotation dumped to {out_file}')
mmengine.dump(coco_format_json, out_file)
......
......@@ -131,7 +131,7 @@ def get_available_scenes(nusc):
# path from lyftdataset is absolute path
lidar_path = lidar_path.split(f'{os.getcwd()}/')[-1]
# relative path
if not mmcv.is_filepath(lidar_path):
if not mmengine.is_filepath(lidar_path):
scene_not_exist = True
break
else:
......@@ -165,7 +165,7 @@ def _fill_trainval_infos(nusc,
train_nusc_infos = []
val_nusc_infos = []
for sample in mmcv.track_iter_progress(nusc.sample):
for sample in mmengine.track_iter_progress(nusc.sample):
lidar_token = sample['data']['LIDAR_TOP']
sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP'])
cs_record = nusc.get('calibrated_sensor',
......@@ -173,7 +173,7 @@ def _fill_trainval_infos(nusc,
pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token'])
lidar_path, boxes, _ = nusc.get_sample_data(lidar_token)
mmcv.check_file_exist(lidar_path)
mmengine.check_file_exist(lidar_path)
info = {
'lidar_path': lidar_path,
......@@ -363,7 +363,7 @@ def export_2d_annotation(root_path, info_path, version, mono3d=True):
]
coco_ann_id = 0
coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids)
for info in mmcv.track_iter_progress(nusc_infos):
for info in mmengine.track_iter_progress(nusc_infos):
for cam in camera_types:
cam_info = info['cams'][cam]
coco_infos = get_2d_boxes(
......
......@@ -3,7 +3,6 @@ import os
from concurrent import futures as futures
from os import path as osp
import mmcv
import mmengine
import numpy as np
......@@ -82,9 +81,9 @@ class S3DISData(object):
pts_instance_mask = np.load(pts_instance_mask_path).astype(np.int)
pts_semantic_mask = np.load(pts_semantic_mask_path).astype(np.int)
mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))
mmcv.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask'))
mmcv.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask'))
mmengine.mkdir_or_exist(osp.join(self.root_dir, 'points'))
mmengine.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask'))
mmengine.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask'))
points.tofile(
osp.join(self.root_dir, 'points',
......@@ -194,7 +193,7 @@ class S3DISSegData(object):
def get_seg_infos(self):
scene_idxs, label_weight = self.get_scene_idxs_and_label_weight()
save_folder = osp.join(self.data_root, 'seg_info')
mmcv.mkdir_or_exist(save_folder)
mmengine.mkdir_or_exist(save_folder)
np.save(
osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'),
scene_idxs)
......
......@@ -3,7 +3,6 @@ import os
from concurrent import futures as futures
from os import path as osp
import mmcv
import mmengine
import numpy as np
......@@ -39,8 +38,8 @@ class ScanNetData(object):
assert split in ['train', 'val', 'test']
split_file = osp.join(self.root_dir, 'meta_data',
f'scannetv2_{split}.txt')
mmcv.check_file_exist(split_file)
self.sample_id_list = mmcv.list_from_file(split_file)
mmengine.check_file_exist(split_file)
self.sample_id_list = mmengine.list_from_file(split_file)
self.test_mode = (split == 'test')
def __len__(self):
......@@ -49,19 +48,19 @@ class ScanNetData(object):
def get_aligned_box_label(self, idx):
box_file = osp.join(self.root_dir, 'scannet_instance_data',
f'{idx}_aligned_bbox.npy')
mmcv.check_file_exist(box_file)
mmengine.check_file_exist(box_file)
return np.load(box_file)
def get_unaligned_box_label(self, idx):
box_file = osp.join(self.root_dir, 'scannet_instance_data',
f'{idx}_unaligned_bbox.npy')
mmcv.check_file_exist(box_file)
mmengine.check_file_exist(box_file)
return np.load(box_file)
def get_axis_align_matrix(self, idx):
matrix_file = osp.join(self.root_dir, 'scannet_instance_data',
f'{idx}_axis_align_matrix.npy')
mmcv.check_file_exist(matrix_file)
mmengine.check_file_exist(matrix_file)
return np.load(matrix_file)
def get_images(self, idx):
......@@ -83,7 +82,7 @@ class ScanNetData(object):
def get_intrinsics(self, idx):
matrix_file = osp.join(self.root_dir, 'posed_images', idx,
'intrinsic.txt')
mmcv.check_file_exist(matrix_file)
mmengine.check_file_exist(matrix_file)
return np.loadtxt(matrix_file)
def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):
......@@ -111,7 +110,7 @@ class ScanNetData(object):
pts_filename = osp.join(self.root_dir, 'scannet_instance_data',
f'{sample_idx}_vert.npy')
points = np.load(pts_filename)
mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))
mmengine.mkdir_or_exist(osp.join(self.root_dir, 'points'))
points.tofile(
osp.join(self.root_dir, 'points', f'{sample_idx}.bin'))
info['pts_path'] = osp.join('points', f'{sample_idx}.bin')
......@@ -143,8 +142,10 @@ class ScanNetData(object):
pts_semantic_mask = np.load(pts_semantic_mask_path).astype(
np.int64)
mmcv.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask'))
mmcv.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask'))
mmengine.mkdir_or_exist(
osp.join(self.root_dir, 'instance_mask'))
mmengine.mkdir_or_exist(
osp.join(self.root_dir, 'semantic_mask'))
pts_instance_mask.tofile(
osp.join(self.root_dir, 'instance_mask',
......@@ -246,7 +247,7 @@ class ScanNetSegData(object):
return
scene_idxs, label_weight = self.get_scene_idxs_and_label_weight()
save_folder = osp.join(self.data_root, 'seg_info')
mmcv.mkdir_or_exist(save_folder)
mmengine.mkdir_or_exist(save_folder)
np.save(
osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'),
scene_idxs)
......
......@@ -3,6 +3,7 @@ from concurrent import futures as futures
from os import path as osp
import mmcv
import mmengine
import numpy as np
from scipy import io as sio
......@@ -85,8 +86,8 @@ class SUNRGBDData(object):
}
assert split in ['train', 'val', 'test']
split_file = osp.join(self.split_dir, f'{split}_data_idx.txt')
mmcv.check_file_exist(split_file)
self.sample_id_list = map(int, mmcv.list_from_file(split_file))
mmengine.check_file_exist(split_file)
self.sample_id_list = map(int, mmengine.list_from_file(split_file))
self.image_dir = osp.join(self.split_dir, 'image')
self.calib_dir = osp.join(self.split_dir, 'calib')
self.depth_dir = osp.join(self.split_dir, 'depth')
......@@ -157,7 +158,7 @@ class SUNRGBDData(object):
pc_info = {'num_features': 6, 'lidar_idx': sample_idx}
info['point_cloud'] = pc_info
mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))
mmengine.mkdir_or_exist(osp.join(self.root_dir, 'points'))
pc_upright_depth_subsampled.tofile(
osp.join(self.root_dir, 'points', f'{sample_idx:06d}.bin'))
......
......@@ -12,7 +12,6 @@ import copy
import time
from os import path as osp
import mmcv
import mmengine
import numpy as np
from nuscenes.nuscenes import NuScenes
......@@ -276,7 +275,7 @@ def update_nuscenes_infos(pkl_path, out_dir):
print('Start updating:')
converted_list = []
for i, ori_info_dict in enumerate(
mmcv.track_iter_progress(data_list['infos'])):
mmengine.track_iter_progress(data_list['infos'])):
temp_data_info = get_empty_standard_data_info(
camera_types=camera_types)
temp_data_info['sample_idx'] = i
......@@ -385,7 +384,7 @@ def update_kitti_infos(pkl_path, out_dir):
data_list = mmengine.load(pkl_path)
print('Start updating:')
converted_list = []
for ori_info_dict in mmcv.track_iter_progress(data_list):
for ori_info_dict in mmengine.track_iter_progress(data_list):
temp_data_info = get_empty_standard_data_info()
if 'plane' in ori_info_dict:
......@@ -509,7 +508,7 @@ def update_s3dis_infos(pkl_path, out_dir):
data_list = mmengine.load(pkl_path)
print('Start updating:')
converted_list = []
for i, ori_info_dict in enumerate(mmcv.track_iter_progress(data_list)):
for i, ori_info_dict in enumerate(mmengine.track_iter_progress(data_list)):
temp_data_info = get_empty_standard_data_info()
temp_data_info['sample_id'] = i
temp_data_info['lidar_points']['num_pts_feats'] = ori_info_dict[
......@@ -575,7 +574,7 @@ def update_scannet_infos(pkl_path, out_dir):
data_list = mmengine.load(pkl_path)
print('Start updating:')
converted_list = []
for ori_info_dict in mmcv.track_iter_progress(data_list):
for ori_info_dict in mmengine.track_iter_progress(data_list):
temp_data_info = get_empty_standard_data_info()
temp_data_info['lidar_points']['num_pts_feats'] = ori_info_dict[
'point_cloud']['num_features']
......@@ -638,7 +637,7 @@ def update_sunrgbd_infos(pkl_path, out_dir):
data_list = mmengine.load(pkl_path)
print('Start updating:')
converted_list = []
for ori_info_dict in mmcv.track_iter_progress(data_list):
for ori_info_dict in mmengine.track_iter_progress(data_list):
temp_data_info = get_empty_standard_data_info()
temp_data_info['lidar_points']['num_pts_feats'] = ori_info_dict[
'point_cloud']['num_features']
......@@ -712,7 +711,7 @@ def update_lyft_infos(pkl_path, out_dir):
print('Start updating:')
converted_list = []
for i, ori_info_dict in enumerate(
mmcv.track_iter_progress(data_list['infos'])):
mmengine.track_iter_progress(data_list['infos'])):
temp_data_info = get_empty_standard_data_info()
temp_data_info['sample_idx'] = i
temp_data_info['token'] = ori_info_dict['token']
......@@ -821,7 +820,7 @@ def update_waymo_infos(pkl_path, out_dir):
data_list = mmengine.load(pkl_path)
print('Start updating:')
converted_list = []
for ori_info_dict in mmcv.track_iter_progress(data_list):
for ori_info_dict in mmengine.track_iter_progress(data_list):
temp_data_info = get_empty_standard_data_info(camera_types)
if 'plane' in ori_info_dict:
......
......@@ -14,6 +14,7 @@ from glob import glob
from os.path import join
import mmcv
import mmengine
import numpy as np
import tensorflow as tf
from waymo_open_dataset.utils import range_image_utils, transform_utils
......@@ -94,8 +95,8 @@ class Waymo2KITTI(object):
def convert(self):
"""Convert action."""
print('Start converting ...')
mmcv.track_parallel_progress(self.convert_one, range(len(self)),
self.workers)
mmengine.track_parallel_progress(self.convert_one, range(len(self)),
self.workers)
print('\nFinished ...')
def convert_one(self, file_idx):
......@@ -409,10 +410,10 @@ class Waymo2KITTI(object):
]
dir_list2 = [self.image_save_dir]
for d in dir_list1:
mmcv.mkdir_or_exist(d)
mmengine.mkdir_or_exist(d)
for d in dir_list2:
for i in range(5):
mmcv.mkdir_or_exist(f'{d}{str(i)}')
mmengine.mkdir_or_exist(f'{d}{str(i)}')
def convert_range_image_to_point_cloud(self,
frame,
......
......@@ -3,7 +3,7 @@ from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
import mmcv
import mmengine
try:
from model_archiver.model_packaging import package_model
......@@ -43,9 +43,9 @@ def mmdet3d2torchserve(
file under `output_folder` it will be overwritten.
Default: False.
"""
mmcv.mkdir_or_exist(output_folder)
mmengine.mkdir_or_exist(output_folder)
config = mmcv.Config.fromfile(config_file)
config = mmengine.Config.fromfile(config_file)
with TemporaryDirectory() as tmpdir:
config.dump(f'{tmpdir}/config.py')
......
......@@ -2,8 +2,8 @@
import argparse
from os import path as osp
import mmcv
from mmcv import Config, DictAction, mkdir_or_exist
import mmengine
from mmengine import Config, DictAction, mkdir_or_exist
from mmdet3d.datasets import build_dataset
from mmdet3d.registry import VISUALIZERS
......@@ -113,7 +113,7 @@ def main():
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = mmcv.ProgressBar(len(dataset))
progress_bar = mmengine.ProgressBar(len(dataset))
for item in dataset:
# the 3D Boxes in input could be in any of three coordinates
......
......@@ -2,7 +2,7 @@
import argparse
import torch
from mmcv.runner import save_checkpoint
from mmengine.runner import save_checkpoint
from torch import nn as nn
from mmdet3d.apis import init_model
......
......@@ -4,7 +4,7 @@ import tempfile
import torch
from mmcv import Config
from mmcv.runner import load_state_dict
from mmengine.runner import load_state_dict
from mmdet3d.models import build_detector
......
......@@ -3,8 +3,8 @@ import argparse
import tempfile
import torch
from mmcv import Config
from mmcv.runner import load_state_dict
from mmengine import Config
from mmengine.runner import load_state_dict
from mmdet3d.models import build_detector
......
......@@ -2,7 +2,6 @@ import argparse
import time
from os import path as osp
import mmcv
import mmengine
import numpy as np
......@@ -21,7 +20,7 @@ def update_sunrgbd_infos(root_dir, out_dir, pkl_files):
print(f'Reading from input file: {in_path}.')
a = mmengine.load(in_path)
print('Start updating:')
for item in mmcv.track_iter_progress(a):
for item in mmengine.track_iter_progress(a):
if 'rotation_y' in item['annos']:
item['annos']['rotation_y'] = -item['annos']['rotation_y']
item['annos']['gt_boxes_upright_depth'][:, -1:] = \
......@@ -46,7 +45,7 @@ def update_outdoor_dbinfos(root_dir, out_dir, pkl_files):
print('Start updating:')
for k in a.keys():
print(f'Updating samples of class {k}:')
for item in mmcv.track_iter_progress(a[k]):
for item in mmengine.track_iter_progress(a[k]):
boxes = item['box3d_lidar'].copy()
# swap l, w (or dx, dy)
item['box3d_lidar'][3] = boxes[4]
......@@ -74,7 +73,7 @@ def update_nuscenes_or_lyft_infos(root_dir, out_dir, pkl_files):
print(f'Reading from input file: {in_path}.')
a = mmengine.load(in_path)
print('Start updating:')
for item in mmcv.track_iter_progress(a['infos']):
for item in mmengine.track_iter_progress(a['infos']):
boxes = item['gt_boxes'].copy()
# swap l, w (or dx, dy)
item['gt_boxes'][:, 3] = boxes[:, 4]
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment