Unverified Commit cf6f4732 authored by Xiang Xu, committed by GitHub

[Feature] File I/O migration and reconstruction (#2319)

* refactor fileio

* update mmengine and mmcv version

* update

* update docs

* update version
parent b2e5ad6b
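In short, this commit replaces the old `file_client_args` / `FileClient` plumbing with MMEngine's `backend_args`-based fileio API across configs, docs, datasets, transforms, and hooks, and raises the minimum `mmcv`/`mmengine` versions accordingly. A minimal before/after sketch of the config-level change (the loader entry is illustrative; the actual diffs follow below):

```python
# Before: each loader carried a FileClient-style dict.
# dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=5, use_dim=5,
#      file_client_args=dict(backend='disk'))

# After: a single backend_args value is threaded through the config;
# None means plain local-disk I/O.
backend_args = None
load_points = dict(
    type='LoadPointsFromFile',
    coord_type='LIDAR',
    load_dim=5,
    use_dim=5,
    backend_args=backend_args)
```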
@@ -11,10 +11,19 @@ class_names = [
     'bicycle', 'motorcycle', 'pedestrian', 'animal', 'car',
     'emergency_vehicle', 'bus', 'other_vehicle', 'truck'
 ]
+backend_args = None
 train_pipeline = [
-    dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=5, use_dim=5),
-    dict(type='LoadPointsFromMultiSweeps', sweeps_num=10),
+    dict(
+        type='LoadPointsFromFile',
+        coord_type='LIDAR',
+        load_dim=5,
+        use_dim=5,
+        backend_args=backend_args),
+    dict(
+        type='LoadPointsFromMultiSweeps',
+        sweeps_num=10,
+        backend_args=backend_args),
     dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
     dict(
         type='GlobalRotScaleTrans',
@@ -34,8 +43,16 @@ train_pipeline = [
         keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
 ]
 test_pipeline = [
-    dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=5, use_dim=5),
-    dict(type='LoadPointsFromMultiSweeps', sweeps_num=10),
+    dict(
+        type='LoadPointsFromFile',
+        coord_type='LIDAR',
+        load_dim=5,
+        use_dim=5,
+        backend_args=backend_args),
+    dict(
+        type='LoadPointsFromMultiSweeps',
+        sweeps_num=10,
+        backend_args=backend_args),
     dict(
         type='MultiScaleFlipAug3D',
         img_scale=(1333, 800),
...
@@ -11,10 +11,19 @@ class_names = [
     'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier', 'car',
     'truck', 'trailer', 'bus', 'construction_vehicle'
 ]
+backend_args = None
 train_pipeline = [
-    dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=5, use_dim=5),
-    dict(type='LoadPointsFromMultiSweeps', sweeps_num=10),
+    dict(
+        type='LoadPointsFromFile',
+        coord_type='LIDAR',
+        load_dim=5,
+        use_dim=5,
+        backend_args=backend_args),
+    dict(
+        type='LoadPointsFromMultiSweeps',
+        sweeps_num=10,
+        backend_args=backend_args),
     dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
     dict(
         type='GlobalRotScaleTrans',
@@ -34,8 +43,16 @@ train_pipeline = [
         keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
 ]
 test_pipeline = [
-    dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=5, use_dim=5),
-    dict(type='LoadPointsFromMultiSweeps', sweeps_num=10),
+    dict(
+        type='LoadPointsFromFile',
+        coord_type='LIDAR',
+        load_dim=5,
+        use_dim=5,
+        backend_args=backend_args),
+    dict(
+        type='LoadPointsFromMultiSweeps',
+        sweeps_num=10,
+        backend_args=backend_args),
     dict(
         type='MultiScaleFlipAug3D',
         img_scale=(1333, 800),
...
@@ -10,7 +10,7 @@ We list some potential troubles encountered by users and developers, along with
 | MMDetection3D version | MMEngine version | MMCV version | MMDetection version |
 | --------------------- | :----------------------: | :---------------------: | :----------------------: |
-| dev-1.x | mmengine>=0.4.0, \<1.0.0 | mmcv>=2.0.0rc4, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 |
+| dev-1.x | mmengine>=0.6.0, \<1.0.0 | mmcv>=2.0.0rc4, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 |
 | v1.1.0rc3 | mmengine>=0.1.0, \<1.0.0 | mmcv>=2.0.0rc0, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 |
 | v1.1.0rc2 | mmengine>=0.1.0, \<1.0.0 | mmcv>=2.0.0rc0, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 |
 | v1.1.0rc1 | mmengine>=0.1.0, \<1.0.0 | mmcv>=2.0.0rc0, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 |
...
@@ -8,7 +8,7 @@ We support loading data and generated annotation info files (pkl and json) from
 ```python
 # set file client backends as Ceph
-file_client_args = dict(
+backend_args = dict(
     backend='petrel',
     path_mapping=dict({
         './data/nuscenes/':
@@ -30,15 +30,15 @@ db_sampler = dict(
         coord_type='LIDAR',
         load_dim=4,
         use_dim=4,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     # set file client for data base sampler to load db info file
-    file_client_args=file_client_args)
+    backend_args=backend_args)

 train_pipeline = [
     # set file client for loading training data
-    dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4, file_client_args=file_client_args),
+    dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4, backend_args=backend_args),
     # set file client for loading training data annotations
-    dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, file_client_args=file_client_args),
+    dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, backend_args=backend_args),
     dict(type='ObjectSample', db_sampler=db_sampler),
     dict(
         type='ObjectNoise',
@@ -59,7 +59,7 @@ train_pipeline = [
 ]
 test_pipeline = [
     # set file client for loading validation/testing data
-    dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4, file_client_args=file_client_args),
+    dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4, backend_args=backend_args),
     dict(
         type='MultiScaleFlipAug3D',
         img_scale=(1333, 800),
@@ -87,11 +87,11 @@ data = dict(
     train=dict(
         type='RepeatDataset',
         times=2,
-        dataset=dict(pipeline=train_pipeline, classes=class_names, file_client_args=file_client_args)),
+        dataset=dict(pipeline=train_pipeline, classes=class_names, backend_args=backend_args)),
     # set file client for loading validation info files (.pkl)
-    val=dict(pipeline=test_pipeline, classes=class_names, file_client_args=file_client_args),
+    val=dict(pipeline=test_pipeline, classes=class_names, backend_args=backend_args),
     # set file client for loading testing info files (.pkl)
-    test=dict(pipeline=test_pipeline, classes=class_names, file_client_args=file_client_args))
+    test=dict(pipeline=test_pipeline, classes=class_names, backend_args=backend_args))
 ```

 ## Load pretrained model from Ceph
...
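The documented upshot of the hunks above is that choosing a storage backend becomes a single switch. A minimal sketch, assuming a hypothetical bucket path for the Ceph case; everything downstream that receives `backend_args` stays unchanged:

```python
# Local disk (default): mmengine infers the backend from the path.
backend_args = None

# Ceph via petrel: only this dict changes; the bucket path below is
# illustrative, not taken from the repository.
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/nuscenes/': 's3://your_bucket/nuscenes/',
#         'data/nuscenes/': 's3://your_bucket/nuscenes/'
#     }))

train_pipeline = [
    dict(
        type='LoadPointsFromFile',
        coord_type='LIDAR',
        load_dim=4,
        use_dim=4,
        backend_args=backend_args),
]
```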
@@ -466,11 +466,11 @@ train_pipeline = [
         type='LoadPointsFromFile',
         load_dim=5,
         use_dim=5,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(
         type='LoadPointsFromMultiSweeps',
         sweeps_num=10,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
     dict(
         type='GlobalRotScaleTrans',
@@ -491,11 +491,11 @@ test_pipeline = [
         type='LoadPointsFromFile',
         load_dim=5,
         use_dim=5,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(
         type='LoadPointsFromMultiSweeps',
         sweeps_num=10,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(
         type='MultiScaleFlipAug3D',
         img_scale=(1333, 800),
...
@@ -28,11 +28,11 @@ train_pipeline = [
         type='LoadPointsFromFile',
         load_dim=5,
         use_dim=5,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(
         type='LoadPointsFromMultiSweeps',
         sweeps_num=10,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
     dict(
         type='GlobalRotScaleTrans',
@@ -52,11 +52,11 @@ test_pipeline = [
         type='LoadPointsFromFile',
         load_dim=5,
         use_dim=5,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(
         type='LoadPointsFromMultiSweeps',
         sweeps_num=10,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(
         type='MultiScaleFlipAug',
         img_scale=(1333, 800),
@@ -176,11 +176,11 @@ For each operation, we list the related dict fields that are added/updated/removed
         type='LoadPointsFromFile',
         load_dim=5,
         use_dim=5,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(
         type='LoadPointsFromMultiSweeps',
         sweeps_num=10,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
     dict(
         type='GlobalRotScaleTrans',
...
@@ -122,7 +122,7 @@ Note that:
     ann_file='./data/waymo/kitti_format/waymo_infos_val.pkl',
     waymo_bin_file='./data/waymo/waymo_format/gt.bin',
     data_root='./data/waymo/waymo_format',
-    file_client_args=file_client_args,
+    backend_args=backend_args,
     convert_kitti_format=True,
     idx2metainfo='data/waymo/waymo_format/idx2metainfo.pkl'
 )
...
@@ -10,7 +10,7 @@
 | MMDetection3D version | MMEngine version | MMCV version | MMDetection version |
 | ------------------ | :----------------------: | :---------------------: | :----------------------: |
-| dev-1.x | mmengine>=0.4.0, \<1.0.0 | mmcv>=2.0.0rc4, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 |
+| dev-1.x | mmengine>=0.6.0, \<1.0.0 | mmcv>=2.0.0rc4, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 |
 | v1.1.0rc3 | mmengine>=0.1.0, \<1.0.0 | mmcv>=2.0.0rc0, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 |
 | v1.1.0rc2 | mmengine>=0.1.0, \<1.0.0 | mmcv>=2.0.0rc0, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 |
 | v1.1.0rc1 | mmengine>=0.1.0, \<1.0.0 | mmcv>=2.0.0rc0, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 |
...
@@ -8,7 +8,7 @@
 ```python
 # set file client backends as Ceph
-file_client_args = dict(
+backend_args = dict(
     backend='petrel',
     path_mapping=dict({
         './data/nuscenes/':
@@ -30,15 +30,15 @@ db_sampler = dict(
         coord_type='LIDAR',
         load_dim=4,
         use_dim=4,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     # set file client for data base sampler to load db info file
-    file_client_args=file_client_args)
+    backend_args=backend_args)

 train_pipeline = [
     # set file client for loading training data
-    dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4, file_client_args=file_client_args),
+    dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4, backend_args=backend_args),
     # set file client for loading training data annotations
-    dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, file_client_args=file_client_args),
+    dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, backend_args=backend_args),
     dict(type='ObjectSample', db_sampler=db_sampler),
     dict(
         type='ObjectNoise',
@@ -59,7 +59,7 @@ train_pipeline = [
 ]
 test_pipeline = [
     # set file client for loading validation/testing data
-    dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4, file_client_args=file_client_args),
+    dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4, backend_args=backend_args),
     dict(
         type='MultiScaleFlipAug3D',
         img_scale=(1333, 800),
@@ -87,11 +87,11 @@ data = dict(
     train=dict(
         type='RepeatDataset',
         times=2,
-        dataset=dict(pipeline=train_pipeline, classes=class_names, file_client_args=file_client_args)),
+        dataset=dict(pipeline=train_pipeline, classes=class_names, backend_args=backend_args)),
     # set file client for loading validation info files (.pkl)
-    val=dict(pipeline=test_pipeline, classes=class_names, file_client_args=file_client_args),
+    val=dict(pipeline=test_pipeline, classes=class_names, backend_args=backend_args),
     # set file client for loading testing info files (.pkl)
-    test=dict(pipeline=test_pipeline, classes=class_names, file_client_args=file_client_args))
+    test=dict(pipeline=test_pipeline, classes=class_names, backend_args=backend_args))
 ```

 ## Load pretrained model from Ceph
...
@@ -459,11 +459,11 @@ train_pipeline = [
         type='LoadPointsFromFile',
         load_dim=5,
         use_dim=5,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(
         type='LoadPointsFromMultiSweeps',
         sweeps_num=10,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
     dict(
         type='GlobalRotScaleTrans',
@@ -484,11 +484,11 @@ test_pipeline = [
         type='LoadPointsFromFile',
         load_dim=5,
         use_dim=5,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(
         type='LoadPointsFromMultiSweeps',
         sweeps_num=10,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(
         type='MultiScaleFlipAug3D',
         img_scale=(1333, 800),
...
@@ -20,11 +20,11 @@ train_pipeline = [
         type='LoadPointsFromFile',
         load_dim=5,
         use_dim=5,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(
         type='LoadPointsFromMultiSweeps',
         sweeps_num=10,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
     dict(
         type='GlobalRotScaleTrans',
@@ -44,11 +44,11 @@ test_pipeline = [
         type='LoadPointsFromFile',
         load_dim=5,
         use_dim=5,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(
         type='LoadPointsFromMultiSweeps',
         sweeps_num=10,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(
         type='MultiScaleFlipAug',
         img_scale=(1333, 800),
@@ -168,11 +168,11 @@ test_pipeline = [
         type='LoadPointsFromFile',
         load_dim=5,
         use_dim=5,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(
         type='LoadPointsFromMultiSweeps',
         sweeps_num=10,
-        file_client_args=file_client_args),
+        backend_args=backend_args),
     dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
     dict(
         type='GlobalRotScaleTrans',
...
@@ -121,7 +121,7 @@ python tools/create_data.py waymo --root-path ./data/waymo/ --out-dir ./data/way
     ann_file='./data/waymo/kitti_format/waymo_infos_val.pkl',
     waymo_bin_file='./data/waymo/waymo_format/gt.bin',
     data_root='./data/waymo/waymo_format',
-    file_client_args=file_client_args,
+    backend_args=backend_args,
     convert_kitti_format=True,
     idx2metainfo='data/waymo/waymo_format/idx2metainfo.pkl'
 )
...
@@ -6,11 +6,11 @@ from mmengine.utils import digit_version
 from .version import __version__, version_info

-mmcv_minimum_version = '2.0.0rc0'
+mmcv_minimum_version = '2.0.0rc4'
 mmcv_maximum_version = '2.1.0'
 mmcv_version = digit_version(mmcv.__version__)

-mmengine_minimum_version = '0.4.0'
+mmengine_minimum_version = '0.6.0'
 mmengine_maximum_version = '1.0.0'
 mmengine_version = digit_version(mmengine.__version__)
...
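The raised floors above are enforced at import time by the version guard in `mmdet3d/__init__.py`. A simplified sketch of that check (the assertion message wording here is illustrative):

```python
import mmengine
from mmengine.utils import digit_version

mmengine_minimum_version = '0.6.0'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)

# Fail fast if the installed mmengine falls outside the supported window.
assert (digit_version(mmengine_minimum_version) <= mmengine_version
        < digit_version(mmengine_maximum_version)), \
    (f'MMEngine=={mmengine.__version__} is incompatible; please install '
     f'mmengine>={mmengine_minimum_version}, <{mmengine_maximum_version}.')
```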
@@ -4,7 +4,6 @@ import os
 from os import path as osp
 from typing import Callable, List, Optional, Set, Union

-import mmengine
 import numpy as np
 import torch
 from mmengine.dataset import BaseDataset
@@ -61,8 +60,8 @@ class Det3DDataset(BaseDataset):
         load_eval_anns (bool): Whether to load annotations in test_mode,
             the annotation will be save in `eval_ann_infos`, which can be
             used in Evaluator. Defaults to True.
-        file_client_args (dict): Configuration of file client.
-            Defaults to dict(backend='disk').
+        backend_args (dict, optional): Arguments to instantiate the
+            corresponding backend. Defaults to None.
         show_ins_var (bool): For debug purpose. Whether to show variation
             of the number of instances before and after through pipeline.
             Defaults to False.
@@ -80,11 +79,10 @@ class Det3DDataset(BaseDataset):
                  filter_empty_gt: bool = True,
                  test_mode: bool = False,
                  load_eval_anns: bool = True,
-                 file_client_args: dict = dict(backend='disk'),
+                 backend_args: Optional[dict] = None,
                  show_ins_var: bool = False,
                  **kwargs) -> None:
-        # init file client
-        self.file_client = mmengine.FileClient(**file_client_args)
+        self.backend_args = backend_args
         self.filter_empty_gt = filter_empty_gt
         self.load_eval_anns = load_eval_anns
         _default_modality_keys = ('use_lidar', 'use_camera')
...
@@ -331,7 +331,7 @@ class ScanNetInstanceSegDataset(Seg3DDataset):
                  test_mode: bool = False,
                  ignore_index: Optional[int] = None,
                  scene_idxs: Optional[Union[np.ndarray, str]] = None,
-                 file_client_args: dict = dict(backend='disk'),
+                 backend_args: Optional[dict] = None,
                  **kwargs) -> None:
         super().__init__(
             data_root=data_root,
@@ -343,5 +343,5 @@ class ScanNetInstanceSegDataset(Seg3DDataset):
             test_mode=test_mode,
             ignore_index=ignore_index,
             scene_idxs=scene_idxs,
-            file_client_args=file_client_args,
+            backend_args=backend_args,
             **kwargs)
...
@@ -2,9 +2,9 @@
 from os import path as osp
 from typing import Callable, List, Optional, Sequence, Union

-import mmengine
 import numpy as np
 from mmengine.dataset import BaseDataset
+from mmengine.fileio import get_local_path

 from mmdet3d.registry import DATASETS
@@ -49,8 +49,8 @@ class Seg3DDataset(BaseDataset):
         load_eval_anns (bool): Whether to load annotations in test_mode,
             the annotation will be save in `eval_ann_infos`, which can be used
             in Evaluator. Defaults to True.
-        file_client_args (dict): Configuration of file client.
-            Defaults to dict(backend='disk').
+        backend_args (dict, optional): Arguments to instantiate the
+            corresponding backend. Defaults to None.
     """

     METAINFO = {
         'classes': None,  # names of all classes data used for the task
@@ -75,10 +75,9 @@ class Seg3DDataset(BaseDataset):
                  test_mode: bool = False,
                  serialize_data: bool = False,
                  load_eval_anns: bool = True,
-                 file_client_args: dict = dict(backend='disk'),
+                 backend_args: Optional[dict] = None,
                  **kwargs) -> None:
-        # init file client
-        self.file_client = mmengine.FileClient(**file_client_args)
+        self.backend_args = backend_args
         self.modality = modality
         self.load_eval_anns = load_eval_anns
@@ -317,7 +316,8 @@ class Seg3DDataset(BaseDataset):
             scene_idxs = np.arange(len(self))
         if isinstance(scene_idxs, str):
             scene_idxs = osp.join(self.data_root, scene_idxs)
-            with self.file_client.get_local_path(scene_idxs) as local_path:
+            with get_local_path(
+                    scene_idxs, backend_args=self.backend_args) as local_path:
                 scene_idxs = np.load(local_path)
         else:
             scene_idxs = np.array(scene_idxs)
...
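For datasets, the `FileClient.get_local_path` call becomes the module-level `mmengine.fileio.get_local_path`, which yields a local copy when the backend is remote and the original path otherwise. A minimal sketch, assuming an illustrative scene-index file:

```python
import numpy as np
from mmengine.fileio import get_local_path

backend_args = None  # or a petrel/Ceph dict, exactly as in the configs above

# With a local backend the context manager yields the path unchanged; with a
# remote backend it downloads to a temporary file and removes it on exit.
with get_local_path(
        'data/scannet/seg_info/train_scene_idxs.npy',  # illustrative path
        backend_args=backend_args) as local_path:
    scene_idxs = np.load(local_path)
```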
@@ -5,6 +5,7 @@ from typing import List, Optional
 import mmengine
 import numpy as np
+from mmengine.fileio import get_local_path

 from mmdet3d.datasets.transforms import data_augment_utils
 from mmdet3d.registry import TRANSFORMS
@@ -91,26 +92,24 @@ class DataBaseSampler(object):
         classes (list[str], optional): List of classes. Defaults to None.
         points_loader (dict): Config of points loader. Defaults to
             dict(type='LoadPointsFromFile', load_dim=4, use_dim=[0, 1, 2, 3]).
-        file_client_args (dict): Arguments to instantiate a FileClient.
-            See :class:`mmengine.fileio.FileClient` for details.
-            Defaults to dict(backend='disk').
+        backend_args (dict, optional): Arguments to instantiate the
+            corresponding backend. Defaults to None.
     """

-    def __init__(
-        self,
-        info_path: str,
-        data_root: str,
-        rate: float,
-        prepare: dict,
-        sample_groups: dict,
-        classes: Optional[List[str]] = None,
-        points_loader: dict = dict(
-            type='LoadPointsFromFile',
-            coord_type='LIDAR',
-            load_dim=4,
-            use_dim=[0, 1, 2, 3]),
-        file_client_args: dict = dict(backend='disk')
-    ) -> None:
+    def __init__(self,
+                 info_path: str,
+                 data_root: str,
+                 rate: float,
+                 prepare: dict,
+                 sample_groups: dict,
+                 classes: Optional[List[str]] = None,
+                 points_loader: dict = dict(
+                     type='LoadPointsFromFile',
+                     coord_type='LIDAR',
+                     load_dim=4,
+                     use_dim=[0, 1, 2, 3],
+                     backend_args=None),
+                 backend_args: Optional[dict] = None) -> None:
         super().__init__()
         self.data_root = data_root
         self.info_path = info_path
@@ -120,10 +119,11 @@ class DataBaseSampler(object):
         self.cat2label = {name: i for i, name in enumerate(classes)}
         self.label2cat = {i: name for i, name in enumerate(classes)}
         self.points_loader = TRANSFORMS.build(points_loader)
-        self.file_client = mmengine.FileClient(**file_client_args)
+        self.backend_args = backend_args

         # load data base infos
-        with self.file_client.get_local_path(info_path) as local_path:
+        with get_local_path(
+                info_path, backend_args=self.backend_args) as local_path:
             # loading data from a file-like object needs file format
             db_infos = mmengine.load(open(local_path, 'rb'), file_format='pkl')
...
@@ -4,11 +4,11 @@ from typing import List, Optional, Union
 import mmcv
 import mmengine
-import mmengine.fileio as fileio
 import numpy as np
 from mmcv.transforms import LoadImageFromFile
 from mmcv.transforms.base import BaseTransform
 from mmdet.datasets.transforms import LoadAnnotations
+from mmengine.fileio import get

 from mmdet3d.registry import TRANSFORMS
 from mmdet3d.structures.bbox_3d import get_box_type
@@ -25,9 +25,8 @@ class LoadMultiViewImageFromFiles(BaseTransform):
         to_float32 (bool): Whether to convert the img to float32.
             Defaults to False.
         color_type (str): Color type of the file. Defaults to 'unchanged'.
-        file_client_args (dict): Arguments to instantiate a FileClient.
-            See :class:`mmengine.fileio.FileClient` for details.
-            Defaults to dict(backend='disk').
+        backend_args (dict, optional): Arguments to instantiate the
+            corresponding backend. Defaults to None.
         num_views (int): Number of view in a frame. Defaults to 5.
         num_ref_frames (int): Number of frame in loading. Defaults to -1.
         test_mode (bool): Whether is test mode in loading. Defaults to False.
@@ -38,15 +37,14 @@ class LoadMultiViewImageFromFiles(BaseTransform):
     def __init__(self,
                  to_float32: bool = False,
                  color_type: str = 'unchanged',
-                 file_client_args: dict = dict(backend='disk'),
+                 backend_args: Optional[dict] = None,
                  num_views: int = 5,
                  num_ref_frames: int = -1,
                  test_mode: bool = False,
                  set_default_scale: bool = True) -> None:
         self.to_float32 = to_float32
         self.color_type = color_type
-        self.file_client_args = file_client_args.copy()
-        self.file_client = None
+        self.backend_args = backend_args
         self.num_views = num_views
         # num_ref_frames is used for multi-sweep loading
         self.num_ref_frames = num_ref_frames
@@ -164,12 +162,11 @@ class LoadMultiViewImageFromFiles(BaseTransform):
             results['ori_cam2img'] = copy.deepcopy(results['cam2img'])

-        if self.file_client is None:
-            self.file_client = mmengine.FileClient(**self.file_client_args)
-
         # img is of shape (h, w, c, num_views)
         # h and w can be different for different views
-        img_bytes = [self.file_client.get(name) for name in filename]
+        img_bytes = [
+            get(name, backend_args=self.backend_args) for name in filename
+        ]
         imgs = [
             mmcv.imfrombytes(img_byte, flag=self.color_type)
             for img_byte in img_bytes
@@ -257,13 +254,7 @@ class LoadImageFromFileMono3D(LoadImageFromFile):
                 'nuscenes datasets')

         try:
-            if self.file_client_args is not None:
-                file_client = fileio.FileClient.infer_client(
-                    self.file_client_args, filename)
-                img_bytes = file_client.get(filename)
-            else:
-                img_bytes = fileio.get(
-                    filename, backend_args=self.backend_args)
+            img_bytes = get(filename, backend_args=self.backend_args)
             img = mmcv.imfrombytes(
                 img_bytes, flag=self.color_type, backend=self.imdecode_backend)
         except Exception as e:
@@ -331,9 +322,8 @@ class LoadPointsFromMultiSweeps(BaseTransform):
         sweeps_num (int): Number of sweeps. Defaults to 10.
         load_dim (int): Dimension number of the loaded points. Defaults to 5.
         use_dim (list[int]): Which dimension to use. Defaults to [0, 1, 2, 4].
-        file_client_args (dict): Arguments to instantiate a FileClient.
-            See :class:`mmengine.fileio.FileClient` for details.
-            Defaults to dict(backend='disk').
+        backend_args (dict, optional): Arguments to instantiate the
+            corresponding backend. Defaults to None.
         pad_empty_sweeps (bool): Whether to repeat keyframe when
             sweeps is empty. Defaults to False.
         remove_close (bool): Whether to remove close points. Defaults to False.
@@ -345,7 +335,7 @@ class LoadPointsFromMultiSweeps(BaseTransform):
                  sweeps_num: int = 10,
                  load_dim: int = 5,
                  use_dim: List[int] = [0, 1, 2, 4],
-                 file_client_args: dict = dict(backend='disk'),
+                 backend_args: Optional[dict] = None,
                  pad_empty_sweeps: bool = False,
                  remove_close: bool = False,
                  test_mode: bool = False) -> None:
@@ -356,8 +346,7 @@ class LoadPointsFromMultiSweeps(BaseTransform):
         assert max(use_dim) < load_dim, \
             f'Expect all used dimensions < {load_dim}, got {use_dim}'
         self.use_dim = use_dim
-        self.file_client_args = file_client_args.copy()
-        self.file_client = mmengine.FileClient(**self.file_client_args)
+        self.backend_args = backend_args
         self.pad_empty_sweeps = pad_empty_sweeps
         self.remove_close = remove_close
         self.test_mode = test_mode
@@ -371,10 +360,8 @@ class LoadPointsFromMultiSweeps(BaseTransform):
         Returns:
             np.ndarray: An array containing point clouds data.
         """
-        if self.file_client is None:
-            self.file_client = mmengine.FileClient(**self.file_client_args)
         try:
-            pts_bytes = self.file_client.get(pts_filename)
+            pts_bytes = get(pts_filename, backend_args=self.backend_args)
             points = np.frombuffer(pts_bytes, dtype=np.float32)
         except ConnectionError:
             mmengine.check_file_exist(pts_filename)
@@ -592,21 +579,18 @@ class LoadPointsFromFile(BaseTransform):
         use_color (bool): Whether to use color features. Defaults to False.
         norm_intensity (bool): Whether to normlize the intensity. Defaults to
             False.
-        file_client_args (dict): Arguments to instantiate a FileClient.
-            See :class:`mmengine.fileio.FileClient` for details.
-            Defaults to dict(backend='disk').
+        backend_args (dict, optional): Arguments to instantiate the
+            corresponding backend. Defaults to None.
     """

-    def __init__(
-        self,
-        coord_type: str,
-        load_dim: int = 6,
-        use_dim: Union[int, List[int]] = [0, 1, 2],
-        shift_height: bool = False,
-        use_color: bool = False,
-        norm_intensity: bool = False,
-        file_client_args: dict = dict(backend='disk')
-    ) -> None:
+    def __init__(self,
+                 coord_type: str,
+                 load_dim: int = 6,
+                 use_dim: Union[int, List[int]] = [0, 1, 2],
+                 shift_height: bool = False,
+                 use_color: bool = False,
+                 norm_intensity: bool = False,
+                 backend_args: Optional[dict] = None) -> None:
         self.shift_height = shift_height
         self.use_color = use_color
         if isinstance(use_dim, int):
@@ -619,8 +603,7 @@ class LoadPointsFromFile(BaseTransform):
         self.load_dim = load_dim
         self.use_dim = use_dim
         self.norm_intensity = norm_intensity
-        self.file_client_args = file_client_args.copy()
-        self.file_client = None
+        self.backend_args = backend_args

     def _load_points(self, pts_filename: str) -> np.ndarray:
         """Private function to load point clouds data.
@@ -631,10 +614,8 @@ class LoadPointsFromFile(BaseTransform):
         Returns:
             np.ndarray: An array containing point clouds data.
         """
-        if self.file_client is None:
-            self.file_client = mmengine.FileClient(**self.file_client_args)
         try:
-            pts_bytes = self.file_client.get(pts_filename)
+            pts_bytes = get(pts_filename, backend_args=self.backend_args)
             points = np.frombuffer(pts_bytes, dtype=np.float32)
         except ConnectionError:
             mmengine.check_file_exist(pts_filename)
@@ -698,7 +679,7 @@ class LoadPointsFromFile(BaseTransform):
         repr_str = self.__class__.__name__ + '('
         repr_str += f'shift_height={self.shift_height}, '
         repr_str += f'use_color={self.use_color}, '
-        repr_str += f'file_client_args={self.file_client_args}, '
+        repr_str += f'backend_args={self.backend_args}, '
         repr_str += f'load_dim={self.load_dim}, '
         repr_str += f'use_dim={self.use_dim})'
         return repr_str
@@ -807,37 +788,34 @@ class LoadAnnotations3D(LoadAnnotations):
             panoptic labels. Defaults to None.
         dataset_type (str): Type of dataset used for splitting semantic and
             instance labels. Defaults to None.
-        file_client_args (dict): Arguments to instantiate a FileClient.
-            See :class:`mmengine.fileio.FileClient` for details.
-            Defaults to dict(backend='disk').
+        backend_args (dict, optional): Arguments to instantiate the
+            corresponding backend. Defaults to None.
     """

-    def __init__(
-        self,
-        with_bbox_3d: bool = True,
-        with_label_3d: bool = True,
-        with_attr_label: bool = False,
-        with_mask_3d: bool = False,
-        with_seg_3d: bool = False,
-        with_bbox: bool = False,
-        with_label: bool = False,
-        with_mask: bool = False,
-        with_seg: bool = False,
-        with_bbox_depth: bool = False,
-        with_panoptic_3d: bool = False,
-        poly2mask: bool = True,
-        seg_3d_dtype: str = 'np.int64',
-        seg_offset: int = None,
-        dataset_type: str = None,
-        file_client_args: dict = dict(backend='disk')
-    ) -> None:
+    def __init__(self,
+                 with_bbox_3d: bool = True,
+                 with_label_3d: bool = True,
+                 with_attr_label: bool = False,
+                 with_mask_3d: bool = False,
+                 with_seg_3d: bool = False,
+                 with_bbox: bool = False,
+                 with_label: bool = False,
+                 with_mask: bool = False,
+                 with_seg: bool = False,
+                 with_bbox_depth: bool = False,
+                 with_panoptic_3d: bool = False,
+                 poly2mask: bool = True,
+                 seg_3d_dtype: str = 'np.int64',
+                 seg_offset: int = None,
+                 dataset_type: str = None,
+                 backend_args: Optional[dict] = None) -> None:
         super().__init__(
             with_bbox=with_bbox,
             with_label=with_label,
             with_mask=with_mask,
             with_seg=with_seg,
             poly2mask=poly2mask,
-            file_client_args=file_client_args)
+            backend_args=backend_args)
         self.with_bbox_3d = with_bbox_3d
         self.with_bbox_depth = with_bbox_depth
         self.with_label_3d = with_label_3d
@@ -848,7 +826,6 @@ class LoadAnnotations3D(LoadAnnotations):
         self.seg_3d_dtype = eval(seg_3d_dtype)
         self.seg_offset = seg_offset
         self.dataset_type = dataset_type
-        self.file_client = None

     def _load_bboxes_3d(self, results: dict) -> dict:
         """Private function to move the 3D bounding box annotation from
@@ -914,10 +891,9 @@ class LoadAnnotations3D(LoadAnnotations):
         """
         pts_instance_mask_path = results['pts_instance_mask_path']

-        if self.file_client is None:
-            self.file_client = mmengine.FileClient(**self.file_client_args)
         try:
-            mask_bytes = self.file_client.get(pts_instance_mask_path)
+            mask_bytes = get(
+                pts_instance_mask_path, backend_args=self.backend_args)
             pts_instance_mask = np.frombuffer(mask_bytes, dtype=np.int64)
         except ConnectionError:
             mmengine.check_file_exist(pts_instance_mask_path)
@@ -941,10 +917,9 @@ class LoadAnnotations3D(LoadAnnotations):
         """
         pts_semantic_mask_path = results['pts_semantic_mask_path']

-        if self.file_client is None:
-            self.file_client = mmengine.FileClient(**self.file_client_args)
         try:
-            mask_bytes = self.file_client.get(pts_semantic_mask_path)
+            mask_bytes = get(
+                pts_semantic_mask_path, backend_args=self.backend_args)
             # add .copy() to fix read-only bug
             pts_semantic_mask = np.frombuffer(
                 mask_bytes, dtype=self.seg_3d_dtype).copy()
@@ -976,10 +951,9 @@ class LoadAnnotations3D(LoadAnnotations):
         """
         pts_panoptic_mask_path = results['pts_panoptic_mask_path']

-        if self.file_client is None:
-            self.file_client = mmengine.FileClient(**self.file_client_args)
         try:
-            mask_bytes = self.file_client.get(pts_panoptic_mask_path)
+            mask_bytes = get(
+                pts_panoptic_mask_path, backend_args=self.backend_args)
             # add .copy() to fix read-only bug
             pts_panoptic_mask = np.frombuffer(
                 mask_bytes, dtype=self.seg_3d_dtype).copy()
...
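Likewise, point and image loading in the transforms above now goes through `mmengine.fileio.get`, which returns raw bytes from whichever backend `backend_args` selects. A minimal sketch (the `.bin` path is illustrative):

```python
import numpy as np
from mmengine.fileio import get

backend_args = None  # local disk; a petrel dict would read the same bytes from Ceph

pts_bytes = get('data/kitti/training/velodyne/000000.bin',
                backend_args=backend_args)
# KITTI points are stored as float32 with 4 values per point (x, y, z, intensity).
points = np.frombuffer(pts_bytes, dtype=np.float32).reshape(-1, 4)
```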
@@ -99,7 +99,7 @@ class WaymoDataset(KittiDataset):
         self.cat_ids = range(len(self.METAINFO['classes']))
         self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
         self.max_sweeps = max_sweeps
-        # we do not provide file_client_args to custom_3d init
+        # we do not provide backend_args to custom_3d init
         # because we want disk loading for info
         # while ceph loading for Prediction2Waymo
         super().__init__(
...
@@ -5,7 +5,7 @@ from typing import Optional, Sequence
 import mmcv
 import numpy as np
-from mmengine.fileio import FileClient
+from mmengine.fileio import get
 from mmengine.hooks import Hook
 from mmengine.runner import Runner
 from mmengine.utils import mkdir_or_exist
@@ -44,9 +44,8 @@ class Det3DVisualizationHook(Hook):
         wait_time (float): The interval of show (s). Defaults to 0.
         test_out_dir (str, optional): directory where painted images
             will be saved in testing process.
-        file_client_args (dict): Arguments to instantiate a FileClient.
-            See :class:`mmengine.fileio.FileClient` for details.
-            Defaults to ``dict(backend='disk')``.
+        backend_args (dict, optional): Arguments to instantiate the
+            corresponding backend. Defaults to None.
     """

     def __init__(self,
@@ -57,7 +56,7 @@ class Det3DVisualizationHook(Hook):
                  vis_task: str = 'mono_det',
                  wait_time: float = 0.,
                  test_out_dir: Optional[str] = None,
-                 file_client_args: dict = dict(backend='disk')):
+                 backend_args: Optional[dict] = None):
         self._visualizer: Visualizer = Visualizer.get_current_instance()
         self.interval = interval
         self.score_thr = score_thr
@@ -72,8 +71,7 @@ class Det3DVisualizationHook(Hook):
         self.vis_task = vis_task
         self.wait_time = wait_time

-        self.file_client_args = file_client_args.copy()
-        self.file_client = None
+        self.backend_args = backend_args
         self.draw = draw
         self.test_out_dir = test_out_dir
         self._test_index = 0
@@ -92,9 +90,6 @@ class Det3DVisualizationHook(Hook):
         if self.draw is False:
             return

-        if self.file_client is None:
-            self.file_client = FileClient(**self.file_client_args)
-
         # There is no guarantee that the same batch of images
         # is visualized for each evaluation.
         total_curr_iter = runner.iter + batch_idx
@@ -104,14 +99,14 @@ class Det3DVisualizationHook(Hook):
         # Visualize only the first data
         if 'img_path' in outputs[0]:
             img_path = outputs[0].img_path
-            img_bytes = self.file_client.get(img_path)
+            img_bytes = get(img_path, backend_args=self.backend_args)
             img = mmcv.imfrombytes(img_bytes, channel_order='rgb')
             data_input['img'] = img

         if 'lidar_path' in outputs[0]:
             lidar_path = outputs[0].lidar_path
             num_pts_feats = outputs[0].num_pts_feats
-            pts_bytes = self.file_client.get(lidar_path)
+            pts_bytes = get(lidar_path, backend_args=self.backend_args)
             points = np.frombuffer(pts_bytes, dtype=np.float32)
             points = points.reshape(-1, num_pts_feats)
             data_input['points'] = points
@@ -146,23 +141,20 @@ class Det3DVisualizationHook(Hook):
                 self.test_out_dir)
             mkdir_or_exist(self.test_out_dir)

-        if self.file_client is None:
-            self.file_client = FileClient(**self.file_client_args)
-
         for data_sample in outputs:
             self._test_index += 1

             data_input = dict()
             if 'img_path' in data_sample:
                 img_path = data_sample.img_path
-                img_bytes = self.file_client.get(img_path)
+                img_bytes = get(img_path, backend_args=self.backend_args)
                 img = mmcv.imfrombytes(img_bytes, channel_order='rgb')
                 data_input['img'] = img

             if 'lidar_path' in data_sample:
                 lidar_path = data_sample.lidar_path
                 num_pts_feats = data_sample.num_pts_feats
-                pts_bytes = self.file_client.get(lidar_path)
+                pts_bytes = get(lidar_path, backend_args=self.backend_args)
                 points = np.frombuffer(pts_bytes, dtype=np.float32)
                 points = points.reshape(-1, num_pts_feats)
                 data_input['points'] = points
...
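A usage note on the hook change above: `backend_args` can be passed straight through the hook config. A hedged sketch of a `default_hooks` entry (the draw and backend settings are illustrative):

```python
backend_args = None  # or a petrel/Ceph dict shared with the data pipeline

default_hooks = dict(
    visualization=dict(
        type='Det3DVisualizationHook',
        draw=True,
        backend_args=backend_args))
```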