Unverified Commit e9e20242 authored by Jingwei Zhang's avatar Jingwei Zhang Committed by GitHub
Browse files

[Fix] Fix unit test of datasets (#1865)

* fix ci of dataset and visualizer by regenerate pkl

* decrease channels in ut for reducing cuda memory

* still use sample_id in the kitti submitted files

* minor changes

* use waymo-open-dataset-tf-2-6-0

* use waymo-open-dataset-tf-2-5-0 to be compatible with python3.6

* use waymo-open-dataset-tf-2-4-0 to be compatible with python3.6

* no dependency about waymo-open-dataset

* Revert "no dependency about waymo-open-dataset"

This reverts commit 581d4eefe26851a295b3190bcfa73d6fb2ba54aa.

* not install waymo-open-dataset for ci

* max python version is 3.8 because of trimesh

* Revert "max python version is 3.8 because of trimesh"

This reverts commit 6d8a1851b22a2059e7de75b576c2b7f633ad7cfa.

* fix networkx in ci

* update docstring of data_preprocessor

* modify version of typing-extensions
parent 4c4dd093
...@@ -168,7 +168,7 @@ class KittiMetric(BaseMetric): ...@@ -168,7 +168,7 @@ class KittiMetric(BaseMetric):
"""Compute the metrics from processed results. """Compute the metrics from processed results.
Args: Args:
results (list): The processed results of each batch. results (list): The processed results of the whole dataset.
Returns: Returns:
Dict[str, float]: The computed metrics. The keys are the names of Dict[str, float]: The computed metrics. The keys are the names of
......
...@@ -104,10 +104,10 @@ class Det3DDataPreprocessor(DetDataPreprocessor): ...@@ -104,10 +104,10 @@ class Det3DDataPreprocessor(DetDataPreprocessor):
``BaseDataPreprocessor``. ``BaseDataPreprocessor``.
Args: Args:
data (List[dict] | List[List[dict]]): data from dataloader. data (dict | List[dict]): data from dataloader.
The outer list always represent the batch size, when it is The dict contains the whole batch data, when it is
a list[list[dict]], the inter list indicate test time a list[dict], the list indicate test time augmentation.
augmentation.
training (bool): Whether to enable training time augmentation. training (bool): Whether to enable training time augmentation.
Defaults to False. Defaults to False.
...@@ -293,7 +293,7 @@ class Det3DDataPreprocessor(DetDataPreprocessor): ...@@ -293,7 +293,7 @@ class Det3DDataPreprocessor(DetDataPreprocessor):
else: else:
raise TypeError('Output of `cast_data` should be a list of dict ' raise TypeError('Output of `cast_data` should be a list of dict '
'or a tuple with inputs and data_samples, but got' 'or a tuple with inputs and data_samples, but got'
f'{type(data)} {data}') f'{type(data)}: {data}')
return batch_pad_shape return batch_pad_shape
@torch.no_grad() @torch.no_grad()
......
...@@ -606,9 +606,8 @@ class Det3DLocalVisualizer(DetLocalVisualizer): ...@@ -606,9 +606,8 @@ class Det3DLocalVisualizer(DetLocalVisualizer):
win_name=name, win_name=name,
wait_time=wait_time) wait_time=wait_time)
mkdir_or_exist(out_file)
if out_file is not None: if out_file is not None:
mkdir_or_exist(out_file)
if drawn_img_3d is not None: if drawn_img_3d is not None:
mmcv.imwrite(drawn_img_3d[..., ::-1], out_file + '.jpg') mmcv.imwrite(drawn_img_3d[..., ::-1], out_file + '.jpg')
if drawn_img is not None: if drawn_img is not None:
......
black==20.8b1 # be compatible with typing-extensions 3.7.4
spconv spconv
waymo-open-dataset-tf-2-1-0==1.2.0 typing-extensions==3.7.4 # required by tensorflow before version 2.6
waymo-open-dataset-tf-2-4-0
lyft_dataset_sdk lyft_dataset_sdk
networkx>=2.2,<2.3 networkx>=2.5
numba==0.53.0 numba==0.53.0
numpy numpy
nuscenes-devkit nuscenes-devkit
......
...@@ -44,9 +44,9 @@ class TestFCOSMono3DHead(TestCase): ...@@ -44,9 +44,9 @@ class TestFCOSMono3DHead(TestCase):
fcos_mono3d_head = FCOSMono3DHead( fcos_mono3d_head = FCOSMono3DHead(
num_classes=10, num_classes=10,
in_channels=256, in_channels=32,
stacked_convs=2, stacked_convs=2,
feat_channels=256, feat_channels=32,
use_direction_classifier=True, use_direction_classifier=True,
diff_rad_by_sin=True, diff_rad_by_sin=True,
pred_attrs=True, pred_attrs=True,
...@@ -55,16 +55,16 @@ class TestFCOSMono3DHead(TestCase): ...@@ -55,16 +55,16 @@ class TestFCOSMono3DHead(TestCase):
dir_limit_offset=0, dir_limit_offset=0,
strides=[8, 16, 32, 64, 128], strides=[8, 16, 32, 64, 128],
group_reg_dims=(2, 1, 3, 1, 2), # offset, depth, size, rot, velo group_reg_dims=(2, 1, 3, 1, 2), # offset, depth, size, rot, velo
cls_branch=(256, ), cls_branch=(32, ),
reg_branch=( reg_branch=(
(256, ), # offset (32, ), # offset
(256, ), # depth (32, ), # depth
(256, ), # size (32, ), # size
(256, ), # rot (32, ), # rot
() # velo () # velo
), ),
dir_branch=(256, ), dir_branch=(32, ),
attr_branch=(256, ), attr_branch=(32, ),
loss_cls=dict( loss_cls=dict(
type='mmdet.FocalLoss', type='mmdet.FocalLoss',
use_sigmoid=True, use_sigmoid=True,
...@@ -96,11 +96,11 @@ class TestFCOSMono3DHead(TestCase): ...@@ -96,11 +96,11 @@ class TestFCOSMono3DHead(TestCase):
# FCOS3D head expects a multiple levels of features per image # FCOS3D head expects a multiple levels of features per image
feats = [ feats = [
torch.rand([1, 256, 116, 200], dtype=torch.float32), torch.rand([1, 32, 116, 200], dtype=torch.float32),
torch.rand([1, 256, 58, 100], dtype=torch.float32), torch.rand([1, 32, 58, 100], dtype=torch.float32),
torch.rand([1, 256, 29, 50], dtype=torch.float32), torch.rand([1, 32, 29, 50], dtype=torch.float32),
torch.rand([1, 256, 15, 25], dtype=torch.float32), torch.rand([1, 32, 15, 25], dtype=torch.float32),
torch.rand([1, 256, 8, 13], dtype=torch.float32) torch.rand([1, 32, 8, 13], dtype=torch.float32)
] ]
# Test forward # Test forward
......
...@@ -19,6 +19,14 @@ class TestFreeAnchor(unittest.TestCase): ...@@ -19,6 +19,14 @@ class TestFreeAnchor(unittest.TestCase):
freeanchor_cfg = _get_detector_cfg( freeanchor_cfg = _get_detector_cfg(
'free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor' 'free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor'
'_sbn-all_8xb4-2x_nus-3d.py') '_sbn-all_8xb4-2x_nus-3d.py')
# decrease channels to reduce cuda memory.
freeanchor_cfg.pts_voxel_encoder.feat_channels = [1, 1]
freeanchor_cfg.pts_middle_encoder.in_channels = 1
freeanchor_cfg.pts_backbone.base_channels = 1
freeanchor_cfg.pts_backbone.stem_channels = 1
freeanchor_cfg.pts_neck.out_channels = 1
freeanchor_cfg.pts_bbox_head.feat_channels = 1
freeanchor_cfg.pts_bbox_head.in_channels = 1
model = MODELS.build(freeanchor_cfg) model = MODELS.build(freeanchor_cfg)
num_gt_instance = 3 num_gt_instance = 3
packed_inputs = _create_detector_inputs( packed_inputs = _create_detector_inputs(
......
...@@ -18,6 +18,14 @@ class TestSSN(unittest.TestCase): ...@@ -18,6 +18,14 @@ class TestSSN(unittest.TestCase):
_setup_seed(0) _setup_seed(0)
ssn_cfg = _get_detector_cfg( ssn_cfg = _get_detector_cfg(
'ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py') 'ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py')
ssn_cfg.pts_voxel_encoder.feat_channels = [1, 1]
ssn_cfg.pts_middle_encoder.in_channels = 1
ssn_cfg.pts_backbone.in_channels = 1
ssn_cfg.pts_backbone.out_channels = [1, 1, 1]
ssn_cfg.pts_neck.in_channels = [1, 1, 1]
ssn_cfg.pts_neck.out_channels = [1, 1, 1]
ssn_cfg.pts_bbox_head.in_channels = 3
ssn_cfg.pts_bbox_head.feat_channels = 1
model = MODELS.build(ssn_cfg) model = MODELS.build(ssn_cfg)
num_gt_instance = 50 num_gt_instance = 50
packed_inputs = _create_detector_inputs( packed_inputs = _create_detector_inputs(
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment