"docs/vscode:/vscode.git/clone" did not exist on "76ca91dff2e108de4b8d9770bbe44289cc16e344"
Unverified Commit 76e351a7 authored by Wenwei Zhang's avatar Wenwei Zhang Committed by GitHub
Browse files

Release v1.0.0rc2

parents 5111eda8 4422eaab
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.ops import SparseModule, SparseSequential
from torch import nn
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from .spconv import IS_SPCONV2_AVAILABLE
if IS_SPCONV2_AVAILABLE:
from spconv.pytorch import SparseModule, SparseSequential
else:
from mmcv.ops import SparseModule, SparseSequential
def replace_feature(out, new_features):
    """Replace the feature tensor of a sparse tensor.

    Works with both sparse-conv backends: spconv 2.x exposes an explicit
    ``replace_feature`` method, while the mmcv/spconv 1.x tensor lets the
    ``features`` attribute be assigned in place.

    Args:
        out: Sparse tensor whose features should be replaced.
        new_features (torch.Tensor): The replacement feature tensor.

    Returns:
        The sparse tensor carrying ``new_features`` (may be a new object
        under spconv 2.x, the mutated input otherwise).
    """
    # `hasattr` is the idiomatic capability check; probing `out.__dir__()`
    # relies on the object listing its own methods, which is not guaranteed.
    if hasattr(out, 'replace_feature'):
        # spconv 2.x behaviour
        return out.replace_feature(new_features)
    out.features = new_features
    return out
class SparseBottleneck(Bottleneck, SparseModule):
......@@ -46,21 +60,21 @@ class SparseBottleneck(Bottleneck, SparseModule):
identity = x.features
out = self.conv1(x)
out.features = self.bn1(out.features)
out.features = self.relu(out.features)
out = replace_feature(out, self.bn1(out.features))
out = replace_feature(out, self.relu(out.features))
out = self.conv2(out)
out.features = self.bn2(out.features)
out.features = self.relu(out.features)
out = replace_feature(out, self.bn2(out.features))
out = replace_feature(out, self.relu(out.features))
out = self.conv3(out)
out.features = self.bn3(out.features)
out = replace_feature(out, self.bn3(out.features))
if self.downsample is not None:
identity = self.downsample(x)
out.features += identity
out.features = self.relu(out.features)
out = replace_feature(out, out.features + identity)
out = replace_feature(out, self.relu(out.features))
return out
......@@ -104,19 +118,18 @@ class SparseBasicBlock(BasicBlock, SparseModule):
identity = x.features
assert x.features.dim() == 2, f'x.features.dim()={x.features.dim()}'
out = self.conv1(x)
out.features = self.norm1(out.features)
out.features = self.relu(out.features)
out = replace_feature(out, self.norm1(out.features))
out = replace_feature(out, self.relu(out.features))
out = self.conv2(out)
out.features = self.norm2(out.features)
out = replace_feature(out, self.norm2(out.features))
if self.downsample is not None:
identity = self.downsample(x)
out.features += identity
out.features = self.relu(out.features)
out = replace_feature(out, out.features + identity)
out = replace_feature(out, self.relu(out.features))
return out
......
# Copyright (c) OpenMMLab. All rights reserved.
from .overwrite_spconv.write_spconv2 import register_spconv2
# Probe for spconv 2.x: if present and recent enough, register its ops so
# they overwrite the default mmcv (spconv 1.x) implementations.
try:
    import spconv
except ImportError:
    # spconv is not installed at all; mmcv's built-in sparse ops are used.
    IS_SPCONV2_AVAILABLE = False
else:
    # NOTE(review): this is a lexicographic string comparison, not a semantic
    # version compare — it works for 1.x vs 2.x but would misorder a
    # hypothetical '10.0.0'. Confirm before relying on it for new majors.
    if hasattr(spconv, '__version__') and spconv.__version__ >= '2.0.0':
        # True only if registration of the 2.x ops actually succeeded.
        IS_SPCONV2_AVAILABLE = register_spconv2()
    else:
        IS_SPCONV2_AVAILABLE = False
__all__ = ['IS_SPCONV2_AVAILABLE']
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
from mmcv.cnn.bricks.registry import CONV_LAYERS
from torch.nn.parameter import Parameter
def register_spconv2():
    """Register spconv 2.x sparse-conv ops into mmcv's ``CONV_LAYERS``.

    The spconv 2.x classes overwrite the default mmcv (spconv 1.x) entries
    of the same name, and ``SparseModule``'s state-dict hooks are patched so
    checkpoints convert between the two kernel-weight layouts.

    Returns:
        bool: True if spconv 2.x was importable and registration succeeded,
        False otherwise.
    """
    try:
        from spconv.pytorch import (SparseConv2d, SparseConv3d, SparseConv4d,
                                    SparseConvTranspose2d,
                                    SparseConvTranspose3d, SparseInverseConv2d,
                                    SparseInverseConv3d, SparseModule,
                                    SubMConv2d, SubMConv3d, SubMConv4d)
    except ImportError:
        return False

    sparse_ops = (SparseConv2d, SparseConv3d, SparseConv4d,
                  SparseConvTranspose2d, SparseConvTranspose3d,
                  SparseInverseConv2d, SparseInverseConv3d, SubMConv2d,
                  SubMConv3d, SubMConv4d)
    for op in sparse_ops:
        # Each class is registered under its own name; force=True replaces
        # the spconv 1.x op that mmcv already registered.
        CONV_LAYERS._register_module(op, op.__name__, force=True)

    # Patch (de)serialization so kernel layouts stay checkpoint-compatible.
    SparseModule._load_from_state_dict = _load_from_state_dict
    SparseModule._save_to_state_dict = _save_to_state_dict
    return True
def _save_to_state_dict(self, destination, prefix, keep_vars):
"""Rewrite this func to compat the convolutional kernel weights between
spconv 1.x in MMCV and 2.x in spconv2.x.
Kernel weights in MMCV spconv has shape in (D,H,W,in_channel,out_channel) ,
while those in spcon2.x is in (out_channel,D,H,W,in_channel).
"""
for name, param in self._parameters.items():
if param is not None:
param = param if keep_vars else param.detach()
if name == 'weight':
dims = list(range(1, len(param.shape))) + [0]
param = param.permute(*dims)
destination[prefix + name] = param
for name, buf in self._buffers.items():
if buf is not None and name not in self._non_persistent_buffers_set:
destination[prefix + name] = buf if keep_vars else buf.detach()
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
"""Rewrite this func to compat the convolutional kernel weights between
spconv 1.x in MMCV and 2.x in spconv2.x.
Kernel weights in MMCV spconv has shape in (D,H,W,in_channel,out_channel) ,
while those in spcon2.x is in (out_channel,D,H,W,in_channel).
"""
for hook in self._load_state_dict_pre_hooks.values():
hook(state_dict, prefix, local_metadata, strict, missing_keys,
unexpected_keys, error_msgs)
local_name_params = itertools.chain(self._parameters.items(),
self._buffers.items())
local_state = {k: v.data for k, v in local_name_params if v is not None}
for name, param in local_state.items():
key = prefix + name
if key in state_dict:
input_param = state_dict[key]
# Backward compatibility: loading 1-dim tensor from
# 0.3.* to version 0.4+
if len(param.shape) == 0 and len(input_param.shape) == 1:
input_param = input_param[0]
dims = [len(input_param.shape) - 1] + list(
range(len(input_param.shape) - 1))
input_param = input_param.permute(*dims)
if input_param.shape != param.shape:
# local shape should match the one in checkpoint
error_msgs.append(
f'size mismatch for {key}: copying a param with '
f'shape {key, input_param.shape} from checkpoint,'
f'the shape in current model is {param.shape}.')
continue
if isinstance(input_param, Parameter):
# backwards compatibility for serialized parameters
input_param = input_param.data
try:
param.copy_(input_param)
except Exception:
error_msgs.append(
f'While copying the parameter named "{key}", whose '
f'dimensions in the model are {param.size()} and whose '
f'dimensions in the checkpoint are {input_param.size()}.')
elif strict:
missing_keys.append(key)
if strict:
for key, input_param in state_dict.items():
if key.startswith(prefix):
input_name = key[len(prefix):]
input_name = input_name.split(
'.', 1)[0] # get the name of param/buffer/child
if input_name not in self._modules \
and input_name not in local_state:
unexpected_keys.append(key)
......@@ -4,9 +4,11 @@ from mmcv.utils import Registry, build_from_cfg, print_log
from .collect_env import collect_env
from .compat_cfg import compat_cfg
from .logger import get_root_logger
from .misc import find_latest_checkpoint
from .setup_env import setup_multi_processes
__all__ = [
'Registry', 'build_from_cfg', 'get_root_logger', 'collect_env',
'print_log', 'setup_multi_processes', 'compat_cfg'
'print_log', 'setup_multi_processes', 'find_latest_checkpoint',
'compat_cfg'
]
......@@ -5,6 +5,7 @@ from mmcv.utils import get_git_hash
import mmdet
import mmdet3d
import mmseg
from mmdet3d.ops.spconv import IS_SPCONV2_AVAILABLE
def collect_env():
......@@ -13,7 +14,7 @@ def collect_env():
env_info['MMDetection'] = mmdet.__version__
env_info['MMSegmentation'] = mmseg.__version__
env_info['MMDetection3D'] = mmdet3d.__version__ + '+' + get_git_hash()[:7]
env_info['spconv2.0'] = IS_SPCONV2_AVAILABLE
return env_info
......
# Copyright (c) OpenMMLab. All rights reserved.
import glob
import os.path as osp
import warnings
def find_latest_checkpoint(path, suffix='pth'):
    """Find the latest checkpoint from the working directory. This function
    is copied from mmdetection.

    Args:
        path (str): The path to find checkpoints.
        suffix (str): File extension. Defaults to pth.

    Returns:
        latest_path (str | None): File path of the latest checkpoint, or
            None if the path does not exist or holds no usable checkpoint.

    References:
        .. [1] https://github.com/microsoft/SoftTeacher
                  /blob/main/ssod/utils/patch.py
    """
    if not osp.exists(path):
        warnings.warn('The path of checkpoints does not exist.')
        return None
    # An explicit `latest.<suffix>` file/symlink wins over numbered files.
    if osp.exists(osp.join(path, f'latest.{suffix}')):
        return osp.join(path, f'latest.{suffix}')

    checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
    if len(checkpoints) == 0:
        warnings.warn('There are no checkpoints in the path.')
        return None

    latest = -1
    latest_path = None
    for checkpoint in checkpoints:
        # Checkpoints are expected to be named like `epoch_12.pth`; the
        # trailing integer decides recency.
        stem = osp.basename(checkpoint).split('_')[-1].split('.')[0]
        try:
            count = int(stem)
        except ValueError:
            # Skip files that do not follow the `*_<number>.<suffix>`
            # scheme (e.g. `best.pth`) instead of raising.
            continue
        if count > latest:
            latest = count
            latest_path = checkpoint
    return latest_path
# Copyright (c) Open-MMLab. All rights reserved.
# Single source of truth for the package version (the duplicated, dead
# '1.0.0rc1' assignment left by the flattened diff has been removed).
__version__ = '1.0.0rc2'
short_version = __version__
......
docutils==0.16.0
m2r
mistune==0.8.4
myst-parser
-e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
recommonmark
sphinx==4.0.2
sphinx-copybutton
sphinx_markdown_tables
open3d
spconv
waymo-open-dataset-tf-2-1-0==1.2.0
......@@ -3,7 +3,7 @@ import pytest
import torch
from torch import nn as nn
from mmdet.models import build_loss
from mmdet3d.models.builder import build_loss
def test_chamfer_disrance():
......
......@@ -353,3 +353,55 @@ def test_dla_net():
assert results[3].shape == torch.Size([4, 128, 4, 4])
assert results[4].shape == torch.Size([4, 256, 2, 2])
assert results[5].shape == torch.Size([4, 512, 1, 1])
def test_mink_resnet():
    """Smoke-test MinkResNet forward output shapes and tensor strides.

    Skipped unless CUDA and MinkowskiEngine are available.
    """
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')

    try:
        import MinkowskiEngine as ME
    except ImportError:
        pytest.skip('test requires MinkowskiEngine installation')

    np.random.seed(42)
    coordinates, features = [], []
    # batch of 2 point clouds
    for _ in range(2):
        coords = torch.from_numpy(np.random.rand(500, 3) * 100)
        coordinates.append(coords.float().cuda())
        feats = torch.from_numpy(np.random.rand(500, 3))
        features.append(feats.float().cuda())

    tensor_coordinates, tensor_features = ME.utils.sparse_collate(
        coordinates, features)
    x = ME.SparseTensor(
        features=tensor_features, coordinates=tensor_coordinates)

    # MinkResNet34 with 4 outputs
    model = build_backbone(
        dict(type='MinkResNet', depth=34, in_channels=3)).cuda()
    model.init_weights()
    outputs = model(x)
    expected = [(900, 64, 8), (472, 128, 16), (105, 256, 32), (16, 512, 64)]
    assert len(outputs) == 4
    for out, (n_points, n_channels, stride) in zip(outputs, expected):
        assert out.F.shape == torch.Size([n_points, n_channels])
        assert out.tensor_stride[0] == stride

    # Same depth, but only 2 stages and no initial pooling -> 2 outputs
    model = build_backbone(
        dict(
            type='MinkResNet', depth=34, in_channels=3, num_stages=2,
            pool=False)).cuda()
    model.init_weights()
    outputs = model(x)
    expected = [(985, 64, 4), (900, 128, 8)]
    assert len(outputs) == 2
    for out, (n_points, n_channels, stride) in zip(outputs, expected):
        assert out.F.shape == torch.Size([n_points, n_channels])
        assert out.tensor_stride[0] == stride
......@@ -9,7 +9,7 @@ def test_secfpn():
upsample_strides=[1, 2],
out_channels=[4, 6],
)
from mmdet.models.builder import build_neck
from mmdet3d.models.builder import build_neck
neck = build_neck(neck_cfg)
assert neck.deblocks[0][0].in_channels == 2
assert neck.deblocks[1][0].in_channels == 3
......
......@@ -1177,8 +1177,9 @@ def test_boxes3d_overlaps():
# same boxes under different coordinates should have the same iou
assert torch.allclose(
expected_iou_tensor, cam_overlaps_3d, rtol=1e-4, atol=1e-7)
assert torch.allclose(cam_overlaps_3d, overlaps_3d_iou)
expected_iou_tensor, cam_overlaps_3d, rtol=1e-3, atol=1e-4)
assert torch.allclose(
cam_overlaps_3d, overlaps_3d_iou, rtol=1e-3, atol=1e-4)
with pytest.raises(AssertionError):
cam_boxes1.overlaps(cam_boxes1, boxes1)
......
......@@ -32,17 +32,8 @@ def test_compat_loader_args():
val=dict(samples_per_gpu=3),
test=dict(samples_per_gpu=2),
train=dict())))
with pytest.warns(None) as record:
cfg = compat_loader_args(cfg)
# 5 warning
assert len(record) == 5
# assert the warning message
assert 'train_dataloader' in record.list[0].message.args[0]
assert 'samples_per_gpu' in record.list[0].message.args[0]
assert 'persistent_workers' in record.list[1].message.args[0]
assert 'train_dataloader' in record.list[1].message.args[0]
assert 'workers_per_gpu' in record.list[2].message.args[0]
assert 'train_dataloader' in record.list[2].message.args[0]
cfg = compat_loader_args(cfg)
assert cfg.data.train_dataloader.workers_per_gpu == 1
assert cfg.data.train_dataloader.samples_per_gpu == 1
assert cfg.data.train_dataloader.persistent_workers
......@@ -63,11 +54,7 @@ def test_compat_loader_args():
dict(samples_per_gpu=3)],
train=dict())))
with pytest.warns(None) as record:
cfg = compat_loader_args(cfg)
# 6 warning
assert len(record) == 6
assert cfg.data.test_dataloader.samples_per_gpu == 3
cfg = compat_loader_args(cfg)
# assert can not set args at the same time
cfg = ConfigDict(
......
......@@ -79,6 +79,12 @@ def build_data_cfg(config_path, skip_type, aug, cfg_options):
for i in range(len(cfg.train_pipeline)):
if cfg.train_pipeline[i]['type'] == 'LoadAnnotations3D':
show_pipeline.insert(i, cfg.train_pipeline[i])
# Collect points as well as labels
if cfg.train_pipeline[i]['type'] == 'Collect3D':
if show_pipeline[-1]['type'] == 'Collect3D':
show_pipeline[-1] = cfg.train_pipeline[i]
else:
show_pipeline.append(cfg.train_pipeline[i])
train_data_cfg['pipeline'] = [
x for x in show_pipeline if x['type'] not in skip_type
......
......@@ -5,7 +5,7 @@ import torch
from mmcv.runner import save_checkpoint
from torch import nn as nn
from mmdet.apis import init_model
from mmdet3d.apis import init_model
def fuse_conv_bn(conv, bn):
......
......@@ -11,17 +11,18 @@ from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
import mmdet
from mmdet3d.apis import single_gpu_test
from mmdet3d.datasets import build_dataloader, build_dataset
from mmdet3d.models import build_model
from mmdet.apis import multi_gpu_test, set_random_seed
from mmdet.datasets import replace_ImageToTensor
try:
# If mmdet version > 2.20.0, setup_multi_processes would be imported and
if mmdet.__version__ > '2.23.0':
# If mmdet version > 2.23.0, setup_multi_processes would be imported and
# used from mmdet instead of mmdet3d.
from mmdet.utils import setup_multi_processes
except ImportError:
else:
from mmdet3d.utils import setup_multi_processes
try:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment