Unverified commit 32a4328b authored by Wenwei Zhang, committed by GitHub

Bump version to V1.0.0rc0

parents 86cc487c a8817998
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import math
 import numpy as np
 import torch
 from mmcv.cnn import CONV_LAYERS
@@ -143,16 +144,16 @@ class SparseConvolution(SparseModule):
             out_tensor.indice_dict = input.indice_dict
             out_tensor.grid = input.grid
             return out_tensor
-        datas = input.find_indice_pair(self.indice_key)
+        data = input.find_indice_pair(self.indice_key)
         if self.inverse:
-            assert datas is not None and self.indice_key is not None
-            _, outids, indice_pairs, indice_pair_num, out_spatial_shape = datas
+            assert data is not None and self.indice_key is not None
+            _, outids, indice_pairs, indice_pair_num, out_spatial_shape = data
             assert indice_pairs.shape[0] == np.prod(
                 self.kernel_size
             ), 'inverse conv must have same kernel size as its couple conv'
         else:
-            if self.indice_key is not None and datas is not None:
-                outids, _, indice_pairs, indice_pair_num, _ = datas
+            if self.indice_key is not None and data is not None:
+                outids, _, indice_pairs, indice_pair_num, _ = data
             else:
                 outids, indice_pairs, indice_pair_num = ops.get_indice_pairs(
                     indices,
...
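The assertion in the second hunk protects the `indice_key` coupling: a strided sparse conv computes indice pairs once, caches them on the sparse tensor under its key, and an inverse conv built with the same key reuses them, which only works if both share the same kernel size. A minimal sketch of such a coupled pair, written against the standalone spconv (v1.x) package that this vendored code mirrors; the import and class names are an assumption about the reader's environment, not part of this diff:

import spconv

# the strided conv computes indice pairs and caches them under the key 'cp0'
down = spconv.SparseConv3d(16, 32, kernel_size=3, stride=2, indice_key='cp0')
# its "couple" inverse conv reuses the cached pairs via find_indice_pair('cp0');
# it must keep the same kernel size, which the np.prod(kernel_size) assert checks
up = spconv.SparseInverseConv3d(32, 16, kernel_size=3, indice_key='cp0')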
@@ -93,7 +93,7 @@ struct delimiters {
 };
 // Functor to print containers. You can use this directly if you want
-// to specificy a non-default delimiters type. The printing logic can
+// to specify a non-default delimiters type. The printing logic can
 // be customized by specializing the nested template.
 template <typename T, typename TChar = char,
...
@@ -73,7 +73,7 @@ void sstream_print(SStream &ss, T val, TArgs... args) {
   if (!(expr)) { \
     std::stringstream __macro_s; \
     __macro_s << __FILE__ << " " << __LINE__ << "\n"; \
-    __macro_s << #expr << " assert faild. "; \
+    __macro_s << #expr << " assert failed. "; \
     tv::sstream_print(__macro_s, __VA_ARGS__); \
     throw std::runtime_error(__macro_s.str()); \
   } \
@@ -84,7 +84,7 @@ void sstream_print(SStream &ss, T val, TArgs... args) {
   if (!(expr)) { \
     std::stringstream __macro_s; \
     __macro_s << __FILE__ << " " << __LINE__ << "\n"; \
-    __macro_s << #expr << " assert faild. "; \
+    __macro_s << #expr << " assert failed. "; \
     tv::sstream_print(__macro_s, __VA_ARGS__); \
     throw std::invalid_argument(__macro_s.str()); \
   } \
...
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import sys
-import torch
 from collections import OrderedDict
+import torch
 from torch import nn
 from .structure import SparseConvTensor
...
+# Copyright (c) OpenMMLab. All rights reserved.
 import numpy as np
 import torch
...
@@ -11,9 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import numpy as np
 import unittest
+import numpy as np
 class TestCase(unittest.TestCase):
...
+# Copyright (c) OpenMMLab. All rights reserved.
 from .scatter_points import DynamicScatter, dynamic_scatter
 from .voxelize import Voxelization, voxelization
...
+# Copyright (c) OpenMMLab. All rights reserved.
 import torch
 from torch import nn
 from torch.autograd import Function
...
@@ -305,7 +305,7 @@ int hard_voxelize_gpu(const at::Tensor& points, at::Tensor& voxels,
   cudaDeviceSynchronize();
   AT_CUDA_CHECK(cudaGetLastError());
-  // 3. determin voxel num and voxel's coor index
+  // 3. determine voxel num and voxel's coor index
   // make the logic in the CUDA device could accelerate about 10 times
   auto coor_to_voxelidx = -at::ones(
       {
@@ -316,7 +316,7 @@ int hard_voxelize_gpu(const at::Tensor& points, at::Tensor& voxels,
       {
         1,
       },
-      points.options().dtype(at::kInt));  // must be zero from the begining
+      points.options().dtype(at::kInt));  // must be zero from the beginning
   AT_DISPATCH_ALL_TYPES(
       temp_coors.scalar_type(), "determin_duplicate", ([&] {
...
@@ -3,7 +3,9 @@ from mmcv.utils import Registry, build_from_cfg, print_log
 from .collect_env import collect_env
 from .logger import get_root_logger
+from .setup_env import setup_multi_processes
 __all__ = [
-    'Registry', 'build_from_cfg', 'get_root_logger', 'collect_env', 'print_log'
+    'Registry', 'build_from_cfg', 'get_root_logger', 'collect_env',
+    'print_log', 'setup_multi_processes'
 ]
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from mmcv.utils import get_logger
...
# Copyright (c) OpenMMLab. All rights reserved.
import os
import platform
import warnings

import cv2
from torch import multiprocessing as mp


def setup_multi_processes(cfg):
    """Setup multi-processing environment variables."""
    # set the multi-process start method to `fork` to speed up training
    if platform.system() != 'Windows':
        mp_start_method = cfg.get('mp_start_method', 'fork')
        current_method = mp.get_start_method(allow_none=True)
        if current_method is not None and current_method != mp_start_method:
            warnings.warn(
                f'Multi-processing start method `{mp_start_method}` is '
                f'different from the previous setting `{current_method}`. '
                f'It will be forced to `{mp_start_method}`. You can change '
                f'this behavior by changing `mp_start_method` in your config.')
        mp.set_start_method(mp_start_method, force=True)

    # disable opencv multithreading to avoid overloading the system
    opencv_num_threads = cfg.get('opencv_num_threads', 0)
    cv2.setNumThreads(opencv_num_threads)

    # setup OMP threads
    # This code is adapted from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py  # noqa
    if 'OMP_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1:
        omp_num_threads = 1
        warnings.warn(
            f'Setting OMP_NUM_THREADS environment variable for each process '
            f'to be {omp_num_threads} by default, to avoid overloading your '
            f'system. Please further tune the variable for optimal '
            f'performance in your application as needed.')
        os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)

    # setup MKL threads
    if 'MKL_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1:
        mkl_num_threads = 1
        warnings.warn(
            f'Setting MKL_NUM_THREADS environment variable for each process '
            f'to be {mkl_num_threads} by default, to avoid overloading your '
            f'system. Please further tune the variable for optimal '
            f'performance in your application as needed.')
        os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
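The helper is intended to be called once, early in a training or test entry script, before dataloaders and worker processes are spawned. A minimal usage sketch, assuming an mmcv Config that carries `data.workers_per_gpu`; the config path below is illustrative only:

from mmcv import Config
from mmdet3d.utils import setup_multi_processes

cfg = Config.fromfile('configs/votenet/votenet_8x8_scannet-3d-18class.py')
# set the start method and thread-related env vars before building dataloaders
setup_multi_processes(cfg)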
 # Copyright (c) Open-MMLab. All rights reserved.
-__version__ = '0.18.1'
+__version__ = '1.0.0rc0'
 short_version = __version__
...
@@ -2,7 +2,6 @@ Import:
   - configs/3dssd/metafile.yml
   - configs/centerpoint/metafile.yml
   - configs/dynamic_voxelization/metafile.yml
-  - configs/fp16/metafile.yml
   - configs/free_anchor/metafile.yml
   - configs/h3dnet/metafile.yml
   - configs/imvotenet/metafile.yml
@@ -17,3 +16,4 @@ Import:
   - configs/votenet/metafile.yml
   - configs/fcos3d/metafile.yml
   - configs/imvoxelnet/metafile.yml
+  - configs/pgd/metafile.yml
 lyft_dataset_sdk
 networkx>=2.2,<2.3
-numba==0.48.0
-numpy<1.20.0
+# we may unlock the version of numba in the future
+numba==0.53.0
+numpy
 nuscenes-devkit
 plyfile
 scikit-image
...
@@ -6,8 +6,11 @@ SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true
 [isort]
 line_length = 79
 multi_line_output = 0
-known_standard_library = setuptools
+extra_standard_library = setuptools
 known_first_party = mmdet,mmseg,mmdet3d
-known_third_party = cv2,imageio,indoor3d_util,load_scannet_data,lyft_dataset_sdk,m2r,matplotlib,mmcv,nuimages,numba,numpy,nuscenes,pandas,plyfile,pycocotools,pyquaternion,pytest,pytorch_sphinx_theme,recommonmark,scannet_utils,scipy,seaborn,shapely,skimage,sphinx,tensorflow,terminaltables,torch,trimesh,waymo_open_dataset
+known_third_party = cv2,imageio,indoor3d_util,load_scannet_data,lyft_dataset_sdk,m2r,matplotlib,mmcv,nuimages,numba,numpy,nuscenes,pandas,plyfile,pycocotools,pyquaternion,pytest,pytorch_sphinx_theme,recommonmark,requests,scannet_utils,scipy,seaborn,shapely,skimage,sphinx,tensorflow,terminaltables,torch,trimesh,ts,waymo_open_dataset
 no_lines_before = STDLIB,LOCALFOLDER
 default_section = THIRDPARTY
+
+[codespell]
+ignore-words-list = ans,refridgerator,crate,hist,formating,dout,wan,nd,fo,avod,AVOD
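Switching from known_standard_library to extra_standard_library keeps setuptools grouped with the standard library without overriding isort's built-in stdlib list, so import blocks across the repo are expected to group roughly as in the illustrative module below (a sketch, not a file from this diff):

# standard library; setuptools now sorts here via extra_standard_library
import os
from setuptools import setup

# third-party; unrecognized packages also land here via default_section = THIRDPARTY
import numpy as np
import torch

# first-party, per known_first_party = mmdet,mmseg,mmdet3d
from mmdet3d.utils import get_root_logger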
-from setuptools import find_packages, setup
 import os
 import platform
 import shutil
 import sys
-import torch
 import warnings
 from os import path as osp
+from setuptools import find_packages, setup
+import torch
 from torch.utils.cpp_extension import (BuildExtension, CppExtension,
                                        CUDAExtension)
@@ -273,6 +273,11 @@ if __name__ == '__main__':
                     'src/roiaware_pool3d_kernel.cu',
                     'src/points_in_boxes_cuda.cu',
                 ]),
+            make_cuda_ext(
+                name='roipoint_pool3d_ext',
+                module='mmdet3d.ops.roipoint_pool3d',
+                sources=['src/roipoint_pool3d.cpp'],
+                sources_cuda=['src/roipoint_pool3d_kernel.cu']),
             make_cuda_ext(
                 name='ball_query_ext',
                 module='mmdet3d.ops.ball_query',
...
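make_cuda_ext is a helper defined earlier in setup.py, outside this hunk. A rough sketch of what such a helper typically does with torch's extension builders, under the assumption that it picks CUDAExtension only when a CUDA toolchain is available; the exact body in the repo may differ:

import os
import torch
from torch.utils.cpp_extension import CppExtension, CUDAExtension

def make_cuda_ext(name, module, sources, sources_cuda=None):
    # hypothetical sketch of a make_cuda_ext-style helper; not copied from the repo
    sources_cuda = sources_cuda or []
    define_macros = []
    extension = CppExtension
    # compile the .cu sources only when CUDA is usable (or forced via env var)
    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
        define_macros += [('WITH_CUDA', None)]
        extension = CUDAExtension
        sources = sources + sources_cuda
    return extension(
        name=f'{module}.{name}',
        sources=[os.path.join(*module.split('.'), src) for src in sources],
        define_macros=define_macros)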