Commit d08a9ec2 authored by Kai Chen

Merge branch 'master' into single-stage

parents 626e1e19 810b7110
 import warnings
 import torch.nn as nn
+from mmcv.cnn import kaiming_init, constant_init
 from .norm import build_norm_layer
@@ -51,15 +52,8 @@ class ConvModule(nn.Module):
         self.groups = self.conv.groups
 
         if self.with_norm:
-            # self.norm_type, self.norm_params = parse_norm(normalize)
-            # assert self.norm_type in [None, 'BN', 'SyncBN', 'GN', 'SN']
-            # self.Norm2d = norm_cfg[self.norm_type]
-            if self.activate_last:
-                self.norm = build_norm_layer(normalize, out_channels)
-                # self.norm = self.Norm2d(out_channels, **self.norm_params)
-            else:
-                self.norm = build_norm_layer(normalize, in_channels)
-                # self.norm = self.Norm2d(in_channels, **self.norm_params)
+            norm_channels = out_channels if self.activate_last else in_channels
+            self.norm = build_norm_layer(normalize, norm_channels)
 
         if self.with_activatation:
             assert activation in ['relu'], 'Only ReLU supported.'
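
The hunk above replaces the commented-out parse_norm machinery and the if/else on self.activate_last with a single channel pick. For readers unfamiliar with the helper, here is a minimal sketch of what a build_norm_layer like the one imported from .norm might do, inferred from the norm types named in the old comments ('BN', 'SyncBN', 'GN', 'SN'); the actual implementation in this repo may differ:

import torch.nn as nn

# assumed registry; only the two most common types are sketched here
norm_layers = {'BN': nn.BatchNorm2d, 'GN': nn.GroupNorm}

def build_norm_layer(cfg, num_features):
    cfg = dict(cfg)  # e.g. dict(type='BN') or dict(type='GN', num_groups=32)
    layer_type = cfg.pop('type')
    assert layer_type in norm_layers
    if layer_type == 'GN':
        # GroupNorm is parameterized by channels plus num_groups from the config
        return nn.GroupNorm(num_channels=num_features, **cfg)
    return norm_layers[layer_type](num_features, **cfg)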
@@ -71,13 +65,9 @@ class ConvModule(nn.Module):
     def init_weights(self):
         nonlinearity = 'relu' if self.activation is None else self.activation
-        nn.init.kaiming_normal_(
-            self.conv.weight, mode='fan_out', nonlinearity=nonlinearity)
-        if self.with_bias:
-            nn.init.constant_(self.conv.bias, 0)
+        kaiming_init(self.conv, nonlinearity=nonlinearity)
         if self.with_norm:
-            nn.init.constant_(self.norm.weight, 1)
-            nn.init.constant_(self.norm.bias, 0)
+            constant_init(self.norm, 1, bias=0)
 
     def forward(self, x, activate=True, norm=True):
         if self.activate_last:
......
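
The two mmcv.cnn helpers adopted here bundle weight and bias initialization into one call, which is why the separate with_bias branch could be dropped. A rough sketch of their behavior (abridged; see mmcv for the authoritative signatures):

import torch.nn as nn

def kaiming_init(module, mode='fan_out', nonlinearity='relu', bias=0):
    # Kaiming-normal init for the weight, constant init for the bias (if any)
    nn.init.kaiming_normal_(module.weight, mode=mode, nonlinearity=nonlinearity)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)

def constant_init(module, val, bias=0):
    # fill the weight with `val` (1 keeps a norm layer's scale neutral)
    nn.init.constant_(module.weight, val)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)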
@@ -5,4 +5,4 @@ all:
 	$(PYTHON) setup.py build_ext --inplace
 
 clean:
-	rm *.so
+	rm -f *.so
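
The -f flag makes the clean target idempotent: plain `rm *.so` exits non-zero when no compiled extensions exist yet (for example on a fresh checkout), which aborts `make clean`; `rm -f` ignores missing files and always succeeds.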
-import os
-from distutils.core import setup
-from distutils.extension import Extension
+import os.path as osp
+from distutils.core import setup, Extension
 
 import numpy as np
 from Cython.Build import cythonize
 from Cython.Distutils import build_ext
 
-CUDA_ROOT = '/usr/local/cuda'
-CUDA = {
-    "include": os.path.join(CUDA_ROOT, 'include'),
-    "lib": os.path.join(CUDA_ROOT, 'lib64'),
-    "nvcc": os.path.join(CUDA_ROOT, 'bin', "nvcc")
-}
-
-inc_dirs = [CUDA['include'], np.get_include()]
-lib_dirs = [CUDA['lib']]
 
 # extensions
 ext_args = dict(
-    include_dirs=inc_dirs,
-    library_dirs=lib_dirs,
+    include_dirs=[np.get_include()],
     language='c++',
     libraries=['cudart'],
     extra_compile_args={
-        "cc": ['-Wno-unused-function', '-Wno-write-strings'],
-        "nvcc": [
-            '-arch=sm_52', '--ptxas-options=-v', '-c', '--compiler-options',
-            '-fPIC'
-        ],
+        'cc': ['-Wno-unused-function', '-Wno-write-strings'],
+        'nvcc': ['-c', '--compiler-options', '-fPIC'],
     },
 )
 
 extensions = [
     Extension('cpu_nms', ['cpu_nms.pyx'], **ext_args),
-    Extension('gpu_nms', ['gpu_nms.pyx', 'nms_kernel.cu'], **ext_args),
+    Extension('cpu_soft_nms', ['cpu_soft_nms.pyx'], **ext_args),
+    Extension('gpu_nms', ['gpu_nms.pyx', 'nms_kernel.cu'], **ext_args),
 ]
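
This setup.py imports both cythonize and Cython's build_ext, so the part below the fold presumably wires the extensions up in the usual py-faster-rcnn style. A sketch under that assumption (the package name and exact calls are guesses, not the verbatim file):

class custom_build_ext(build_ext):
    def build_extensions(self):
        # route .cu sources through nvcc (see customize_compiler_for_nvcc below)
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)

setup(
    name='nms',  # hypothetical name
    cmdclass={'build_ext': custom_build_ext},
    ext_modules=cythonize(extensions),
)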
@@ -59,9 +42,9 @@ def customize_compiler_for_nvcc(self):
     # object but distutils doesn't have the ability to change compilers
     # based on source extension: we add it.
     def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
-        if os.path.splitext(src)[1] == '.cu':
+        if osp.splitext(src)[1] == '.cu':
             # use the cuda for .cu files
-            self.set_executable('compiler_so', CUDA['nvcc'])
+            self.set_executable('compiler_so', 'nvcc')
             # use only a subset of the extra_postargs, which are 1-1 translated
             # from the extra_compile_args in the Extension class
             postargs = extra_postargs['nvcc']
......
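
The elided parts of customize_compiler_for_nvcc almost certainly follow the widely copied py-faster-rcnn recipe that the visible lines come from. A sketch of the whole function under that assumption (osp is the os.path alias imported above):

def customize_compiler_for_nvcc(self):
    # tell the UnixCCompiler instance to accept .cu sources
    self.src_extensions.append('.cu')
    # save the default host compiler and the original _compile method
    default_compiler_so = self.compiler_so
    super = self._compile

    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        if osp.splitext(src)[1] == '.cu':
            self.set_executable('compiler_so', 'nvcc')
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['cc']
        super(obj, src, ext, cc_args, postargs, pp_opts)
        # restore the host compiler so later .c/.cpp sources are not fed to nvcc
        self.compiler_so = default_compiler_so

    # inject the patched closure in place of the bound method
    self._compile = _compile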
@@ -12,7 +12,7 @@ def readme():
 MAJOR = 0
 MINOR = 5
-PATCH = 0
+PATCH = 1
 SUFFIX = ''
 SHORT_VERSION = '{}.{}.{}{}'.format(MAJOR, MINOR, PATCH, SUFFIX)
......