Unverified Commit c0f5492e authored by zhuyuanhao's avatar zhuyuanhao Committed by GitHub
Browse files

add ext ops, support parrots (#310)



* add ext ops, support parrots

* fix lint

* fix lint

* update op from mmdetection

* support non-pytorch env

* fix import bug

* test not import mmcv.op

* rename mmcv.op to mmcv.ops

* fix compile warning

* 1. fix syncbn warning in pytorch 1.5
2. support only cpu compile
3. add point_sample from mmdet

* fix text bug

* update docstrings

* fix line endings

* minor updates

* remove non_local from ops

* bug fix for nonlocal2d

* rename ops_ext to _ext and _ext to _flow_warp_ext

* update the doc

* try clang-format github action

* fix github action

* add ops to api.rst

* fix cpp format

* fix clang format issues

* remove .clang-format
Co-authored-by: Kai Chen <chenkaidev@gmail.com>
parent a7bf7701
# Copyright (c) Open-MMLab. All rights reserved. # Copyright (c) Open-MMLab. All rights reserved.
# cython: language_level=3
STUFF = "Hi" STUFF = "Hi"
import numpy as np import numpy as np
......
import glob
import os
import platform import platform
import re import re
import setuptools
from pkg_resources import DistributionNotFound, get_distribution from pkg_resources import DistributionNotFound, get_distribution
from setuptools import Extension, dist, find_packages, setup from setuptools import dist, find_packages, setup
dist.Distribution().fetch_build_eggs(['Cython', 'numpy>=1.11.1']) dist.Distribution().fetch_build_eggs(['Cython', 'numpy>=1.11.1'])
import numpy # NOQA: E402 # isort:skip import numpy # NOQA: E402 # isort:skip
from Cython.Distutils import build_ext # NOQA: E402 # isort:skip from Cython.Build import cythonize # NOQA: E402 # isort:skip
from Cython.Distutils import build_ext as build_cmd # NOQA: E402 # isort:skip
def choose_requirement(primary, secondary): def choose_requirement(primary, secondary):
...@@ -121,16 +125,19 @@ install_requires = parse_requirements() ...@@ -121,16 +125,19 @@ install_requires = parse_requirements()
for main, secondary in CHOOSE_INSTALL_REQUIRES: for main, secondary in CHOOSE_INSTALL_REQUIRES:
install_requires.append(choose_requirement(main, secondary)) install_requires.append(choose_requirement(main, secondary))
if platform.system() == 'Darwin':
extra_compile_args = ['-stdlib=libc++']
extra_link_args = ['-stdlib=libc++']
else:
extra_compile_args = []
extra_link_args = []
EXT_MODULES = [ def get_extensions():
Extension( extensions = []
name='mmcv._ext',
if platform.system() == 'Darwin':
extra_compile_args = ['-stdlib=libc++']
extra_link_args = ['-stdlib=libc++']
else:
extra_compile_args = []
extra_link_args = []
ext_flow = setuptools.Extension(
name='mmcv._flow_warp_ext',
sources=[ sources=[
'./mmcv/video/optflow_warp/flow_warp.cpp', './mmcv/video/optflow_warp/flow_warp.cpp',
'./mmcv/video/optflow_warp/flow_warp_module.pyx' './mmcv/video/optflow_warp/flow_warp_module.pyx'
...@@ -138,14 +145,71 @@ EXT_MODULES = [ ...@@ -138,14 +145,71 @@ EXT_MODULES = [
include_dirs=[numpy.get_include()], include_dirs=[numpy.get_include()],
language='c++', language='c++',
extra_compile_args=extra_compile_args, extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args, extra_link_args=extra_link_args)
), extensions.extend(cythonize(ext_flow))
]
try:
import torch
cuda_args = [
'-gencode=arch=compute_52,code=sm_52',
'-gencode=arch=compute_60,code=sm_60',
'-gencode=arch=compute_61,code=sm_61',
'-gencode=arch=compute_70,code=sm_70',
'-gencode=arch=compute_70,code=compute_70'
]
ext_name = 'mmcv._ext'
if torch.__version__ == 'parrots':
from parrots.utils.build_extension import BuildExtension, Extension
op_files = glob.glob('./mmcv/ops/csrc/parrots/*')
include_path = os.path.abspath('./mmcv/ops/csrc')
ext_ops = Extension(
name=ext_name,
sources=op_files,
include_dirs=[include_path],
extra_compile_args={
'nvcc': cuda_args,
'cxx': [],
},
cuda=True)
extensions.append(ext_ops)
else:
from torch.utils.cpp_extension import (BuildExtension,
CUDAExtension, CppExtension)
# prevent ninja from using too many resources
os.environ.setdefault('MAX_JOBS', '4')
define_macros = []
extra_compile_args = {'cxx': []}
if (torch.cuda.is_available()
or os.getenv('FORCE_CUDA', '0') == '1'):
define_macros += [('WITH_CUDA', None)]
extra_compile_args['nvcc'] = cuda_args
op_files = glob.glob('./mmcv/ops/csrc/pytorch/*')
extension = CUDAExtension
else:
print(f'Compiling {ext_name} without CUDA')
op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp')
extension = CppExtension
include_path = os.path.abspath('./mmcv/ops/csrc')
ext_ops = extension(
name=ext_name,
sources=op_files,
include_dirs=[include_path],
define_macros=define_macros,
extra_compile_args=extra_compile_args)
extensions.append(ext_ops)
global build_cmd
build_cmd = BuildExtension
except ModuleNotFoundError:
print('Skip building ext ops due to the absence of torch.')
return extensions
setup( setup(
name='mmcv', name='mmcv',
version=get_version(), version=get_version(),
description='Open MMLab Computer Vision Foundation', description='OpenMMLab Computer Vision Foundation',
long_description=readme(), long_description=readme(),
keywords='computer vision', keywords='computer vision',
packages=find_packages(), packages=find_packages(),
...@@ -155,18 +219,17 @@ setup( ...@@ -155,18 +219,17 @@ setup(
'License :: OSI Approved :: Apache Software License', 'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent', 'Operating System :: OS Independent',
'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.8',
'Topic :: Utilities', 'Topic :: Utilities',
], ],
url='https://github.com/open-mmlab/mmcv', url='https://github.com/open-mmlab/mmcv',
author='Kai Chen', author='MMCV Authors',
author_email='chenkaidev@gmail.com', author_email='chenkaidev@gmail.com',
setup_requires=['pytest-runner'], setup_requires=['pytest-runner'],
tests_require=['pytest'], tests_require=['pytest'],
install_requires=install_requires, install_requires=install_requires,
ext_modules=EXT_MODULES, ext_modules=get_extensions(),
cmdclass={'build_ext': build_ext}, cmdclass={'build_ext': build_cmd},
zip_safe=False) zip_safe=False)
import torch import torch
from mmcv.cnn.bricks import HSigmoid from mmcv.cnn.bricks import HSigmoid
def test_hsigmoid(): def test_hsigmoid():
act = HSigmoid() act = HSigmoid()
input_shape = torch.Size([1, 3, 64, 64]) input_shape = torch.Size([1, 3, 64, 64])
input = torch.randn(input_shape) input = torch.randn(input_shape)
output = act(input) output = act(input)
expected_output = torch.min( expected_output = torch.min(
torch.max((input + 1) / 2, torch.zeros(input_shape)), torch.max((input + 1) / 2, torch.zeros(input_shape)),
torch.ones(input_shape)) torch.ones(input_shape))
# test output shape # test output shape
assert output.shape == expected_output.shape assert output.shape == expected_output.shape
# test output value # test output value
assert torch.equal(output, expected_output) assert torch.equal(output, expected_output)
import torch import torch
from torch.nn.functional import relu6 from torch.nn.functional import relu6
from mmcv.cnn.bricks import HSwish from mmcv.cnn.bricks import HSwish
def test_hswish(): def test_hswish():
# test inplace # test inplace
act = HSwish(inplace=True) act = HSwish(inplace=True)
assert act.act.inplace assert act.act.inplace
act = HSwish() act = HSwish()
assert not act.act.inplace assert not act.act.inplace
input = torch.randn(1, 3, 64, 64) input = torch.randn(1, 3, 64, 64)
expected_output = input * relu6(input + 3) / 6 expected_output = input * relu6(input + 3) / 6
output = act(input) output = act(input)
# test output shape # test output shape
assert output.shape == expected_output.shape assert output.shape == expected_output.shape
# test output value # test output value
assert torch.equal(output, expected_output) assert torch.equal(output, expected_output)
...@@ -201,23 +201,23 @@ class TestGeometric: ...@@ -201,23 +201,23 @@ class TestGeometric:
patches = mmcv.imcrop(self.img, bboxes) patches = mmcv.imcrop(self.img, bboxes)
assert len(patches) == bboxes.shape[0] assert len(patches) == bboxes.shape[0]
for i in range(len(patches)): for i in range(len(patches)):
ref_patch = np.load(patch_path + '/{}.npy'.format(i)) ref_patch = np.load(patch_path + f'/{i}.npy')
assert_array_equal(patches[i], ref_patch) assert_array_equal(patches[i], ref_patch)
# crop with scaling and no padding # crop with scaling and no padding
patches = mmcv.imcrop(self.img, bboxes, 1.2) patches = mmcv.imcrop(self.img, bboxes, 1.2)
for i in range(len(patches)): for i in range(len(patches)):
ref_patch = np.load(patch_path + '/scale_{}.npy'.format(i)) ref_patch = np.load(patch_path + f'/scale_{i}.npy')
assert_array_equal(patches[i], ref_patch) assert_array_equal(patches[i], ref_patch)
# crop with scaling and padding # crop with scaling and padding
patches = mmcv.imcrop(self.img, bboxes, 1.2, pad_fill=[255, 255, 0]) patches = mmcv.imcrop(self.img, bboxes, 1.2, pad_fill=[255, 255, 0])
for i in range(len(patches)): for i in range(len(patches)):
ref_patch = np.load(patch_path + '/pad_{}.npy'.format(i)) ref_patch = np.load(patch_path + f'/pad_{i}.npy')
assert_array_equal(patches[i], ref_patch) assert_array_equal(patches[i], ref_patch)
patches = mmcv.imcrop(self.img, bboxes, 1.2, pad_fill=0) patches = mmcv.imcrop(self.img, bboxes, 1.2, pad_fill=0)
for i in range(len(patches)): for i in range(len(patches)):
ref_patch = np.load(patch_path + '/pad0_{}.npy'.format(i)) ref_patch = np.load(patch_path + f'/pad0_{i}.npy')
assert_array_equal(patches[i], ref_patch) assert_array_equal(patches[i], ref_patch)
def test_impad(self): def test_impad(self):
......
import numpy as np
import torch
class TestBBox(object):
    """Checks for the CUDA ``bbox_overlaps`` op against precomputed IoUs."""

    def _test_bbox_overlaps(self, dtype=torch.float):
        """Compare bbox_overlaps output with hand-computed IoU values.

        Silently skips when CUDA is unavailable. ``offset=1`` means boxes
        use inclusive pixel coordinates (w = x2 - x1 + 1).
        """
        if not torch.cuda.is_available():
            return
        from mmcv.ops import bbox_overlaps

        # Pairwise mode: (3 boxes) x (2 boxes) -> 3x2 IoU matrix.
        b1 = torch.tensor([[1.0, 1.0, 3.0, 4.0], [2.0, 2.0, 3.0, 4.0],
                           [7.0, 7.0, 8.0, 8.0]]).cuda().type(dtype)
        b2 = torch.tensor([[0.0, 2.0, 2.0, 5.0], [2.0, 1.0, 3.0,
                                                  3.0]]).cuda().type(dtype)
        should_output = np.array([[0.33333334, 0.5], [0.2, 0.5], [0.0, 0.0]])
        out = bbox_overlaps(b1, b2, offset=1)
        assert np.allclose(out.cpu().numpy(), should_output, 1e-2)

        # Aligned mode: element-wise IoU of two equal-length box lists.
        b1 = torch.tensor([[1.0, 1.0, 3.0, 4.0], [2.0, 2.0, 3.0,
                                                  4.0]]).cuda().type(dtype)
        b2 = torch.tensor([[0.0, 2.0, 2.0, 5.0], [2.0, 1.0, 3.0,
                                                  3.0]]).cuda().type(dtype)
        should_output = np.array([0.33333334, 0.5])
        out = bbox_overlaps(b1, b2, aligned=True, offset=1)
        assert np.allclose(out.cpu().numpy(), should_output, 1e-2)

        # One-vs-many mode.
        # NOTE: the original had this assignment duplicated; the redundant
        # second copy was removed.
        b1 = torch.tensor([[0.0, 0.0, 3.0, 3.0]]).cuda().type(dtype)
        b2 = torch.tensor([[4.0, 0.0, 5.0, 3.0], [3.0, 0.0, 4.0, 3.0],
                           [2.0, 0.0, 3.0, 3.0], [1.0, 0.0, 2.0,
                                                  3.0]]).cuda().type(dtype)
        should_output = np.array([0, 0.2, 0.5, 0.5])
        out = bbox_overlaps(b1, b2, offset=1)
        assert np.allclose(out.cpu().numpy(), should_output, 1e-2)

    def test_bbox_overlaps_float(self):
        self._test_bbox_overlaps(torch.float)

    def test_bbox_overlaps_half(self):
        self._test_bbox_overlaps(torch.half)
import torch
from torch.autograd import gradcheck
class TestCarafe(object):
    """Gradient checks for the CARAFE upsampling ops (CUDA only)."""

    def _run_gradcheck(self, op):
        """Double-precision gradcheck of ``op`` on random CUDA inputs."""
        feat = torch.randn(
            2, 64, 3, 3, requires_grad=True, device='cuda').double()
        mask = torch.randn(
            2, 100, 6, 6, requires_grad=True,
            device='cuda').sigmoid().double()
        gradcheck(op, (feat, mask), atol=1e-4, eps=1e-4)

    def test_carafe_naive_gradcheck(self):
        """CARAFENaive must have correct analytic gradients."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import CARAFENaive
        self._run_gradcheck(CARAFENaive(5, 4, 2))

    def test_carafe_gradcheck(self):
        """CARAFE must have correct analytic gradients."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import CARAFE
        self._run_gradcheck(CARAFE(5, 4, 2))
import numpy as np
import torch
import torch.nn as nn
class Loss(nn.Module):
    """Toy criterion returning ``mean(input - target)`` over all elements."""

    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        # Flatten both tensors so any matching-size shapes reduce
        # element-wise to a single scalar.
        flat_in = input.view(-1)
        flat_tgt = target.view(-1)
        return torch.mean(flat_in - flat_tgt)
class TestCrissCrossAttention(object):
    """Regression test for CrissCrossAttention against recorded data."""

    def test_cc_attention(self):
        """Forward/backward must reproduce a known-good reference run."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import CrissCrossAttention
        criterion = Loss()

        # Reference input/output captured from a known-good run.
        ref_in = np.fromfile(
            'tests/data/for_ccattention/ccattention_input.bin',
            dtype=np.float32).reshape((1, 32, 45, 45))
        ref_out = np.fromfile(
            'tests/data/for_ccattention/ccattention_output.bin',
            dtype=np.float32).reshape((1, 32, 45, 45))

        x = torch.FloatTensor(ref_in)
        x.requires_grad = True
        shape = x.shape
        label = torch.ones(shape).cuda()

        attn = CrissCrossAttention(shape[1])
        attn.cuda()
        attn.train()
        x = x.cuda()

        got = attn(x)
        # Backward pass must run without error.
        criterion(got, label).backward()

        got = got.detach().cpu().numpy()
        assert np.allclose(got, ref_out)
        assert got.shape == shape
import torch
from torch.autograd import gradcheck
class TestCornerPool(object):
    """Gradient checks for CornerPool in each pooling direction.

    The four public tests were byte-identical except for the mode string,
    so the shared body lives in a private helper.
    """

    def _gradcheck_mode(self, mode):
        """Run gradcheck for ``CornerPool(mode)``; no-op without CUDA."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import CornerPool
        input = torch.randn(2, 4, 5, 5, requires_grad=True, device='cuda')
        gradcheck(CornerPool(mode), (input, ), atol=1e-3, eps=1e-4)

    def test_corner_pool_top_gradcheck(self):
        self._gradcheck_mode('top')

    def test_corner_pool_bottom_gradcheck(self):
        self._gradcheck_mode('bottom')

    def test_corner_pool_left_gradcheck(self):
        self._gradcheck_mode('left')

    def test_corner_pool_right_gradcheck(self):
        self._gradcheck_mode('right')
import numpy as np
import torch
# Hand-computed fixture for DeformConv2dPack: a single 1x1x3x3 input,
# fixed offset-conv and deform-conv weights, and the expected forward
# output plus gradients for the input and every parameter.
input = [[[[1., 2., 3.], [0., 1., 2.], [3., 5., 2.]]]]
offset_weight = [[[0.1, 0.4, 0.6, 0.1]], [[0.3, 0.2, 0.1, 0.3]],
                 [[0.5, 0.5, 0.2, 0.8]], [[0.8, 0.3, 0.9, 0.1]],
                 [[0.3, 0.1, 0.2, 0.5]], [[0.3, 0.7, 0.5, 0.3]],
                 [[0.6, 0.2, 0.5, 0.3]], [[0.4, 0.1, 0.8, 0.4]]]
offset_bias = [0.7, 0.1, 0.8, 0.5, 0.6, 0.5, 0.4, 0.7]
deform_weight = [[[0.4, 0.2, 0.1, 0.9]]]

gt_out = [[[[1.650, 0.], [0.000, 0.]]]]
gt_x_grad = [[[[-0.666, 0.204, 0.000], [0.030, -0.416, 0.012],
               [0.000, 0.252, 0.129]]]]
gt_offset_weight_grad = [[[[1.44, 2.88], [0.00, 1.44]]],
                         [[[-0.72, -1.44], [0.00, -0.72]]],
                         [[[0.00, 0.00], [0.00, 0.00]]],
                         [[[0.00, 0.00], [0.00, 0.00]]],
                         [[[-0.10, -0.20], [0.00, -0.10]]],
                         [[[-0.08, -0.16], [0.00, -0.08]]],
                         [[[-0.54, -1.08], [0.00, -0.54]]],
                         [[[-0.54, -1.08], [0.00, -0.54]]]]
# NOTE: removed a stray trailing comma that made this a 1-tuple wrapping
# the list (np.allclose only passed via broadcasting; the type was wrong).
gt_offset_bias_grad = [1.44, -0.72, 0., 0., -0.10, -0.08, -0.54, -0.54]
gt_deform_weight_grad = [[[[3.62, 0.], [0.40, 0.18]]]]
class TestDeformconv(object):
    """Check DeformConv2dPack outputs/gradients against the fixtures above."""

    def _test_deformconv(self, dtype=torch.float, threshold=1e-3):
        """Run the op with fixed weights and compare every gradient.

        Skips silently when CUDA is unavailable.
        """
        if not torch.cuda.is_available():
            return
        from mmcv.ops import DeformConv2dPack

        x = torch.Tensor(input).cuda().type(dtype)
        x.requires_grad = True

        # 1-in/1-out channel, 2x2 kernel, deterministic weights.
        model = DeformConv2dPack(1, 1, 2, stride=1, padding=0)
        model.conv_offset.weight.data = torch.nn.Parameter(
            torch.Tensor(offset_weight).reshape(8, 1, 2, 2))
        model.conv_offset.bias.data = torch.nn.Parameter(
            torch.Tensor(offset_bias).reshape(8))
        model.weight.data = torch.nn.Parameter(
            torch.Tensor(deform_weight).reshape(1, 1, 2, 2))
        model.cuda().type(dtype)

        out = model(x)
        out.backward(torch.ones_like(out))

        assert np.allclose(out.data.detach().cpu().numpy(), gt_out, threshold)
        assert np.allclose(x.grad.detach().cpu().numpy(), gt_x_grad, threshold)
        assert np.allclose(
            model.conv_offset.weight.grad.detach().cpu().numpy(),
            gt_offset_weight_grad, threshold)
        assert np.allclose(model.conv_offset.bias.grad.detach().cpu().numpy(),
                           gt_offset_bias_grad, threshold)
        assert np.allclose(model.weight.grad.detach().cpu().numpy(),
                           gt_deform_weight_grad, threshold)

    def test_deformconv(self):
        # double, float, then half precision (half uses a looser tolerance)
        self._test_deformconv(torch.double)
        self._test_deformconv(torch.float)
        self._test_deformconv(torch.half, 1e-1)
import os
import numpy as np
import torch
# Prefer the parrots gradcheck (which supports extra kwargs like
# `no_grads`); fall back to torch's implementation otherwise.
_USING_PARROTS = True
try:
    from parrots.autograd import gradcheck
except ImportError:
    from torch.autograd import gradcheck
    _USING_PARROTS = False
cur_dir = os.path.dirname(os.path.abspath(__file__))
# (feature map, rois) test cases; each roi is (batch_idx, x1, y1, x2, y2).
inputs = [([[[[1., 2.], [3., 4.]]]], [[0., 0., 0., 1., 1.]]),
          ([[[[1., 2.], [3., 4.]], [[4., 3.], [2.,
                                               1.]]]], [[0., 0., 0., 1., 1.]]),
          ([[[[1., 2., 5., 6.], [3., 4., 7., 8.], [9., 10., 13., 14.],
              [11., 12., 15., 16.]]]], [[0., 0., 0., 3., 3.]])]
# (expected pooled output, expected input gradient) for each case above.
outputs = [([[[[1, 1.25], [1.5, 1.75]]]], [[[[3.0625, 0.4375],
                                             [0.4375, 0.0625]]]]),
           ([[[[1., 1.25], [1.5, 1.75]], [[4, 3.75],
                                          [3.5, 3.25]]]], [[[[3.0625, 0.4375],
                                                             [0.4375, 0.0625]],
                                                            [[3.0625, 0.4375],
                                                             [0.4375,
                                                              0.0625]]]]),
           ([[[[1.9375, 4.75],
               [7.5625,
                10.375]]]], [[[[0.47265625, 0.4296875, 0.4296875, 0.04296875],
                               [0.4296875, 0.390625, 0.390625, 0.0390625],
                               [0.4296875, 0.390625, 0.390625, 0.0390625],
                               [0.04296875, 0.0390625, 0.0390625,
                                0.00390625]]]])]
class TestDeformRoIPool(object):
    """Gradient checks for (modulated) deformable RoI pooling (CUDA only).

    The two public tests differed only in which pooling class they built,
    so the per-case loop lives in a shared private helper.
    """

    def _gradcheck_all_cases(self, make_pool):
        """Run gradcheck over every fixture case.

        Args:
            make_pool (callable): given the input channel count, returns a
                CUDA pooling module to check.
        """
        for case in inputs:
            np_input = np.array(case[0])
            np_rois = np.array(case[1])
            x = torch.tensor(
                np_input, device='cuda', dtype=torch.float, requires_grad=True)
            rois = torch.tensor(np_rois, device='cuda', dtype=torch.float)
            droipool = make_pool(x.size(1))
            if _USING_PARROTS:
                pass
                # gradcheck(droipool, (x, rois), no_grads=[rois])
            else:
                gradcheck(droipool, (x, rois), eps=1e-2, atol=1e-2)

    def test_deform_roi_pool_gradcheck(self):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import DeformRoIPoolPack
        # 2x2 output, unit spatial scale, sampling ratio 2 (as before).
        self._gradcheck_all_cases(lambda c: DeformRoIPoolPack(
            (2, 2), c, spatial_scale=1.0, sampling_ratio=2).cuda())

    def test_modulated_deform_roi_pool_gradcheck(self):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import ModulatedDeformRoIPoolPack
        self._gradcheck_all_cases(lambda c: ModulatedDeformRoIPoolPack(
            (2, 2), c, spatial_scale=1.0, sampling_ratio=2).cuda())
import numpy as np
import torch
# Prefer the parrots gradcheck (supports extra kwargs); fall back to torch's.
_USING_PARROTS = True
try:
    from parrots.autograd import gradcheck
except ImportError:
    from torch.autograd import gradcheck
    _USING_PARROTS = False
# torch.set_printoptions(precision=8, threshold=100)
# (logits, labels) test cases of increasing difficulty.
inputs = [
    ([[1., 0], [0, 1.]], [0, 1]),
    ([[1., 0, -1.], [0, 1., 2.]], [2, 1]),
    ([[1e-6, 2e-6, 3e-6], [4e-6, 5e-5, 6e-4], [7e-3, 8e-2, 9e-1]], [1, 2, 0]),
]
# (expected mean loss, expected input gradient) per case, softmax variant.
softmax_outputs = [(0.00566451, [[-0.00657264, 0.00657264],
                                 [0.00657264, -0.00657264]]),
                   (0.34956908, [[0.10165970, 0.03739851, -0.13905823],
                                 [0.01227554, -0.10298023, 0.09070466]]),
                   (0.15754992, [[0.02590877, -0.05181759, 0.02590882],
                                 [0.02589641, 0.02589760, -0.05179400],
                                 [-0.07307514, 0.02234372, 0.05073142]])]
# (expected mean loss, expected input gradient) per case, sigmoid variant.
sigmoid_outputs = [(0.13562961, [[-0.00657264, 0.11185755],
                                 [0.11185755, -0.00657264]]),
                   (1.10251057, [[0.28808805, 0.11185755, -0.09602935],
                                 [0.11185755, -0.00657264, 0.40376765]]),
                   (0.42287254, [[0.07457182, -0.02485716, 0.07457201],
                                 [0.07457211, 0.07457669, -0.02483728],
                                 [-0.02462499, 0.08277918, 0.18050370]])]
class Testfocalloss(object):
    """Value and gradient checks for sigmoid/softmax focal loss (CUDA only).

    The softmax/sigmoid variants were copy-paste duplicates, so the shared
    loops live in two private helpers; the public entry points keep their
    original names and signatures.
    """

    def _check_loss(self, loss_fn, expected, dtype):
        """Check loss value and input gradient of ``loss_fn`` on every case."""
        alpha = 0.25
        gamma = 2.0
        for case, output in zip(inputs, expected):
            np_x = np.array(case[0])
            np_y = np.array(case[1])
            np_x_grad = np.array(output[1])
            x = torch.from_numpy(np_x).cuda().type(dtype)
            x.requires_grad_()
            y = torch.from_numpy(np_y).cuda().long()
            loss = loss_fn(x, y, gamma, alpha, None, 'mean')
            loss.backward()
            assert np.allclose(loss.data.cpu().numpy(), output[0], 1e-2)
            assert np.allclose(x.grad.data.cpu(), np_x_grad, 1e-2)

    def _check_grad(self, loss_cls, dtype):
        """gradcheck ``loss_cls(gamma, alpha)`` on every case (torch only)."""
        alpha = 0.25
        gamma = 2.0
        for case in inputs:
            np_x = np.array(case[0])
            np_y = np.array(case[1])
            x = torch.from_numpy(np_x).cuda().type(dtype)
            x.requires_grad_()
            y = torch.from_numpy(np_y).cuda().long()
            floss = loss_cls(gamma, alpha)
            if _USING_PARROTS:
                # gradcheck(floss, (x, y),
                #           no_grads=[y])
                pass
            else:
                gradcheck(floss, (x, y), eps=1e-2, atol=1e-2)

    def _test_softmax(self, dtype=torch.float):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import softmax_focal_loss
        self._check_loss(softmax_focal_loss, softmax_outputs, dtype)

    def _test_sigmoid(self, dtype=torch.float):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import sigmoid_focal_loss
        self._check_loss(sigmoid_focal_loss, sigmoid_outputs, dtype)

    def _test_grad_softmax(self, dtype=torch.float):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import SoftmaxFocalLoss
        self._check_grad(SoftmaxFocalLoss, dtype)

    def _test_grad_sigmoid(self, dtype=torch.float):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import SigmoidFocalLoss
        self._check_grad(SigmoidFocalLoss, dtype)

    def test_softmax_float(self):
        self._test_softmax(dtype=torch.float)

    def test_softmax_half(self):
        self._test_softmax(dtype=torch.half)

    def test_sigmoid_float(self):
        self._test_sigmoid(dtype=torch.float)

    def test_sigmoid_half(self):
        self._test_sigmoid(dtype=torch.half)

    def test_grad_softmax_float(self):
        self._test_grad_softmax(dtype=torch.float)

    def test_grad_sigmoid_float(self):
        self._test_grad_sigmoid(dtype=torch.float)
import torch
class TestInfo(object):
    """Smoke test for the compiled-extension version introspection ops."""

    def test_info(self):
        """Compiler and CUDA version strings must be exposed; skip w/o CUDA."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import get_compiler_version, get_compiling_cuda_version
        assert get_compiler_version() is not None
        assert get_compiling_cuda_version() is not None
import torch
class TestMaskedConv2d(object):
    """Smoke test: MaskedConv2d forward must run on CUDA inputs."""

    def test_masked_conv2d(self):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import MaskedConv2d
        feats = torch.randn(1, 3, 16, 16, requires_grad=True, device='cuda')
        mask = torch.randn(1, 16, 16, requires_grad=True, device='cuda')
        layer = MaskedConv2d(3, 3, 3).cuda()
        result = layer(feats, mask)
        assert result is not None
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment