"docs/vscode:/vscode.git/clone" did not exist on "85cc16aed25da5c94a9e7aa3877a35982b898e65"
Unverified commit 0b4285d9, authored by Zaida Zhou and committed by GitHub

Pick commits from master (#2164)



* [Docs] Add switch_language.md in docs (#2160)

* [Fix] Fix onnx unit tests (#2155)

* [Docs] Limit extension versions (#2144)

* Support PrRoIPool operation

* Add MPS bbox overlap

* Add .pre-commit-config-zh-cn.yaml (#2135)
Co-authored-by: xcnick <xcnick0412@gmail.com>
Co-authored-by: Jingwei Zhang <zjw18@mails.tsinghua.edu.cn>
Co-authored-by: q.yao <yaoqian@sensetime.com>
parent 47a61c3b
@@ -240,6 +240,18 @@ void ball_query_forward(Tensor new_xyz_tensor, Tensor xyz_tensor,
                         Tensor idx_tensor, int b, int n, int m,
                         float min_radius, float max_radius, int nsample);
 
+void prroi_pool_forward(Tensor input, Tensor rois, Tensor output,
+                        int pooled_height, int pooled_width,
+                        float spatial_scale);
+
+void prroi_pool_backward(Tensor grad_output, Tensor rois, Tensor grad_input,
+                         int pooled_height, int pooled_width,
+                         float spatial_scale);
+
+void prroi_pool_coor_backward(Tensor output, Tensor grad_output, Tensor input,
+                              Tensor rois, Tensor grad_rois, int pooled_height,
+                              int pooled_width, float spatial_scale);
+
 template <unsigned NDim>
 std::vector<torch::Tensor> get_indice_pairs_forward(
     torch::Tensor indices, int64_t batchSize,
@@ -828,4 +840,17 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
         "chamfer_distance_backward", py::arg("xyz1"), py::arg("xyz2"),
         py::arg("gradxyz1"), py::arg("gradxyz2"), py::arg("graddist1"),
         py::arg("graddist2"), py::arg("idx1"), py::arg("idx2"));
+  m.def("prroi_pool_forward", &prroi_pool_forward, "prroi_pool forward",
+        py::arg("input"), py::arg("rois"), py::arg("output"),
+        py::arg("pooled_height"), py::arg("pooled_width"),
+        py::arg("spatial_scale"));
+  m.def("prroi_pool_backward", &prroi_pool_backward, "prroi_pool_backward",
+        py::arg("grad_output"), py::arg("rois"), py::arg("grad_input"),
+        py::arg("pooled_height"), py::arg("pooled_width"),
+        py::arg("spatial_scale"));
+  m.def("prroi_pool_coor_backward", &prroi_pool_coor_backward,
+        "prroi_pool_coor_backward", py::arg("output"), py::arg("grad_output"),
+        py::arg("input"), py::arg("rois"), py::arg("grad_rois"),
+        py::arg("pooled_height"), py::arg("pooled_width"),
+        py::arg("spatial_scale"));
 }
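The three kernels above are exposed to Python through mmcv's `_ext` module; the new wrapper file below is the intended entry point, but the raw bindings can also be exercised directly. A minimal sketch, assuming a CUDA build of mmcv that compiled this op (tensor shapes and values are illustrative only):

import torch
from mmcv.utils import ext_loader

# The names must match the m.def() entries registered above.
ext_module = ext_loader.load_ext('_ext', ['prroi_pool_forward'])

feats = torch.rand(1, 4, 8, 8).cuda()               # NCHW feature map
rois = torch.tensor([[0., 0., 0., 7., 7.]]).cuda()  # (batch_idx, x1, y1, x2, y2)
out = feats.new_zeros(1, 4, 2, 2)                   # the kernel writes into this
ext_module.prroi_pool_forward(
    feats, rois, out, pooled_height=2, pooled_width=2, spatial_scale=1.0)

The full Python wrapper added by this commit follows.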
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple, Union

import torch
import torch.nn as nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair

from ..utils import ext_loader

ext_module = ext_loader.load_ext(
    '_ext',
    ['prroi_pool_forward', 'prroi_pool_backward', 'prroi_pool_coor_backward'])


class PrRoIPoolFunction(Function):

    @staticmethod
    def symbolic(g, features, rois, output_size, spatial_scale):
        return g.op(
            'mmcv::PrRoIPool',
            features,
            rois,
            pooled_height_i=int(output_size[0]),
            pooled_width_i=int(output_size[1]),
            spatial_scale_f=float(spatial_scale))

    @staticmethod
    def forward(ctx,
                features: torch.Tensor,
                rois: torch.Tensor,
                output_size: Tuple,
                spatial_scale: float = 1.0) -> torch.Tensor:
        if 'FloatTensor' not in features.type(
        ) or 'FloatTensor' not in rois.type():
            raise ValueError(
                'Precise RoI Pooling only takes float input, got '
                f'{features.type()} for features and {rois.type()} for rois.')

        pooled_height = int(output_size[0])
        pooled_width = int(output_size[1])
        spatial_scale = float(spatial_scale)

        features = features.contiguous()
        rois = rois.contiguous()
        output_shape = (rois.size(0), features.size(1), pooled_height,
                        pooled_width)
        output = features.new_zeros(output_shape)
        params = (pooled_height, pooled_width, spatial_scale)

        ext_module.prroi_pool_forward(features, rois, output, *params)
        ctx.params = params
        # everything here is contiguous.
        ctx.save_for_backward(features, rois, output)
        return output

    @staticmethod
    @once_differentiable
    def backward(
        ctx, grad_output: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, None, None, None]:
        features, rois, output = ctx.saved_tensors
        grad_input = grad_output.new_zeros(*features.shape)
        grad_coor = grad_output.new_zeros(*rois.shape)

        if features.requires_grad:
            grad_output = grad_output.contiguous()
            ext_module.prroi_pool_backward(grad_output, rois, grad_input,
                                           *ctx.params)
        if rois.requires_grad:
            grad_output = grad_output.contiguous()
            ext_module.prroi_pool_coor_backward(output, grad_output, features,
                                                rois, grad_coor, *ctx.params)
        return grad_input, grad_coor, None, None, None


prroi_pool = PrRoIPoolFunction.apply


class PrRoIPool(nn.Module):
    """The operation of precise RoI pooling. The implementation of PrRoIPool
    is modified from https://github.com/vacancy/PreciseRoIPooling/

    Precise RoI Pooling (PrRoIPool) is an integration-based (bilinear
    interpolation) average pooling method for RoI pooling. It avoids any
    quantization and has a continuous gradient on bounding box coordinates.

    It is:

    1. different from the original RoI Pooling proposed in Fast R-CNN. PrRoI
    Pooling uses average pooling instead of max pooling for each bin and has a
    continuous gradient on bounding box coordinates. That is, one can take the
    derivatives of some loss function w.r.t the coordinates of each RoI and
    optimize the RoI coordinates.

    2. different from RoI Align proposed in Mask R-CNN. PrRoI Pooling uses a
    full integration-based average pooling instead of sampling a constant
    number of points. This makes the gradient w.r.t. the coordinates
    continuous.

    Args:
        output_size (Union[int, tuple]): Size (h, w) of the pooled output.
        spatial_scale (float, optional): Scale the input boxes by this number.
            Defaults to 1.0.
    """

    def __init__(self,
                 output_size: Union[int, tuple],
                 spatial_scale: float = 1.0):
        super().__init__()

        self.output_size = _pair(output_size)
        self.spatial_scale = float(spatial_scale)

    def forward(self, features: torch.Tensor,
                rois: torch.Tensor) -> torch.Tensor:
        """Forward function.

        Args:
            features (torch.Tensor): The feature map.
            rois (torch.Tensor): The RoI bboxes in
                (batch_index, tl_x, tl_y, br_x, br_y) format.

        Returns:
            torch.Tensor: The pooled results.
        """
        return prroi_pool(features, rois, self.output_size,
                          self.spatial_scale)

    def __repr__(self):
        s = self.__class__.__name__
        s += f'(output_size={self.output_size}, '
        s += f'spatial_scale={self.spatial_scale})'
        return s
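A minimal usage sketch of the module above, assuming a CUDA build of mmcv with this op compiled (the 5-column RoI layout, batch index first, matches the tests added further down):

import torch
from mmcv.ops import PrRoIPool

feats = torch.rand(2, 16, 32, 32, device='cuda')
# One RoI per row: (batch_index, tl_x, tl_y, br_x, br_y), float coordinates.
rois = torch.tensor([[0., 0., 0., 15., 15.],
                     [1., 4., 4., 19., 19.]], device='cuda')
pool = PrRoIPool(output_size=7, spatial_scale=1.0)
pooled = pool(feats, rois)
assert pooled.shape == (2, 16, 7, 7)  # (num_rois, channels, out_h, out_w)

Because the op is differentiable w.r.t. both features and RoI coordinates, `rois` can carry `requires_grad=True` to optimize the box coordinates directly.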
 docutils==0.16.0
-markdown<3.4.0
+markdown>=3.4.0
 myst-parser
 opencv-python
 -e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
 sphinx==4.0.2
 sphinx-copybutton
-sphinx_markdown_tables
+sphinx_markdown_tables>=0.0.16
 torch
@@ -305,6 +305,30 @@ def get_extensions():
         extension = MLUExtension
         include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
         include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/mlu'))
+    elif (hasattr(torch.backends, 'mps')
+          and torch.backends.mps.is_available()) or os.getenv(
+              'FORCE_MPS', '0') == '1':
+        # objc compiler support
+        from distutils.unixccompiler import UnixCCompiler
+        if '.mm' not in UnixCCompiler.src_extensions:
+            UnixCCompiler.src_extensions.append('.mm')
+            UnixCCompiler.language_map['.mm'] = 'objc'
+
+        define_macros += [('MMCV_WITH_MPS', None)]
+        extra_compile_args = {}
+        extra_compile_args['cxx'] = ['-Wall', '-std=c++17']
+        extra_compile_args['cxx'] += [
+            '-framework', 'Metal', '-framework', 'Foundation'
+        ]
+        extra_compile_args['cxx'] += ['-ObjC++']
+        # src
+        op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
+            glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp') + \
+            glob.glob('./mmcv/ops/csrc/common/mps/*.mm') + \
+            glob.glob('./mmcv/ops/csrc/pytorch/mps/*.mm')
+        extension = CppExtension
+        include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
+        include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/mps'))
     else:
         print(f'Compiling {ext_name} only with CPU')
         op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
......
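The build-time condition in this new branch can be reproduced at runtime in a few lines; `FORCE_MPS=1` (read via `os.getenv` above) forces the MPS sources to compile even when the backend check fails, e.g. in CI. A small sketch mirroring the check:

import os

import torch

# Same condition as in get_extensions(): build the MPS sources when PyTorch
# exposes a usable MPS backend, or when FORCE_MPS=1 overrides the check.
use_mps = (hasattr(torch.backends, 'mps')
           and torch.backends.mps.is_available()) or os.getenv(
               'FORCE_MPS', '0') == '1'
print(f'building with MPS: {use_mps}')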
@@ -3,7 +3,7 @@ import numpy as np
 import pytest
 import torch
 
-from mmcv.utils import IS_CUDA_AVAILABLE, IS_MLU_AVAILABLE
+from mmcv.utils import IS_CUDA_AVAILABLE, IS_MLU_AVAILABLE, IS_MPS_AVAILABLE
 
 
 class TestBBox:
@@ -43,7 +43,11 @@ class TestBBox:
         pytest.param(
             'mlu',
             marks=pytest.mark.skipif(
-                not IS_MLU_AVAILABLE, reason='requires MLU support'))
+                not IS_MLU_AVAILABLE, reason='requires MLU support')),
+        pytest.param(
+            'mps',
+            marks=pytest.mark.skipif(
+                not IS_MPS_AVAILABLE, reason='requires MPS support'))
     ])
     def test_bbox_overlaps_float(self, device):
         self._test_bbox_overlaps(device, dtype=torch.float)
......
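The op behind this new parametrization is `mmcv.ops.bbox_overlaps`, which the MPS kernel added in this commit accelerates. A hedged sketch of the new device path (assuming an Apple Silicon build with MMCV_WITH_MPS, per the setup.py change above; the boxes are illustrative):

import torch
from mmcv.ops import bbox_overlaps

if torch.backends.mps.is_available():
    bboxes1 = torch.tensor([[0., 0., 10., 10.]], device='mps')
    bboxes2 = torch.tensor([[5., 5., 15., 15.]], device='mps')
    ious = bbox_overlaps(bboxes1, bboxes2)  # pairwise IoU matrix, shape (1, 1)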
@@ -67,7 +67,8 @@ def process_grid_sample(func, input, grid, ort_custom_op_path=''):
     input_initializer = [node.name for node in onnx_model.graph.initializer]
     net_feed_input = list(set(input_all) - set(input_initializer))
     assert (len(net_feed_input) == 2)
-    sess = rt.InferenceSession(onnx_file, session_options)
+    sess = rt.InferenceSession(
+        onnx_file, session_options, providers=['CPUExecutionProvider'])
     ort_result = sess.run(None, {
         'input': input.detach().numpy(),
         'grid': grid.detach().numpy()
@@ -160,7 +161,8 @@ def test_nms():
     input_initializer = [node.name for node in onnx_model.graph.initializer]
     net_feed_input = list(set(input_all) - set(input_initializer))
     assert (len(net_feed_input) == 2)
-    sess = rt.InferenceSession(onnx_file, session_options)
+    sess = rt.InferenceSession(
+        onnx_file, session_options, providers=['CPUExecutionProvider'])
     onnx_dets, _ = sess.run(None, {
         'scores': scores.detach().numpy(),
         'boxes': boxes.detach().numpy()
@@ -234,7 +236,8 @@ def test_softnms():
     ]
     net_feed_input = list(set(input_all) - set(input_initializer))
     assert (len(net_feed_input) == 2)
-    sess = rt.InferenceSession(onnx_file, session_options)
+    sess = rt.InferenceSession(
+        onnx_file, session_options, providers=['CPUExecutionProvider'])
     onnx_dets, onnx_inds = sess.run(None, {
         'scores': scores.detach().numpy(),
         'boxes': boxes.detach().numpy()
@@ -302,7 +305,8 @@ def test_roialign():
     ]
     net_feed_input = list(set(input_all) - set(input_initializer))
     assert (len(net_feed_input) == 2)
-    sess = rt.InferenceSession(onnx_file, session_options)
+    sess = rt.InferenceSession(
+        onnx_file, session_options, providers=['CPUExecutionProvider'])
     onnx_output = sess.run(None, {
         'input': input.detach().numpy(),
         'rois': rois.detach().numpy()
@@ -378,7 +382,8 @@ def test_roialign_rotated():
     ]
     net_feed_input = list(set(input_all) - set(input_initializer))
     assert (len(net_feed_input) == 2)
-    sess = rt.InferenceSession(onnx_file, session_options)
+    sess = rt.InferenceSession(
+        onnx_file, session_options, providers=['CPUExecutionProvider'])
     onnx_output = sess.run(None, {
         'features': input.detach().numpy(),
         'rois': rois.detach().numpy()
@@ -440,7 +445,8 @@ def test_roipool():
     ]
     net_feed_input = list(set(input_all) - set(input_initializer))
     assert (len(net_feed_input) == 2)
-    sess = rt.InferenceSession(onnx_file)
+    sess = rt.InferenceSession(
+        onnx_file, providers=['CPUExecutionProvider'])
     onnx_output = sess.run(
         None, {
             'input': input.detach().cpu().numpy(),
@@ -470,7 +476,7 @@ def test_interpolate():
         onnx_file,
         input_names=['input'],
         opset_version=opset_version)
-    sess = rt.InferenceSession(onnx_file)
+    sess = rt.InferenceSession(onnx_file, providers=['CPUExecutionProvider'])
     onnx_result = sess.run(None, {'input': dummy_input.detach().numpy()})
 
     pytorch_result = func(dummy_input).detach().numpy()
@@ -581,7 +587,8 @@ def test_rotated_feature_align():
     input_initializer = [node.name for node in onnx_model.graph.initializer]
     net_feed_input = list(set(input_all) - set(input_initializer))
     assert (len(net_feed_input) == 2)
-    sess = rt.InferenceSession(onnx_file, session_options)
+    sess = rt.InferenceSession(
+        onnx_file, session_options, providers=['CPUExecutionProvider'])
     onnx_output = sess.run(None, {
         'feature': feature.detach().numpy(),
         'bbox': bbox.detach().numpy()
@@ -629,7 +636,8 @@ def test_corner_pool(mode, opset=11):
     session_options = rt.SessionOptions()
     session_options.register_custom_ops_library(ort_custom_op_path)
-    sess = rt.InferenceSession(onnx_file, session_options)
+    sess = rt.InferenceSession(
+        onnx_file, session_options, providers=['CPUExecutionProvider'])
     ort_result = sess.run(None, {'input': input.detach().numpy()})
     pytorch_results = wrapped_model(input.clone())
@@ -698,7 +706,8 @@ def test_cummax_cummin(key, opset=11):
     session_options = rt.SessionOptions()
     session_options.register_custom_ops_library(ort_custom_op_path)
-    sess = rt.InferenceSession(onnx_file, session_options)
+    sess = rt.InferenceSession(
+        onnx_file, session_options, providers=['CPUExecutionProvider'])
     ort_output, ort_inds = sess.run(None,
                                     {'input': input.detach().numpy()})
     pytorch_output, pytorch_inds = wrapped_model(input.clone())
@@ -737,7 +746,7 @@ def test_roll(shifts_dims_pair):
     net_feed_input = list(set(input_all) - set(input_initializer))
     assert (len(net_feed_input) == 1)
-    sess = rt.InferenceSession(onnx_file)
+    sess = rt.InferenceSession(onnx_file, providers=['CPUExecutionProvider'])
     ort_output = sess.run(None, {'input': input.detach().numpy()})[0]
 
     with torch.no_grad():
@@ -822,7 +831,8 @@ def test_modulated_deform_conv2d():
     session_options.register_custom_ops_library(ort_custom_op_path)
     # compute onnx_output
-    sess = rt.InferenceSession(onnx_file, session_options)
+    sess = rt.InferenceSession(
+        onnx_file, session_options, providers=['CPUExecutionProvider'])
     onnx_output = sess.run(
         None, {
             'input': input.cpu().detach().numpy(),
@@ -902,7 +912,8 @@ def test_deform_conv2d(threshold=1e-3):
     session_options.register_custom_ops_library(ort_custom_op_path)
     # compute onnx_output
-    sess = rt.InferenceSession(onnx_file, session_options)
+    sess = rt.InferenceSession(
+        onnx_file, session_options, providers=['CPUExecutionProvider'])
     onnx_output = sess.run(
         None, {
             'input': x.cpu().detach().numpy(),
......
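Every change in this file follows one pattern: recent onnxruntime releases (1.9+) expect the execution providers to be listed explicitly when a session is created, so each `rt.InferenceSession(...)` call now pins `CPUExecutionProvider`. A minimal sketch of the updated pattern ('model.onnx' is a placeholder path, not a file from this repo):

import onnxruntime as rt

session_options = rt.SessionOptions()
# Custom-op libraries are registered exactly as before, when needed:
# session_options.register_custom_ops_library(ort_custom_op_path)
sess = rt.InferenceSession(
    'model.onnx', session_options, providers=['CPUExecutionProvider'])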
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch

from mmcv.utils import IS_CUDA_AVAILABLE

_USING_PARROTS = True
try:
    from parrots.autograd import gradcheck
except ImportError:
    from torch.autograd import gradcheck
    _USING_PARROTS = False

inputs = [
    ([[[[1., 2.], [3., 4.]]]],
     [[0., 0., 0., 1., 1.]]),
    ([[[[1., 2.], [3., 4.]], [[4., 3.], [2., 1.]]]],
     [[0., 0., 0., 1., 1.]]),
    ([[[[1., 2., 5., 6.], [3., 4., 7., 8.], [9., 10., 13., 14.],
        [11., 12., 15., 16.]]]],
     [[0., 0., 0., 3., 3.]])
]
outputs = [
    ([[[[1.75, 2.25], [2.75, 3.25]]]],
     [[[[1., 1.], [1., 1.]]]],
     [[0., 2., 4., 2., 4.]]),
    ([[[[1.75, 2.25], [2.75, 3.25]], [[3.25, 2.75], [2.25, 1.75]]]],
     [[[[1., 1.], [1., 1.]], [[1., 1.], [1., 1.]]]],
     [[0., 0., 0., 0., 0.]]),
    ([[[[3.75, 6.91666651], [10.08333302, 13.25]]]],
     [[[[0.11111111, 0.22222224, 0.22222222, 0.11111111],
        [0.22222224, 0.44444448, 0.44444448, 0.22222224],
        [0.22222224, 0.44444448, 0.44444448, 0.22222224],
        [0.11111111, 0.22222224, 0.22222224, 0.11111111]]]],
     [[0.0, 3.33333302, 6.66666603, 3.33333349, 6.66666698]])
]


class TestPrRoiPool:

    @pytest.mark.parametrize('device', [
        pytest.param(
            'cuda',
            marks=pytest.mark.skipif(
                not IS_CUDA_AVAILABLE, reason='requires CUDA support'))
    ])
    def test_roipool_gradcheck(self, device):
        from mmcv.ops import PrRoIPool
        pool_h = 2
        pool_w = 2
        spatial_scale = 1.0

        for case in inputs:
            np_input = np.array(case[0], dtype=np.float32)
            np_rois = np.array(case[1], dtype=np.float32)

            x = torch.tensor(np_input, device=device, requires_grad=True)
            rois = torch.tensor(np_rois, device=device)
            froipool = PrRoIPool((pool_h, pool_w), spatial_scale)

            if _USING_PARROTS:
                pass
                # gradcheck(froipool, (x, rois), no_grads=[rois])
            else:
                gradcheck(froipool, (x, rois), eps=1e-2, atol=1e-2)

    def _test_roipool_allclose(self, device, dtype=torch.float):
        from mmcv.ops import prroi_pool
        pool_h = 2
        pool_w = 2
        spatial_scale = 1.0

        for case, output in zip(inputs, outputs):
            np_input = np.array(case[0], dtype=np.float32)
            np_rois = np.array(case[1], dtype=np.float32)
            np_output = np.array(output[0], dtype=np.float32)
            np_input_grad = np.array(output[1], dtype=np.float32)
            np_rois_grad = np.array(output[2], dtype=np.float32)

            x = torch.tensor(
                np_input, dtype=dtype, device=device, requires_grad=True)
            rois = torch.tensor(
                np_rois, dtype=dtype, device=device, requires_grad=True)

            output = prroi_pool(x, rois, (pool_h, pool_w), spatial_scale)
            output.backward(torch.ones_like(output))
            assert np.allclose(output.data.cpu().numpy(), np_output, 1e-3)
            assert np.allclose(x.grad.data.cpu().numpy(), np_input_grad, 1e-3)
            assert np.allclose(rois.grad.data.cpu().numpy(), np_rois_grad,
                               1e-3)

    @pytest.mark.parametrize('device', [
        pytest.param(
            'cuda',
            marks=pytest.mark.skipif(
                not IS_CUDA_AVAILABLE, reason='requires CUDA support'))
    ])
    def test_roipool_allclose_float(self, device):
        self._test_roipool_allclose(device, dtype=torch.float)
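The constants in `outputs` can be sanity-checked analytically. For the first case, the 2x2 feature map [[1, 2], [3, 4]] defines the bilinear interpolant f(x, y) = 1 + x + 2y on the unit square, and each pooled bin of the RoI (0, 0)-(1, 1) is the average of f over one 0.5 x 0.5 quarter. A quick numeric sketch (not part of the test suite):

import numpy as np

# Average f(x, y) = 1 + x + 2*y over each 0.5 x 0.5 bin of the unit RoI.
xs = np.linspace(0., 0.5, 501)
ys = np.linspace(0., 0.5, 501)
x, y = np.meshgrid(xs, ys)
bins = [[(1 + (x + dx) + 2 * (y + dy)).mean() for dx in (0., 0.5)]
        for dy in (0., 0.5)]
print(np.round(bins, 2))  # [[1.75 2.25] [2.75 3.25]] -- matches outputs[0]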