Unverified Commit 07a9c956 authored by Vasilis Vryniotis, committed by GitHub

Remove _new_empty_tensor. (#3156)


Co-authored-by: Francisco Massa <fvsmassa@gmail.com>
parent e337103f
@@ -76,20 +76,6 @@ class ONNXExporterTester(unittest.TestCase):
                 else:
                     raise
 
-    @unittest.skip("Disable test until Split w/ zero sizes is implemented in ORT")
-    def test_new_empty_tensor(self):
-        class Module(torch.nn.Module):
-            def __init__(self):
-                super(Module, self).__init__()
-                self.conv2 = ops.misc.ConvTranspose2d(16, 33, (3, 5))
-
-            def forward(self, input2):
-                return self.conv2(input2)
-
-        input = torch.rand(0, 16, 10, 10)
-        test_input = torch.rand(0, 16, 20, 20)
-        self.run_model(Module(), [(input, ), (test_input,)], do_constant_folding=False)
-
     def test_nms(self):
         boxes = torch.rand(5, 4)
         boxes[:, 2:] += torch.rand(5, 2)
...
@@ -449,15 +449,6 @@ class NMSTester(unittest.TestCase):
             self.test_nms_cuda(dtype=dtype)
 
 
-class NewEmptyTensorTester(unittest.TestCase):
-    def test_new_empty_tensor(self):
-        input = torch.tensor([2., 2.], requires_grad=True)
-        new_shape = [3, 3]
-        out = torch.ops.torchvision._new_empty_tensor_op(input, new_shape)
-        assert out.size() == torch.Size([3, 3])
-        assert out.dtype == input.dtype
-
-
 class DeformConvTester(OpTester, unittest.TestCase):
     def expected_fn(self, x, weight, offset, mask, bias, stride=1, padding=0, dilation=1):
         stride_h, stride_w = _pair(stride)
...
#include "new_empty_tensor_op.h"
#include <torch/autograd.h>
#include <torch/types.h>
namespace vision {
namespace ops {
namespace {
class NewEmptyTensorOp : public torch::autograd::Function<NewEmptyTensorOp> {
public:
static torch::autograd::variable_list forward(
torch::autograd::AutogradContext* ctx,
const torch::autograd::Variable& input,
const c10::List<int64_t>& new_shape) {
ctx->saved_data["shape"] = input.sizes();
std::vector<int64_t> shape(new_shape.begin(), new_shape.end());
return {input.new_empty(shape, at::TensorOptions())};
}
static torch::autograd::variable_list backward(
torch::autograd::AutogradContext* ctx,
const torch::autograd::variable_list& grad_output) {
// Use data saved in forward
auto shape = ctx->saved_data["shape"].toIntList();
auto out = forward(ctx, grad_output[0], shape);
return {out[0], at::Tensor()};
}
};
} // namespace
at::Tensor new_empty_tensor(
const at::Tensor& input,
const c10::List<int64_t>& shape) {
return NewEmptyTensorOp::apply(input, shape)[0];
}
TORCH_LIBRARY_FRAGMENT(torchvision, m) {
m.def("_new_empty_tensor_op", &new_empty_tensor);
}
} // namespace ops
} // namespace vision
-#pragma once
-
-#include <ATen/ATen.h>
-
-#include "../macros.h"
-
-namespace vision {
-namespace ops {
-
-VISION_API at::Tensor new_empty_tensor(
-    const at::Tensor& input,
-    const c10::List<int64_t>& shape);
-
-} // namespace ops
-} // namespace vision
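
For readers more at home with Python autograd than with the C++ registration being deleted above, here is a rough sketch of the same forward/backward behavior using torch.autograd.Function. The class name is illustrative only and is not part of torchvision's API.

import torch


class NewEmptyTensorFn(torch.autograd.Function):
    # Illustrative Python analogue of the removed C++ NewEmptyTensorOp.

    @staticmethod
    def forward(ctx, input, new_shape):
        # Remember the original shape so backward can mirror it.
        ctx.orig_shape = input.shape
        # Uninitialized tensor with the requested shape, same dtype/device as input.
        return input.new_empty(new_shape)

    @staticmethod
    def backward(ctx, grad_output):
        # Gradient w.r.t. `input` is an (uninitialized) tensor of the original
        # shape; the shape argument receives no gradient.
        return grad_output.new_empty(ctx.orig_shape), None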
 from .boxes import nms, batched_nms, remove_small_boxes, clip_boxes_to_image, box_area, box_iou, generalized_box_iou
 from .boxes import box_convert
-from .new_empty_tensor import _new_empty_tensor
 from .deform_conv import deform_conv2d, DeformConv2d
 from .roi_align import roi_align, RoIAlign
 from .roi_pool import roi_pool, RoIPool
@@ -19,7 +18,7 @@ __all__ = [
     'deform_conv2d', 'DeformConv2d', 'nms', 'batched_nms', 'remove_small_boxes',
     'clip_boxes_to_image', 'box_convert',
     'box_area', 'box_iou', 'generalized_box_iou', 'roi_align', 'RoIAlign', 'roi_pool',
-    'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool',
+    'RoIPool', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool',
     'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork',
     'sigmoid_focal_loss'
 ]
@@ -38,18 +38,7 @@ def _register_custom_op():
                         pooled_shape_i=(pooled_height, pooled_width), spatial_scale_f=spatial_scale)
         return roi_pool, None
 
-    @parse_args('v', 'is')
-    def new_empty_tensor_op(g, input, shape):
-        dtype = input.type().scalarType()
-        if dtype is None:
-            dtype = 'Float'
-        dtype = scalar_type_to_onnx.index(cast_pytorch_to_onnx[dtype])
-        shape = g.op("Constant", value_t=torch.tensor(shape))
-        return g.op("ConstantOfShape", shape,
-                    value_t=torch.tensor([0], dtype=scalar_type_to_pytorch_type[dtype]))
-
     from torch.onnx import register_custom_op_symbolic
     register_custom_op_symbolic('torchvision::nms', symbolic_multi_label_nms, _onnx_opset_version)
     register_custom_op_symbolic('torchvision::roi_align', roi_align, _onnx_opset_version)
     register_custom_op_symbolic('torchvision::roi_pool', roi_pool, _onnx_opset_version)
-    register_custom_op_symbolic('torchvision::_new_empty_tensor_op', new_empty_tensor_op, _onnx_opset_version)
-import torch
-from torch.jit.annotations import List
-from torch import Tensor
-
-
-def _new_empty_tensor(x: Tensor, shape: List[int]) -> Tensor:
-    """
-    Arguments:
-        input (Tensor): input tensor
-        shape (List[int]): the new empty tensor shape
-    Returns:
-        output (Tensor)
-    """
-    return torch.ops.torchvision._new_empty_tensor_op(x, shape)
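
Downstream code that imported the removed `_new_empty_tensor` helper can typically switch to the built-in `Tensor.new_empty`, which the custom op wrapped anyway. A minimal sketch under that assumption (variable names are illustrative):

import torch

x = torch.tensor([2., 2.], requires_grad=True)

# Previously: out = _new_empty_tensor(x, [3, 3])
# Now: Tensor.new_empty returns an uninitialized tensor with the requested
# shape and the same dtype/device as x.
out = x.new_empty([3, 3])

assert out.shape == torch.Size([3, 3])
assert out.dtype == x.dtype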