Unverified Commit 87b2fe35 authored by Cagri Eryilmaz, committed by GitHub

DepthToSpace Operator Implementation (#950)

Supports ONNX operator sets 1, 11, and 13.
parent cb4b94ef
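For reference, a minimal numpy sketch of the DepthToSpace semantics implemented by this change (illustrative only, not part of the sources; assumes numpy is available, and depth_to_space is a hypothetical helper name). It uses the same reshape -> transpose -> reshape decomposition that the parser below emits as MIGraphX instructions:

import numpy as np

def depth_to_space(x, blocksize, mode="DCR"):
    # x is an NCHW tensor; output shape is (N, C / blocksize^2, H * blocksize, W * blocksize)
    n, c, h, w = x.shape
    if mode == "DCR":
        # depth-column-row: the channel index decomposes as (b1, b2, c')
        tmp = x.reshape(n, blocksize, blocksize, c // blocksize**2, h, w)
        tmp = tmp.transpose(0, 3, 4, 1, 5, 2)
    else:
        # CRD (column-row-depth): the channel index decomposes as (c', b1, b2)
        tmp = x.reshape(n, c // blocksize**2, blocksize, blocksize, h, w)
        tmp = tmp.transpose(0, 1, 4, 2, 5, 3)
    return tmp.reshape(n, c // blocksize**2, h * blocksize, w * blocksize)

x = np.arange(48, dtype=np.float32).reshape(1, 8, 2, 3)
print(depth_to_space(x, blocksize=2, mode="DCR").shape)  # prints (1, 2, 4, 6)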
#include <cmath>

#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {

struct parse_depthtospace : op_parser<parse_depthtospace>
{
    std::vector<op_desc> operators() const { return {{"DepthToSpace"}}; }

    instruction_ref parse(const op_desc& /*opd*/,
                          const onnx_parser& /*parser*/,
                          const onnx_parser::node_info& info,
                          std::vector<instruction_ref> args) const
    {
        auto s = args[0]->get_shape();

        // mode attribute of DepthToSpace: "DCR" (default) or "CRD"
        auto mode = std::string("DCR");
        if(contains(info.attributes, "mode"))
        {
            mode = info.attributes.at("mode").s();
        }

        // blocksize attribute of DepthToSpace (required, must be >= 1)
        int blocksize = 0;
        if(contains(info.attributes, "blocksize"))
        {
            blocksize = info.attributes.at("blocksize").i();
        }
        if(blocksize < 1)
        {
            MIGRAPHX_THROW("DepthToSpace: blocksize is less than 1");
        }

        // compute the intermediate 6-D shape (lens1) and the final output
        // shape (lens2) from the NCHW input shape
        auto lens1            = s.lens();
        auto lens2            = s.lens();
        unsigned long divisor = std::pow(blocksize, 2);
        if((lens2[1] % divisor) == 0)
            lens2[1] = lens2[1] / divisor;
        else
            MIGRAPHX_THROW("DepthToSpace: number of channels is not divisible by blocksize squared");
        lens1.push_back(lens1[2]);
        lens1.push_back(lens1[3]);
        lens2[2] = lens2[2] * blocksize;
        lens2[3] = lens2[3] * blocksize;
        lens1[2] = blocksize;

        std::vector<int64_t> perm;
        if(mode == "DCR")
        {
            lens1[3] = lens1[1] / divisor;
            lens1[1] = blocksize;
            perm = {0, 3, 4, 1, 5, 2};
        }
        else if(mode == "CRD")
        {
            lens1[1] = lens1[1] / divisor;
            lens1[3] = blocksize;
            perm = {0, 1, 4, 2, 5, 3};
        }
        else
            MIGRAPHX_THROW("DepthToSpace: mode attribute must be \"DCR\" or \"CRD\"");

        // reshape -> transpose -> reshape implements DepthToSpace
        auto temp1 = info.add_instruction(make_op("reshape", {{"dims", lens1}}), args[0]);
        auto temp2 = info.add_instruction(make_op("transpose", {{"permutation", perm}}), temp1);
        return info.add_instruction(make_op("reshape", {{"dims", lens2}}),
                                    info.make_contiguous(temp2));
    }
};

} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
depthtospace_crd_test.onnx (new binary ONNX protobuf): graph depthtospace_crd_test with a single DepthToSpace node, blocksize and mode="CRD" attributes, input x, output y.
depthtospace_simple_test.onnx (new binary ONNX protobuf): graph depthtospace_simple_test with a single DepthToSpace node, blocksize and mode="DCR" attributes, input x, output y.
depthtospace_test.onnx (new binary ONNX protobuf): graph depthtospace_test with a single DepthToSpace node, blocksize and mode="DCR" attributes, input x, output y.
@@ -1016,6 +1016,51 @@ def deconv_stride_test():
    return ([node], [x, w], [y])


@onnx_test
def depthtospace_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 8, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 10, 10])

    node = onnx.helper.make_node('DepthToSpace',
                                 inputs=['x'],
                                 outputs=['y'],
                                 blocksize=2,
                                 mode='DCR')

    return ([node], [x], [y])


@onnx_test
def depthtospace_simple_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 8, 2, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 4, 6])

    node = onnx.helper.make_node('DepthToSpace',
                                 inputs=['x'],
                                 outputs=['y'],
                                 blocksize=2,
                                 mode='DCR')

    return ([node], [x], [y])


@onnx_test
def depthtospace_crd_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 8, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 10, 10])

    node = onnx.helper.make_node('DepthToSpace',
                                 inputs=['x'],
                                 outputs=['y'],
                                 blocksize=2,
                                 mode='CRD')

    return ([node], [x], [y])


@onnx_test
def dequantizelinear_test():
    arg0 = helper.make_tensor_value_info('0', TensorProto.INT8, [5])
@@ -917,6 +917,51 @@ TEST_CASE(deconv_output_shape_3d_test)
    EXPECT(p == prog);
}
TEST_CASE(depthtospace_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0 = mm->add_parameter("x", {migraphx::shape::float_type, {2, 8, 5, 5}});
    auto tmp1 =
        mm->add_instruction(migraphx::make_op("reshape", {{"dims", {2, 2, 2, 2, 5, 5}}}), l0);
    auto tmp2 = mm->add_instruction(
        migraphx::make_op("transpose", {{"permutation", {0, 3, 4, 1, 5, 2}}}), tmp1);
    auto tmp3 = mm->add_instruction(migraphx::make_op("contiguous"), tmp2);
    mm->add_instruction(migraphx::make_op("reshape", {{"dims", {2, 2, 10, 10}}}), tmp3);

    auto prog = optimize_onnx("depthtospace_test.onnx");
    EXPECT(p == prog);
}

TEST_CASE(depthtospace_crd_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0 = mm->add_parameter("x", {migraphx::shape::float_type, {2, 8, 5, 5}});
    auto tmp1 =
        mm->add_instruction(migraphx::make_op("reshape", {{"dims", {2, 2, 2, 2, 5, 5}}}), l0);
    auto tmp2 = mm->add_instruction(
        migraphx::make_op("transpose", {{"permutation", {0, 1, 4, 2, 5, 3}}}), tmp1);
    auto tmp3 = mm->add_instruction(migraphx::make_op("contiguous"), tmp2);
    mm->add_instruction(migraphx::make_op("reshape", {{"dims", {2, 2, 10, 10}}}), tmp3);

    auto prog = optimize_onnx("depthtospace_crd_test.onnx");
    EXPECT(p == prog);
}

TEST_CASE(depthtospace_simple_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0 = mm->add_parameter("x", {migraphx::shape::float_type, {1, 8, 2, 3}});
    auto tmp1 =
        mm->add_instruction(migraphx::make_op("reshape", {{"dims", {1, 2, 2, 2, 2, 3}}}), l0);
    auto tmp2 = mm->add_instruction(
        migraphx::make_op("transpose", {{"permutation", {0, 3, 4, 1, 5, 2}}}), tmp1);
    auto tmp3 = mm->add_instruction(migraphx::make_op("contiguous"), tmp2);
    mm->add_instruction(migraphx::make_op("reshape", {{"dims", {1, 2, 4, 6}}}), tmp3);

    auto prog = optimize_onnx("depthtospace_simple_test.onnx");
    EXPECT(p == prog);
}

TEST_CASE(dequantizelinear_test)
{
    migraphx::program p;
@@ -45,6 +45,24 @@ TEST_CASE(averagepool_nt_cip_test)
    EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(depthtospace_simple_test)
{
    auto p = migraphx::parse_onnx("depthtospace_simple_test.onnx");
    p.compile(migraphx::ref::target{});

    std::vector<float> data_in(48);
    std::iota(std::begin(data_in), std::end(data_in), 0);
    migraphx::shape s_x{migraphx::shape::float_type, {1, 8, 2, 3}};
    migraphx::parameter_map pp;
    pp["x"] = migraphx::argument(s_x, data_in.data());

    auto result = p.eval(pp).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {0, 12, 1, 13, 2, 14, 24, 36, 25, 37, 26, 38, 3, 15, 4, 16,
                               5, 17, 27, 39, 28, 40, 29, 41, 6, 18, 7, 19, 8, 20, 30, 42,
                               31, 43, 32, 44, 9, 21, 10, 22, 11, 23, 33, 45, 34, 46, 35, 47};
    EXPECT(migraphx::verify_range(result_vector, gold));
}
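The gold vector above can be reproduced with a short numpy sketch (illustrative only, not part of the test sources; assumes numpy is available), applying the DCR decomposition with blocksize 2 to the same iota input:

import numpy as np

x = np.arange(48, dtype=np.float32).reshape(1, 8, 2, 3)         # same iota input as data_in
tmp = x.reshape(1, 2, 2, 2, 2, 3).transpose(0, 3, 4, 1, 5, 2)   # DCR, blocksize = 2
gold = tmp.reshape(1, 2, 4, 6).flatten()
print(gold[:6])  # first six values: 0, 12, 1, 13, 2, 14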
TEST_CASE(gather_elements)
{
    migraphx::program p = migraphx::parse_onnx("gather_elements_axis0_test.onnx");
@@ -253,10 +253,6 @@ def create_backend_test(testname=None, target_device=None):
        backend_test.exclude(r'test_constantofshape_float_ones_cpu')
        backend_test.exclude(r'test_constantofshape_int_shape_zero_cpu')
        backend_test.exclude(r'test_constantofshape_int_zeros_cpu')
        backend_test.exclude(r'test_depthtospace_crd_mode_cpu')
        backend_test.exclude(r'test_depthtospace_crd_mode_example_cpu')
        backend_test.exclude(r'test_depthtospace_dcr_mode_cpu')
        backend_test.exclude(r'test_depthtospace_example_cpu')
        backend_test.exclude(r'test_expand_dim_changed_cpu')
        backend_test.exclude(r'test_expand_dim_unchanged_cpu')
        backend_test.exclude(r'test_expand_shape_model1_cpu')