Unverified Commit 85e8f164 authored by Charlie Lin's avatar Charlie Lin Committed by GitHub
Browse files

Parse dynamic ONNX `Shape` op and create `dimensions_of` MIGX operator (#1923)

parent 40c1693b
...@@ -133,6 +133,7 @@ register_migraphx_ops( ...@@ -133,6 +133,7 @@ register_migraphx_ops(
cosh cosh
cos cos
dequantizelinear dequantizelinear
dimensions_of
div div
dot dot
elu elu
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_OPERATORS_DIMENSIONS_OF_HPP
#define MIGRAPHX_GUARD_OPERATORS_DIMENSIONS_OF_HPP
#include <migraphx/check_shapes.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/dyn_output.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
/**
* Returns the dimensions of the input argument from starting axis to ending axis.
 * At least `end` must be set to use this operator (set `end` to ndim for the default ONNX behavior
 * of the `Shape` operator). This should only be used for dynamic shapes, as this can be simplified
 * to a literal for static shapes.
*/
struct dimensions_of
{
    // Half-open axis range [start, end) of the input shape to report.
    std::size_t start = 0;
    std::size_t end   = 0;

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.start, "start"), f(self.end, "end"));
    }

    std::string name() const { return "dimensions_of"; }

    /// Output is a 1-D int64 tensor of length `end - start`.
    /// Throws if the axis range is empty/inverted or runs past the input's rank;
    /// without the ndim check, `compute` would read past the end of the input
    /// lengths vector for an out-of-range `end`.
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        check_shapes{inputs, *this, true}.has(1);
        if(start >= end)
        {
            MIGRAPHX_THROW("DIMENSIONS_OF: start >= end. start = " + std::to_string(start) +
                           ", end = " + std::to_string(end));
        }
        if(end > inputs[0].ndim())
        {
            MIGRAPHX_THROW("DIMENSIONS_OF: end out of range. end = " + std::to_string(end) +
                           ", input ndim = " + std::to_string(inputs[0].ndim()));
        }
        return shape{shape::int64_type, {end - start}};
    }

    /// Copies the runtime lengths of axes [start, end) into the int64 output.
    /// At evaluation time the input argument always carries a fixed shape.
    argument compute(const shape& output_shape, std::vector<argument> args) const
    {
        argument result{output_shape};
        auto input_lens = args[0].get_shape().lens();
        result.visit([&](auto output) {
            std::copy(input_lens.cbegin() + start, input_lens.cbegin() + end, output.begin());
        });
        return result;
    }
};
} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
...@@ -48,6 +48,7 @@ ...@@ -48,6 +48,7 @@
#include <migraphx/op/convolution_backwards.hpp> #include <migraphx/op/convolution_backwards.hpp>
#include <migraphx/op/cosh.hpp> #include <migraphx/op/cosh.hpp>
#include <migraphx/op/cos.hpp> #include <migraphx/op/cos.hpp>
#include <migraphx/op/dimensions_of.hpp>
#include <migraphx/op/div.hpp> #include <migraphx/op/div.hpp>
#include <migraphx/op/dot.hpp> #include <migraphx/op/dot.hpp>
#include <migraphx/op/elu.hpp> #include <migraphx/op/elu.hpp>
......
...@@ -30,8 +30,11 @@ namespace migraphx { ...@@ -30,8 +30,11 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS { inline namespace MIGRAPHX_INLINE_NS {
namespace onnx { namespace onnx {
// Use a literal instruction to replace the shape since, output of /**
// shape operator are literals in migraphx * If static shape input, creates a literal in migraphx.
* If dynamic shape input, creates a dimensions_of operator in migraphx (runtime evaluation of
* shape).
*/
struct parse_shape : op_parser<parse_shape> struct parse_shape : op_parser<parse_shape>
{ {
std::vector<op_desc> operators() const { return {{"Shape"}}; } std::vector<op_desc> operators() const { return {{"Shape"}}; }
...@@ -43,13 +46,54 @@ struct parse_shape : op_parser<parse_shape> ...@@ -43,13 +46,54 @@ struct parse_shape : op_parser<parse_shape>
{ {
if(args.size() != 1) if(args.size() != 1)
MIGRAPHX_THROW("Shape: operator should have 1 operand"); MIGRAPHX_THROW("Shape: operator should have 1 operand");
std::vector<std::size_t> arg_shape = args[0]->get_shape().lens(); auto input_shape = args[0]->get_shape();
std::vector<int64_t> vec_shape(arg_shape.size()); int input_ndim = input_shape.ndim();
migraphx::shape s(migraphx::shape::int64_type, {arg_shape.size()}); std::size_t start = 0;
std::transform(arg_shape.begin(), arg_shape.end(), vec_shape.begin(), [](auto i) { std::size_t end = input_ndim;
return int64_t(i); // Normalizing the start and end is handled here because of how the static shape version
}); // works. Clamping to [-r, r], where r is ndim of input and then making positive.
return info.add_literal(migraphx::literal{s, vec_shape}); auto normalize_ind = [&](int64_t ind) {
if(ind < (-1 * input_ndim))
{
ind = -1 * input_ndim;
}
if(ind > input_ndim)
{
ind = input_ndim;
}
return (ind >= 0) ? ind : input_ndim + ind;
};
if(contains(info.attributes, "end"))
{
end = normalize_ind(info.attributes.at("end").i());
}
if(contains(info.attributes, "start"))
{
start = normalize_ind(info.attributes.at("start").i());
}
if(end <= start)
{
MIGRAPHX_THROW("PARSE_SHAPE: ending axis <= starting axis, end: " +
std::to_string(end) + " start: " + std::to_string(start));
}
if(input_shape.dynamic())
{
return info.add_instruction(make_op("dimensions_of", {{"start", start}, {"end", end}}),
args[0]);
}
else
{
std::size_t output_ndim = end - start;
std::vector<int64_t> vec_shape(output_ndim);
migraphx::shape s(migraphx::shape::int64_type, {output_ndim});
std::vector<std::size_t> input_lens = input_shape.lens();
std::transform(input_lens.begin() + start,
input_lens.begin() + end,
vec_shape.begin(),
[](auto i) { return int64_t(i); });
return info.add_literal(migraphx::literal{s, vec_shape});
}
} }
}; };
......
...@@ -6165,6 +6165,101 @@ def shape_test(): ...@@ -6165,6 +6165,101 @@ def shape_test():
return ([node], [x], [y]) return ([node], [x], [y])
@onnx_test()
def shape_dyn_test0():
    # Shape of a dynamic rank-4 input with no start/end attributes:
    # the parser should cover all four axes.
    in_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                            [None, 4, None, None])
    out_info = helper.make_tensor_value_info('y', TensorProto.INT64, [4])
    shape_node = onnx.helper.make_node('Shape', inputs=['x'], outputs=['y'])
    return ([shape_node], [in_info], [out_info])
@onnx_test()
def shape_dyn_test1():
    # start=2 with no end attribute: take the trailing two dimensions.
    in_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                            [None, 4, None, None])
    out_info = helper.make_tensor_value_info('y', TensorProto.INT64, [2])
    shape_node = onnx.helper.make_node('Shape',
                                       inputs=['x'],
                                       outputs=['y'],
                                       start=2)
    return ([shape_node], [in_info], [out_info])
@onnx_test()
def shape_dyn_test2():
    # Negative start: -2 on a rank-4 input normalizes to axis 2.
    in_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                            [None, 4, None, None])
    out_info = helper.make_tensor_value_info('y', TensorProto.INT64, [2])
    shape_node = onnx.helper.make_node('Shape',
                                       inputs=['x'],
                                       outputs=['y'],
                                       start=-2)
    return ([shape_node], [in_info], [out_info])
@onnx_test()
def shape_dyn_test3():
    # Both attributes set: report only axis 1 (range [1, 2)).
    in_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                            [None, 4, None, None])
    out_info = helper.make_tensor_value_info('y', TensorProto.INT64, [2])
    shape_node = onnx.helper.make_node('Shape',
                                       inputs=['x'],
                                       outputs=['y'],
                                       start=1,
                                       end=2)
    return ([shape_node], [in_info], [out_info])
@onnx_test()
def shape_end_oob_test():
    # end=5 is out of bounds for a rank-4 input and gets clamped to 4, so the
    # result covers all four dimensions; declare the output as [4] to match.
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                      [None, 4, None, None])
    y = helper.make_tensor_value_info('y', TensorProto.INT64, [4])
    node = onnx.helper.make_node('Shape', inputs=['x'], outputs=['y'], end=5)
    return ([node], [x], [y])
@onnx_test()
def shape_start_oob_test():
    # start=-6 is below -rank and gets clamped to -4 (axis 0), so the result
    # covers all four dimensions; declare the output as [4] to match.
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                      [None, 4, None, None])
    y = helper.make_tensor_value_info('y', TensorProto.INT64, [4])
    node = onnx.helper.make_node('Shape',
                                 inputs=['x'],
                                 outputs=['y'],
                                 start=-6)
    return ([node], [x], [y])
@onnx_test()
def shape_end_less_start_error():
    # Invalid attributes: end (1) precedes start (3); parsing must fail.
    in_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                            [None, 4, None, None])
    out_info = helper.make_tensor_value_info('y', TensorProto.INT64, [2])
    shape_node = onnx.helper.make_node('Shape',
                                       inputs=['x'],
                                       outputs=['y'],
                                       start=3,
                                       end=1)
    return ([shape_node], [in_info], [out_info])
@onnx_test() @onnx_test()
def shape_gather_test(): def shape_gather_test():
values = np.array([1]) values = np.array([1])
......
...@@ -6060,6 +6060,118 @@ TEST_CASE(shape_test) ...@@ -6060,6 +6060,118 @@ TEST_CASE(shape_test)
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(shape_dyn_test0)
{
    // ONNX Shape on a dynamic input with no attributes parses to a
    // dimensions_of op over all four axes.
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0  = mm->add_parameter("x", s);
    auto ret = mm->add_instruction(migraphx::make_op("dimensions_of", {{"end", 4}}), p0);
    mm->add_return({ret});
    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog = parse_onnx("shape_dyn_test0.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE(shape_dyn_test1)
{
    // start=2 on a dynamic rank-4 input: dimensions_of over axes [2, 4).
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0 = mm->add_parameter("x", s);
    auto ret =
        mm->add_instruction(migraphx::make_op("dimensions_of", {{"start", 2}, {"end", 4}}), p0);
    mm->add_return({ret});
    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog = parse_onnx("shape_dyn_test1.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE(shape_dyn_test2)
{
    // start=-2 normalizes to axis 2: dimensions_of over axes [2, 4).
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0 = mm->add_parameter("x", s);
    auto ret =
        mm->add_instruction(migraphx::make_op("dimensions_of", {{"start", 2}, {"end", 4}}), p0);
    mm->add_return({ret});
    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog = parse_onnx("shape_dyn_test2.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE(shape_dyn_test3)
{
    // start=1, end=2: dimensions_of over the single axis [1, 2).
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0 = mm->add_parameter("x", s);
    auto ret =
        mm->add_instruction(migraphx::make_op("dimensions_of", {{"start", 1}, {"end", 2}}), p0);
    mm->add_return({ret});
    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog = parse_onnx("shape_dyn_test3.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE(shape_end_oob_test)
{
    // end=5 is clamped to the input rank (4), so all axes are covered.
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0  = mm->add_parameter("x", s);
    auto ret = mm->add_instruction(migraphx::make_op("dimensions_of", {{"end", 4}}), p0);
    mm->add_return({ret});
    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog = migraphx::parse_onnx("shape_end_oob_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE(shape_start_oob_test)
{
    // start=-6 is clamped to -4 (axis 0), so all axes are covered.
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0  = mm->add_parameter("x", s);
    auto ret = mm->add_instruction(migraphx::make_op("dimensions_of", {{"end", 4}}), p0);
    mm->add_return({ret});
    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog = migraphx::parse_onnx("shape_start_oob_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE(shape_end_less_start_error)
{
    // Parsing must fail when the normalized end axis precedes the start axis.
    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto parse_call = [&] { migraphx::parse_onnx("shape_end_less_start_error.onnx", options); };
    EXPECT(test::throws(parse_call));
}
TEST_CASE(shape_gather_test) TEST_CASE(shape_gather_test)
{ {
migraphx::program p; migraphx::program p;
......
No preview for this file type
...@@ -557,6 +557,39 @@ TEST_CASE(convolution_backwards_dyn_kernel_2d) ...@@ -557,6 +557,39 @@ TEST_CASE(convolution_backwards_dyn_kernel_2d)
expect_shape(output, migraphx::make_op("convolution_backwards"), input, weights); expect_shape(output, migraphx::make_op("convolution_backwards"), input, weights);
} }
TEST_CASE(dimensions_of0)
{
    // Static rank-4 input, full axis range: output is a length-4 int64 tensor.
    migraphx::shape in_shape{migraphx::shape::float_type, {4, 3, 2, 1}};
    migraphx::shape expected{migraphx::shape::int64_type, {4}};
    expect_shape(expected, migraphx::make_op("dimensions_of", {{"end", 4}}), in_shape);
}
TEST_CASE(dimensions_of1)
{
    // Static input with axis range [1, 3): output has two elements.
    migraphx::shape in_shape{migraphx::shape::float_type, {4, 3, 2, 1}};
    migraphx::shape expected{migraphx::shape::int64_type, {2}};
    expect_shape(expected, migraphx::make_op("dimensions_of", {{"start", 1}, {"end", 3}}), in_shape);
}
TEST_CASE(dimensions_of2)
{
    // Dynamic input: the output shape is still static with length end - start.
    migraphx::shape in_shape{migraphx::shape::float_type,
                             {{1, 4, {2}}, {2, 4}, {2, 4}, {1, 6, {2}}}};
    migraphx::shape expected{migraphx::shape::int64_type, {2}};
    expect_shape(expected, migraphx::make_op("dimensions_of", {{"start", 1}, {"end", 3}}), in_shape);
}
TEST_CASE(dimensions_of_error0)
{
    // start == end is rejected by compute_shape (start >= end check).
    migraphx::shape in_shape{migraphx::shape::float_type, {{1, 4, {2}}, {2, 4}}};
    throws_shape(migraphx::make_op("dimensions_of", {{"start", 3}, {"end", 3}}), in_shape);
}
TEST_CASE(dimensions_of_error1)
{
    // start > end is rejected by compute_shape (start >= end check).
    migraphx::shape in_shape{migraphx::shape::float_type, {{1, 4, {2}}, {2, 4}}};
    throws_shape(migraphx::make_op("dimensions_of", {{"start", 3}, {"end", 0}}), in_shape);
}
TEST_CASE(dot_ndim_error0) TEST_CASE(dot_ndim_error0)
{ {
migraphx::shape s_m1{migraphx::shape::float_type, {5}}; migraphx::shape s_m1{migraphx::shape::float_type, {5}};
......
...@@ -2357,6 +2357,46 @@ TEST_CASE(dequantizelinear) ...@@ -2357,6 +2357,46 @@ TEST_CASE(dequantizelinear)
} }
} }
TEST_CASE(dimensions_of_test0)
{
    // Evaluate dimensions_of on the ref target: with a fixed {2,3,4} input
    // it must return the runtime lengths of all three axes.
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {2, 4}}, {3, 3}, {4, 4}}};
    auto x_param = mm->add_parameter("x", s);
    mm->add_instruction(migraphx::make_op("dimensions_of", {{"end", 3}}), x_param);
    p.compile(migraphx::make_target("ref"));
    migraphx::shape input_fixed_shape{migraphx::shape::float_type, {2, 3, 4}};
    std::vector<float> x_data(24, 1.0);
    migraphx::parameter_map params;
    params["x"] = migraphx::argument(input_fixed_shape, x_data.data());
    auto result = p.eval(params).back();
    std::vector<int64_t> results_vector;
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<int64_t> gold = {2, 3, 4};
    EXPECT(migraphx::verify::verify_range(results_vector, gold));
}
TEST_CASE(dimensions_of_test1)
{
    // Evaluate dimensions_of on the ref target with a sub-range [2, 4):
    // only the trailing two runtime lengths are returned.
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {3, 3}, {3, 8}, {3, 8}}};
    auto x_param = mm->add_parameter("x", s);
    mm->add_instruction(migraphx::make_op("dimensions_of", {{"start", 2}, {"end", 4}}), x_param);
    p.compile(migraphx::make_target("ref"));
    migraphx::shape input_fixed_shape{migraphx::shape::float_type, {1, 3, 4, 4}};
    std::vector<float> x_data(48, 1.0);
    migraphx::parameter_map params;
    params["x"] = migraphx::argument(input_fixed_shape, x_data.data());
    auto result = p.eval(params).back();
    std::vector<int64_t> results_vector;
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<int64_t> gold = {4, 4};
    EXPECT(migraphx::verify::verify_range(results_vector, gold));
}
TEST_CASE(div_test) TEST_CASE(div_test)
{ {
migraphx::program p; migraphx::program p;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment