Unverified Commit 4c32afcc, authored by Charlie Lin, committed by GitHub

Dynamic ref flatten (#1482)

Changes flatten's compute_shape() to handle dynamic shapes.
Calculates the flattened shape from the min, max, and opt values of each dynamic dimension.
parent 352c2465
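The shape arithmetic here is: the dimensions before axis collapse into the first output dimension and the remaining dimensions into the second, with the min, max, and opt values each multiplied independently. Below is a minimal standalone sketch of that arithmetic (not MIGraphX API; the sample values mirror the flatten_dyn_test shapes further down):

// Sketch only: reproduces the min/max/opt products that the dynamic branch
// of flatten::normalize_compute_shape performs, outside of MIGraphX.
#include <cstddef>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

int main()
{
    // Hypothetical input: dims {1,4,0} x {3,3,0} x {4,4,0} x {5,5,0} given as
    // separate {min, max, opt} vectors, flattened at axis = 2.
    std::vector<std::size_t> min_lens = {1, 3, 4, 5};
    std::vector<std::size_t> max_lens = {4, 3, 4, 5};
    std::vector<std::size_t> opt_lens = {0, 0, 0, 0};
    std::ptrdiff_t axis = 2;

    auto prod = [](auto first, auto last) {
        return std::accumulate(first, last, std::size_t{1}, std::multiplies<>{});
    };

    // Dimensions before the axis collapse into x, the rest into y:
    // x = {1*3, 4*3, 0*0} = {3, 12, 0}
    // y = {4*5, 4*5, 0*0} = {20, 20, 0}
    std::cout << "x = {" << prod(min_lens.begin(), min_lens.begin() + axis) << ", "
              << prod(max_lens.begin(), max_lens.begin() + axis) << ", "
              << prod(opt_lens.begin(), opt_lens.begin() + axis) << "}\n";
    std::cout << "y = {" << prod(min_lens.begin() + axis, min_lens.end()) << ", "
              << prod(max_lens.begin() + axis, max_lens.end()) << ", "
              << prod(opt_lens.begin() + axis, opt_lens.end()) << "}\n";
}

Since the sample opt values are all 0, both output opt values come out 0, which is the behavior called out by the comment in normalize_compute_shape below.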
@@ -55,17 +55,47 @@ struct flatten
     std::string name() const { return "flatten"; }
     shape normalize_compute_shape(std::vector<shape> inputs) const
     {
-        check_shapes{inputs, *this}.has(1).standard();
-        auto&& lens = inputs.front().lens();
-        auto x =
-            std::accumulate(lens.begin(), lens.begin() + axis, std::size_t{1}, std::multiplies<>{});
-        auto y =
-            std::accumulate(lens.begin() + axis, lens.end(), std::size_t{1}, std::multiplies<>{});
-        return {inputs.at(0).type(), {x, y}};
+        check_shapes{inputs, *this, true}.has(1);
+        auto s = inputs[0];
+        if(s.dynamic())
+        {
+            auto min_lens = s.min_lens();
+            auto max_lens = s.max_lens();
+            auto opt_lens = s.opt_lens();
+            // If any of the opt values is 0, output opt will be 0
+            shape::dynamic_dimension x = {
+                std::accumulate(
+                    min_lens.begin(), min_lens.begin() + axis, std::size_t{1}, std::multiplies<>{}),
+                std::accumulate(
+                    max_lens.begin(), max_lens.begin() + axis, std::size_t{1}, std::multiplies<>{}),
+                std::accumulate(opt_lens.begin(),
+                                opt_lens.begin() + axis,
+                                std::size_t{1},
+                                std::multiplies<>{})};
+            shape::dynamic_dimension y = {
+                std::accumulate(
+                    min_lens.begin() + axis, min_lens.end(), std::size_t{1}, std::multiplies<>{}),
+                std::accumulate(
+                    max_lens.begin() + axis, max_lens.end(), std::size_t{1}, std::multiplies<>{}),
+                std::accumulate(
+                    opt_lens.begin() + axis, opt_lens.end(), std::size_t{1}, std::multiplies<>{}),
+            };
+            return {s.type(), {x, y}};
+        }
+        else
+        {
+            check_shapes{inputs, *this}.standard();
+            auto&& lens = s.lens();
+            auto x = std::accumulate(
+                lens.begin(), lens.begin() + axis, std::size_t{1}, std::multiplies<>{});
+            auto y = std::accumulate(
+                lens.begin() + axis, lens.end(), std::size_t{1}, std::multiplies<>{});
+            return {s.type(), {x, y}};
+        }
     }
-    argument compute(shape output_shape, std::vector<argument> args) const
+    argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
     {
-        return args[0].reshape(output_shape);
+        return args[0].reshape(dyn_out.computed_shape);
     }
     std::ptrdiff_t output_alias(const std::vector<shape>&) const { return 0; }
 };
......
@@ -1966,6 +1966,19 @@ def flatten_nonstd_test():
     return ([trans, node, node2], [x], [y, y2])


+@onnx_test
+def flatten_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 4, 5])
+    y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [None, 20])
+    node = onnx.helper.make_node('Flatten',
+                                 inputs=['0'],
+                                 axis=2,
+                                 outputs=['2'])
+    return ([node], [x], [y])
+
+
 @onnx_test
 def floor_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
......
@@ -1977,6 +1977,23 @@ TEST_CASE(flatten_nonstd_test)
     EXPECT(p == prog);
 }

+TEST_CASE(flatten_dyn_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l0  = mm->add_parameter(
+        "0",
+        migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}, {5, 5, 0}}});
+    auto c0  = mm->add_instruction(migraphx::make_op("contiguous"), l0);
+    auto ret = mm->add_instruction(migraphx::make_op("flatten", {{"axis", 2}}), c0);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    auto prog = parse_onnx("flatten_dyn_test.onnx", options);
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(floor_test)
 {
     migraphx::program p;
......
@@ -495,6 +495,62 @@ TEST_CASE(flatten_shape)
     throws_shape(migraphx::make_op("flatten", {{"axis", -5}}), input);
 }

+TEST_CASE(flatten_dyn_axis0)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{1, 4, 0}, {4, 4, 0}, {6, 6, 0}, {8, 8, 0}}};
+    expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 1, 0}, {192, 768, 0}}},
+                 migraphx::make_op("flatten", {{"axis", 0}}),
+                 input);
+    expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 1, 0}, {192, 768, 0}}},
+                 migraphx::make_op("flatten", {{"axis", -4}}),
+                 input);
+}
+
+TEST_CASE(flatten_dyn_axis1)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{2, 2, 2}, {4, 4, 0}, {4, 6, 5}, {4, 6, 5}}};
+    expect_shape(
+        migraphx::shape{migraphx::shape::float_type, {{2, 2, 2}, {4 * 4 * 4, 4 * 6 * 6, 0}}},
+        migraphx::make_op("flatten", {{"axis", 1}}),
+        input);
+    expect_shape(
+        migraphx::shape{migraphx::shape::float_type, {{2, 2, 2}, {4 * 4 * 4, 4 * 6 * 6, 0}}},
+        migraphx::make_op("flatten", {{"axis", -3}}),
+        input);
+}
+
+TEST_CASE(flatten_dyn_axis2)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{2, 2, 2}, {4, 4, 0}, {4, 6, 5}, {4, 6, 5}}};
+    expect_shape(
+        migraphx::shape{migraphx::shape::float_type, {{2 * 4, 2 * 4, 0}, {4 * 4, 6 * 6, 5 * 5}}},
+        migraphx::make_op("flatten", {{"axis", 2}}),
+        input);
+}
+
+TEST_CASE(flatten_dyn_axis3)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{1, 4, 0}, {4, 4, 0}, {6, 6, 0}, {8, 8, 0}}};
+    expect_shape(
+        migraphx::shape{migraphx::shape::float_type, {{1 * 4 * 6, 4 * 4 * 6, 0}, {8, 8, 0}}},
+        migraphx::make_op("flatten", {{"axis", 3}}),
+        input);
+}
+
+TEST_CASE(flatten_dyn_axis4)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{1, 4, 0}, {4, 4, 0}, {6, 6, 0}, {8, 8, 0}}};
+    expect_shape(migraphx::shape{migraphx::shape::float_type,
+                                 {{1 * 4 * 6 * 8, 4 * 4 * 6 * 8, 0}, {1, 1, 0}}},
+                 migraphx::make_op("flatten", {{"axis", 4}}),
+                 input);
+}
+
 TEST_CASE(gather)
 {
     {
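As a sanity check on the expected values above: in flatten_dyn_axis0, y collapses all four input dimensions, giving {1 * 4 * 6 * 8, 4 * 4 * 6 * 8, 0} = {192, 768, 0}. In flatten_dyn_axis2, x = {2 * 4, 2 * 4, 2 * 0} = {8, 8, 0}, where the zero opt of the second input dimension zeroes the output opt, while y = {4 * 4, 6 * 6, 5 * 5} = {16, 36, 25} because both trailing dimensions carry a nonzero opt of 5.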
......