Unverified commit 0d197f27 authored by Brian Pickrell, committed by GitHub

Dynamic reduce (#1432)

Implements dynamic shapes in reduce_op and all its child operator classes (reduce_max etc.)
parent 4394e9b3
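
In rough terms: a reduce can now be built over a parameter whose dimensions are only bounded, with the concrete output shape resolved at evaluation time. A minimal end-to-end sketch, assuming the migraphx C++ API as exercised by the tests in this commit (parameter name, bounds, and data are illustrative):

    migraphx::program p;
    auto* mm = p.get_main_module();
    // first dimension is dynamic, written {min, max}: anywhere from 2 to 4
    migraphx::shape s{migraphx::shape::float_type, {{2, 4}, {5, 5}}};
    auto x = mm->add_parameter("X", s);
    mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {0}}}), x);
    p.compile(migraphx::ref::target{});

    // at eval time, bind an argument whose static shape falls within the bounds
    migraphx::shape fixed{migraphx::shape::float_type, {2, 5}};
    std::vector<float> data(fixed.elements(), 1.0f);
    migraphx::parameter_map params;
    params["X"] = migraphx::argument(fixed, data.data());
    auto result = p.eval(params).back(); // holds the 5 column-wise maxima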
@@ -26,6 +26,7 @@
 #include <migraphx/op/name.hpp>
 #include <migraphx/check_shapes.hpp>
+#include <migraphx/dyn_output.hpp>
 #include <migraphx/argument.hpp>
 #include <migraphx/tensor_view.hpp>
 #include <migraphx/shape_for_each.hpp>
@@ -105,18 +106,41 @@ struct reduce_op : op_name<Derived>
         return tuned_axes;
     }

+    /**
+     * @brief Returns a shape in which the axis or axes named
+     * for reduction by this op are set to size 1.
+     *
+     * @param inputs list of input shapes
+     * @return shape
+     */
     shape normalize_compute_shape(std::vector<shape> inputs) const
     {
-        check_shapes{inputs, *this}.has(1);
+        check_shapes{inputs, *this, true}.has(1);
         auto s = inputs.at(0);
-        auto lens       = s.lens();
-        auto tuned_axes = tune_axes(lens.size());
-        for(auto axis : tuned_axes)
+        if(s.dynamic())
         {
-            lens[axis] = 1;
+            auto output_dyn_dims = s.dyn_dims();
+            auto tuned_axes      = tune_axes(output_dyn_dims.size());
+            for(const auto& axis : tuned_axes)
+            {
+                // At the time of writing, there is no functional difference between
+                // an optimum of 0 (no optimum) and 1.
+                output_dyn_dims[axis] = {1, 1, 0};
+            }
+            return shape{s.type(), output_dyn_dims};
+        }
+        else
+        {
+            auto lens       = s.lens();
+            auto tuned_axes = tune_axes(lens.size());
+            for(const auto& axis : tuned_axes)
+            {
+                lens[axis] = 1;
+            }
+            return inputs[0].with_lens(lens);
         }
-        return inputs[0].with_lens(lens);
     }

     template <class T>
@@ -124,7 +148,7 @@ struct reduce_op : op_name<Derived>
                    const std::vector<T>& in_lens,
                    std::vector<T>& out_lens) const
     {
-        for(auto axis : tuned_axes)
+        for(const auto& axis : tuned_axes)
         {
             out_lens[axis] = in_lens[axis];
         }
@@ -151,17 +175,17 @@ struct reduce_op : op_name<Derived>
         static_cast<const Derived&>(*this).output(batch_shape)(val);
     }

-    argument compute(const shape& output_shape, std::vector<argument> args) const
+    argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
     {
-        argument result{output_shape};
+        argument result{dyn_out.computed_shape};
         auto arg_lens   = args.front().get_shape().lens();
         auto tuned_axes = tune_axes(arg_lens.size());
-        std::vector<std::size_t> batch_lens(output_shape.lens().size(), 1);
+        std::vector<std::size_t> batch_lens(dyn_out.computed_shape.lens().size(), 1);
         tune_dims(tuned_axes, arg_lens, batch_lens);
-        shape batch_shape{output_shape.type(), batch_lens};
+        shape batch_shape{dyn_out.computed_shape.type(), batch_lens};
         visit_all(result, args[0])([&](auto output, auto input) {
-            par_for(output_shape.elements(), [&](auto i) {
-                auto out_idx = output_shape.multi(i);
+            par_for(dyn_out.computed_shape.elements(), [&](auto i) {
+                auto out_idx = dyn_out.computed_shape.multi(i);
                 this->reduce(input, batch_shape, tuned_axes, out_idx, output);
             });
         });
...
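Taken together: for a dynamic input, normalize_compute_shape pins only the reduced dimensions to extent 1 and passes the others through with their bounds intact, while compute sizes its output from dyn_out.computed_shape, the shape already resolved against the actual arguments. A sketch of the shape mapping, with bounds borrowed from the shape tests further down:

    // two dynamic dimensions, each written as {min, max, optimum}
    migraphx::shape in{migraphx::shape::float_type, {{2, 3, 3}, {2, 4, 4}}};
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto x = mm->add_parameter("x", in);
    auto r = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {-1}}}), x);
    // r->get_shape() is {float_type, {{2, 3, 3}, {1, 1, 0}}}: the reduced axis
    // collapses to {1, 1, 0}; the other keeps its {2, 3, 3} bounds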
@@ -68,8 +68,7 @@ instruction_ref parse_reduce_oper(const std::string& op_name,
        }
        else
        {
-            std::size_t n_dim = args.front()->get_shape().lens().size();
-            axes.resize(n_dim);
+            axes.resize(args.front()->get_shape().ndim());
            std::iota(axes.begin(), axes.end(), 0);
        }
    }
...
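The rewritten fallback covers ReduceX nodes that carry no axes attribute, where ONNX semantics default to reducing every axis. Presumably ndim() is preferred over lens().size() because it is also defined for dynamic shapes, whose lens() is not meaningful. The fill itself is just:

    std::vector<std::int64_t> axes(args.front()->get_shape().ndim());
    std::iota(axes.begin(), axes.end(), 0); // axes = {0, 1, ..., ndim - 1}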
@@ -23,7 +23,7 @@
 #####################################################################################
 # This script generates onnx files for MIGraphX onnx operator tests.
 # To generate an individual onnx file, you can use the following
-# command: python -c "import gen_onnx; gen_onnx.{test_name}_test()"
+# command: python3 -c "import gen_onnx; gen_onnx.{test_name}_test()"
 import numpy as np
 import onnx
 from onnx import helper
@@ -4675,6 +4675,34 @@ def reducel1_test():
     return ([node], [x], [y])


+@onnx_test()
+def reducel1_dyn_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None])
+    axes = [-2]
+    node = onnx.helper.make_node('ReduceL1',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axes=axes,
+                                 keepdims=0)
+
+    return ([node], [x], [y])
+
+
+@onnx_test()
+def reducel1_dyn_noaxes_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None])
+    node = onnx.helper.make_node('ReduceL1',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 keepdims=0)
+
+    return ([node], [x], [y])
+
+
 @onnx_test()
 def reducel2_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
@@ -4724,6 +4752,22 @@ def reduce_log_sum_exp_test():
 def reducemax_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
+    axes = [2]
+    node = onnx.helper.make_node('ReduceMax',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axes=axes,
+                                 keepdims=0)
+
+    return ([node], [x], [y])
+
+
+@onnx_test()
+def reducemax_dyn_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 4, 5, 6])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 4, 6])
     axes = [2]
     node = onnx.helper.make_node('ReduceMax',
...
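Per the header comment above, each new case can be regenerated individually, e.g. python3 -c "import gen_onnx; gen_onnx.reducel1_dyn_test()". The [None] and [None, 4, 5, 6] value infos leave those dimensions unspecified in the ONNX file; the parser tests below pin them down through map_dyn_input_dims.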
@@ -4468,6 +4468,50 @@ TEST_CASE(reducel1_test)
     EXPECT(p == prog);
 }

+TEST_CASE(reducel1_dyn_test)
+{
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        // a shape with 4 dynamic dimensions
+        auto l0 = mm->add_parameter("x",
+                                    migraphx::shape{migraphx::shape::float_type,
+                                                    {{3, 3, 0}, {3, 5, 0}, {4, 6, 5}, {5, 7, 6}}});
+        auto abs_ins = mm->add_instruction(migraphx::make_op("abs"), l0);
+        auto sum_ins =
+            mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {-2}}}), abs_ins);
+        auto sq_ins = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {-2}}}), sum_ins);
+        mm->add_return({sq_ins});
+
+        migraphx::onnx_options options;
+        options.map_dyn_input_dims["x"] = {{3, 3}, {3, 5}, {4, 6, 5}, {5, 7, 6}};
+        auto prog = migraphx::parse_onnx("reducel1_dyn_test.onnx", options);
+
+        EXPECT(p == prog);
+    }
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        // No axes given in the onnx file. Parser should default to all axes.
+        auto l0 = mm->add_parameter("x",
+                                    migraphx::shape{migraphx::shape::float_type,
+                                                    {{3, 3, 0}, {3, 5, 0}, {4, 6, 5}, {5, 7, 6}}});
+        auto abs_ins = mm->add_instruction(migraphx::make_op("abs"), l0);
+        auto sum_ins =
+            mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {0, 1, 2, 3}}}), abs_ins);
+        auto sq_ins =
+            mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0, 1, 2, 3}}}), sum_ins);
+        mm->add_return({sq_ins});
+
+        migraphx::onnx_options options;
+        options.map_dyn_input_dims["x"] = {{3, 3}, {3, 5}, {4, 6, 5}, {5, 7, 6}};
+        auto prog = migraphx::parse_onnx("reducel1_dyn_noaxes_test.onnx", options);
+
+        EXPECT(p == prog);
+    }
+}
+
 TEST_CASE(reducel2_test)
 {
     migraphx::program p;
@@ -4518,6 +4562,24 @@ TEST_CASE(reducemax_test)
     EXPECT(p == prog);
 }

+TEST_CASE(reducemax_dyn_test)
+{
+    // input shape with 4 dynamic dimensions
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l0  = mm->add_parameter(
+        "x", migraphx::shape{migraphx::shape::float_type, {{3, 3}, {4, 4}, {5, 5}, {6, 6}}});
+    auto r0 = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {2}}}), l0);
+    auto r1 = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {2}}}), r0);
+    mm->add_return({r1});
+
+    migraphx::onnx_options options;
+    options.map_dyn_input_dims["x"] = {{3, 3}, {4, 4}, {5, 5}, {6, 6}};
+    auto prog = migraphx::parse_onnx("reducemax_dyn_test.onnx", options);
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(reducemean_test)
 {
     migraphx::program p;
...
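Both parser tests check the same lowering: since the ONNX nodes set keepdims=0, the parser emits the reduce (which keeps the reduced axes at extent 1) followed by a squeeze on the same axes, so the expected program is a reduce plus squeeze pair rather than a single instruction.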
@@ -1946,9 +1946,51 @@ void test_reduce_ops()
     }
 }

+// dynamic shape
+template <class T>
+void test_dyn_reduce_ops()
+{
+    {
+        migraphx::shape input{migraphx::shape::float_type, {{2, 3, 3}, {2, 4, 4}}};
+        expect_shape(migraphx::shape{migraphx::shape::float_type,
+                                     std::vector<migraphx::shape::dynamic_dimension>(
+                                         {{2, 3, 3}, {1, 1, 0}})},
+                     T{{-1}},
+                     input);
+    }
+    {
+        migraphx::shape input{migraphx::shape::float_type, {{2, 3, 3}, {2, 4, 4}}};
+        expect_shape(migraphx::shape{migraphx::shape::float_type,
+                                     std::vector<migraphx::shape::dynamic_dimension>(
+                                         {{1, 1, 0}, {2, 4, 4}})},
+                     T{{0}},
+                     input);
+    }
+    {
+        // Empty axes argument reduces all axes
+        migraphx::shape input{migraphx::shape::float_type, {{2, 3, 3}, {2, 4, 4}}};
+        expect_shape(migraphx::shape{migraphx::shape::float_type,
+                                     std::vector<migraphx::shape::dynamic_dimension>(
+                                         {{1, 1, 0}, {1, 1, 0}})},
+                     T{{}},
+                     input);
+    }
+    {
+        migraphx::shape input{migraphx::shape::float_type, {{2, 3, 3}, {2, 4, 4}}};
+        throws_shape(T{{4}}, input);
+    }
+}
+
+TEST_CASE(reduce_max) { test_reduce_ops<migraphx::op::reduce_max>(); }
 TEST_CASE(reduce_mean) { test_reduce_ops<migraphx::op::reduce_mean>(); }
+TEST_CASE(reduce_prod) { test_reduce_ops<migraphx::op::reduce_prod>(); }
 TEST_CASE(reduce_sum) { test_reduce_ops<migraphx::op::reduce_sum>(); }
+
+TEST_CASE(reduce_max_dyn) { test_dyn_reduce_ops<migraphx::op::reduce_max>(); }
+TEST_CASE(reduce_mean_dyn) { test_dyn_reduce_ops<migraphx::op::reduce_mean>(); }
+TEST_CASE(reduce_prod_dyn) { test_dyn_reduce_ops<migraphx::op::reduce_prod>(); }
+TEST_CASE(reduce_sum_dyn) { test_dyn_reduce_ops<migraphx::op::reduce_sum>(); }
+
 TEST_CASE(reshape_shape)
 {
     migraphx::shape input{migraphx::shape::float_type, {24, 1, 1, 1}};
...
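Note the two spellings of a dynamic dimension in these tests: the three-element {min, max, optimum} form, e.g. {4, 6, 5}, and the two-element {min, max} form used in the onnx_options maps, which presumably leaves the optimum unset; per the comment in the reduce_op header, an optimum of 0 means no optimum. A sketch, assuming aggregate construction works as in the tests:

    migraphx::shape::dynamic_dimension fixed{3, 3, 0};  // min == max == 3, effectively static
    migraphx::shape::dynamic_dimension ranged{4, 6, 5}; // anywhere from 4 to 6, optimum 5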
@@ -5753,6 +5753,27 @@ TEST_CASE(reduce_max_axis0)
     EXPECT(results_vector == gold);
 }

+TEST_CASE(reduce_max_dynamic_axis0)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    migraphx::shape s{migraphx::shape::float_type, {{2, 4, 2}, {3, 5, 3}}};
+    auto input         = mm->add_parameter("X", s);
+    auto reduce_max_op = migraphx::make_op("reduce_max", {{"axes", {0}}});
+    mm->add_instruction(reduce_max_op, input);
+    p.compile(migraphx::ref::target{});
+
+    migraphx::parameter_map params;
+    migraphx::shape input_fixed_shape{migraphx::shape::float_type, {2, 5}};
+    std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+    params["X"] = migraphx::argument(input_fixed_shape, input_data.data());
+    auto result = p.eval(params).back();
+    std::vector<float> results_vector;
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold = {6, 7, 8, 9, 10};
+    EXPECT(migraphx::verify_range(results_vector, gold));
+}
+
 TEST_CASE(reduce_max_axis01)
 {
     migraphx::program p;
...
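A detail worth noting in this ref test: the parameter is declared with bounds {{2, 4, 2}, {3, 5, 3}} but evaluated with the concrete shape {2, 5}, which falls inside both ranges. Reducing axis 0 of the 2x5 input {1, ..., 10} takes the maximum down each column, giving the gold output {6, 7, 8, 9, 10}.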