Commit d6cd850b authored by Brian Pickrell

Misc. style changes. Added a shape test and an onnx test for the case of a reduce op with no axes specified (should reduce all axes). The new ONNX test case is still under construction and doesn't pass.
parent 8801eb42
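Note on the behavior under test: per the commit message, a reduce operator with no axes attribute should reduce over every axis of the input. A minimal NumPy sketch of that semantics (illustration only, not MIGraphX code):

import numpy as np

x = np.arange(1, 7, dtype=np.float32).reshape(2, 3)

# ReduceL1 with axes given: sum of absolute values over those axes only.
per_axis = np.sum(np.abs(x), axis=1)   # shape (2,)

# ReduceL1 with no axes given: every axis is reduced; with keepdims=0
# the result collapses to a scalar.
all_axes = np.sum(np.abs(x))           # shape ()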
@@ -123,6 +123,8 @@ struct reduce_op : op_name<Derived>
     auto tuned_axes = tune_axes(output_dyn_dims.size());
     for(const auto& axis : tuned_axes)
     {
+        // At the time of writing, there's no functional difference between
+        // optimum of 0 (no opt) or 1.
         output_dyn_dims[axis] = {1, 1, 0};
     }
...
@@ -4637,6 +4637,18 @@ def reducel1_dyn_test():
     return ([node], [x], [y])


+@onnx_test
+def reducel1_dyn_noaxes_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None])
+
+    node = onnx.helper.make_node('ReduceL1',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 keepdims=0)
+
+    return ([node], [x], [y])
+
+
 @onnx_test
 def reducel2_test():
...
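The @onnx_test decorator (defined elsewhere in gen_onnx.py) takes the returned ([node], [x], [y]) tuple and turns it into a saved .onnx file; the details below are an assumption, but a rough standalone equivalent using only standard onnx helper calls would be:

import onnx
from onnx import helper, TensorProto

x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None])
node = helper.make_node('ReduceL1', inputs=['x'], outputs=['y'], keepdims=0)

# Assemble and save the model roughly the way the decorator presumably does;
# the filename matches what the parser test below loads.
graph = helper.make_graph([node], 'reducel1_dyn_noaxes_test', [x], [y])
onnx.save(helper.make_model(graph), 'reducel1_dyn_noaxes_test.onnx')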
@@ -4425,22 +4425,43 @@ TEST_CASE(reducel1_test)
 TEST_CASE(reducel1_dyn_test)
 {
+    {
         migraphx::program p;
         auto* mm = p.get_main_module();
         // a shape with 4 dynamic dimensions
         auto l0 = mm->add_parameter(
             "x",
             migraphx::shape{migraphx::shape::float_type, {{3, 3, 0}, {3, 5, 0}, {4, 6, 5}, {5, 7, 6}}});
-        auto abs_l0 = mm->add_instruction(migraphx::make_op("abs"), l0);
-        auto sum_l0 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {-2}}}), abs_l0);
-        auto sq_l0  = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {-2}}}), sum_l0);
-        mm->add_return({sq_l0});
+        auto abs_ins = mm->add_instruction(migraphx::make_op("abs"), l0);
+        auto sum_ins = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {-2}}}), abs_ins);
+        auto sq_ins  = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {-2}}}), sum_ins);
+        mm->add_return({sq_ins});
         migraphx::onnx_options options;
         options.map_dyn_input_dims["x"] = {{3, 3}, {3, 5}, {4, 6, 5}, {5, 7, 6}};
         auto prog = migraphx::parse_onnx("reducel1_dyn_test.onnx", options);
         EXPECT(p == prog);
+    }
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        // a shape with 4 dynamic dimensions
+        auto l0 = mm->add_parameter(
+            "x",
+            migraphx::shape{migraphx::shape::float_type, {{3, 3, 0}, {3, 5, 0}, {4, 6, 5}, {5, 7, 6}}});
+        auto abs_ins = mm->add_instruction(migraphx::make_op("abs"), l0);
+        auto sum_ins = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {}}}), abs_ins);
+        auto sq_ins  = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {}}}), sum_ins);
+        mm->add_return({sq_ins});
+        migraphx::onnx_options options;
+        options.map_dyn_input_dims["x"] = {{3, 3}, {3, 5}, {4, 6, 5}, {5, 7, 6}};
+        auto prog = migraphx::parse_onnx("reducel1_dyn_noaxes_test.onnx", options);
+        EXPECT(p == prog);
+    }
 }

 TEST_CASE(reducel2_test)
...
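The test above encodes the expected lowering: ReduceL1 with an empty axes list becomes abs, then reduce_sum over all axes, then a squeeze. A quick NumPy check of that decomposition (illustration only; the array values are made up):

import numpy as np

x = np.random.rand(3, 4, 5, 6).astype(np.float32)

# abs -> sum over every axis (keepdims) -> squeeze, versus ReduceL1 over all axes.
reduced = np.sum(np.abs(x), axis=None, keepdims=True)  # shape (1, 1, 1, 1)
result = np.squeeze(reduced)                           # scalar, i.e. keepdims=0
assert np.isclose(result, np.abs(x).sum())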
@@ -1799,6 +1799,15 @@ void test_dyn_reduce_ops()
                      T{{0}},
                      input);
     }
+    {
+        // Empty axis argument reduces all axes
+        migraphx::shape input{migraphx::shape::float_type, {{2, 3, 3}, {2, 4, 4}}};
+        expect_shape(migraphx::shape{migraphx::shape::float_type,
+                                     std::vector<migraphx::shape::dynamic_dimension>(
+                                         {{1, 1, 0}, {1, 1, 0}})},
+                     T{{}},
+                     input);
+    }
     {
         migraphx::shape input{migraphx::shape::float_type, {{2, 3, 3}, {2, 4, 4}}};
         throws_shape(T{{4}}, input);
...
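The new shape-test block asserts that an empty axes list reduces every axis, so each dynamic dimension of the output becomes {1, 1, 0} (min=1, max=1, optimum=0). A small Python sketch of that rule, using a hypothetical helper name purely for illustration:

# Hypothetical helper, not MIGraphX source: compute output dynamic dimensions
# for a reduce op given (min, max, optimum) input dims and an axes list.
def reduced_dyn_dims(input_dims, axes):
    # An empty axes list means "reduce every axis".
    axes = set(axes) if axes else set(range(len(input_dims)))
    return [(1, 1, 0) if i in axes else d for i, d in enumerate(input_dims)]

print(reduced_dyn_dims([(2, 3, 3), (2, 4, 4)], []))   # [(1, 1, 0), (1, 1, 0)]
print(reduced_dyn_dims([(2, 3, 3), (2, 4, 4)], [0]))  # [(1, 1, 0), (2, 4, 4)]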
@@ -5742,9 +5742,9 @@ TEST_CASE(reduce_max_dynamic_axis0)
     p.compile(migraphx::ref::target{});
     migraphx::parameter_map params;
-    migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {2, 5}};
+    migraphx::shape input_fixed_shape{migraphx::shape::float_type, {2, 5}};
     std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
-    params["X"] = migraphx::argument(input_fixed_shape0, input_data.data());
+    params["X"] = migraphx::argument(input_fixed_shape, input_data.data());
     auto result = p.eval(params).back();
     std::vector<float> results_vector;
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
...