Commit f60b5421 authored by Shucai Xiao, committed by mvermeulen

Improve parsing of pooling operators related to onnxruntime (#426)



* change char to upper case

* clang format

* minor changes

* improve implementation of the pooling operator

* clang format

* improve implementation of the pooling operator

* clang format

* improve parsing of the pooling operators

* clang format

* continue improving the pooling operators

* clang format

* correct the explicit pad values for maxpool

* clang format

* add unit tests for parsing pooling operators

* clang format

* fix review comments

* refine the padding processing in parsing pooling operator

* clang format

* add more onnx unit tests for better code coverage

* clang format

* refine code to fix review comments

* clang format

* fix onnx unit test
Co-authored-by: mvermeulen <5479696+mvermeulen@users.noreply.github.com>
parent 3af87aaf
...@@ -327,6 +327,65 @@ struct onnx_parser
        }
    }

    template <class Op>
    instruction_ref process_auto_pad_attribute(instruction_ref ins,
                                               attribute_map& attributes,
                                               Op& op,
                                               const std::vector<std::size_t>& in_lens)
    {
        if(!contains(attributes, "auto_pad"))
        {
            return ins;
        }

        auto auto_pad = attributes["auto_pad"].s();
        if(auto_pad.find("SAME") != std::string::npos)
        {
            // calculate the padding
            std::array<std::size_t, 2> out_lens;
            out_lens[0] = (in_lens[2] + op.stride[0] - 1) / op.stride[0];
            out_lens[1] = (in_lens[3] + op.stride[1] - 1) / op.stride[1];
            std::array<std::size_t, 2> explicit_pads;
            explicit_pads[0] = (out_lens[0] - 1) * op.stride[0] + op.lengths[0] - in_lens[2];
            explicit_pads[1] = (out_lens[1] - 1) * op.stride[1] + op.lengths[1] - in_lens[3];
            op.padding[0]    = explicit_pads[0] / 2;
            op.padding[1]    = explicit_pads[1] / 2;
            explicit_pads[0] -= 2 * op.padding[0];
            explicit_pads[1] -= 2 * op.padding[1];

            std::vector<std::int64_t> pads(8, 0);
            if(explicit_pads[0] != 0 or explicit_pads[1] != 0)
            {
                if(auto_pad == "SAME_UPPER")
                {
                    pads[6] = explicit_pads[0];
                    pads[7] = explicit_pads[1];
                }
                else if(auto_pad == "SAME_LOWER")
                {
                    pads[2] = explicit_pads[0];
                    pads[3] = explicit_pads[1];
                }

                // MaxPool
                if(op.mode == "max")
                {
                    ins = prog.add_instruction(
                        op::pad{pads, std::numeric_limits<float>::lowest()}, ins);
                }
                // AveragePool
                else
                {
                    ins = prog.add_instruction(op::pad{pads}, ins);
                }
            }
            op.padding_mode = op::padding_mode_t::same;
        }

        return ins;
    }
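
For reference, a minimal sketch (not part of the commit) of the SAME padding arithmetic that process_auto_pad_attribute performs, shown for a single spatial dimension and using the 5x5 input, 2x2 kernel, stride 1 case exercised by the unit tests later in this diff:

# Illustrative only; mirrors the helper above for one spatial dimension.
in_len, stride, kernel = 5, 1, 2

out_len = (in_len + stride - 1) // stride               # ceil(in / stride) = 5
total_pad = (out_len - 1) * stride + kernel - in_len    # 1
symmetric = total_pad // 2                              # 0 -> stored in op.padding
leftover = total_pad - 2 * symmetric                    # 1 -> handled by an explicit pad op

# SAME_UPPER places the leftover at the end (bottom/right), SAME_LOWER at the
# beginning (top/left); MaxPool pads with -inf, AveragePool pads with 0.
print(symmetric, leftover)                              # prints: 0 1
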
    instruction_ref
    parse_conv(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
...@@ -406,20 +465,40 @@ struct onnx_parser
            auto lens  = args.front()->get_shape().lens();
            op.lengths = {lens[2], lens[3]};
        }
if(contains(attributes, "pads")) if(contains(attributes, "pads"))
{ {
if(contains(attributes, "auto_pad"))
{
auto s = attributes["auto_pad"].s();
if(to_upper(s) != "NOTSET")
{
MIGRAPHX_THROW(
"PARSE_POOLING: auto_pad and padding cannot be specified simultaneously");
}
}
std::vector<std::int64_t> padding; std::vector<std::int64_t> padding;
copy(attributes["pads"].ints(), std::back_inserter(padding)); copy(attributes["pads"].ints(), std::back_inserter(padding));
if(padding.size() != 4) if(padding.size() != 4)
{ {
MIGRAPHX_THROW("padding should have 4 values"); MIGRAPHX_THROW("PARSE_POOLING: padding should have 4 values");
} }
if(padding[0] != padding[2] || padding[1] != padding[3]) if(padding[0] != padding[2] || padding[1] != padding[3])
{ {
// insert zeros for pad op (args[0] has 4 dims) // insert zeros for pad op (args[0] has 4 dims)
padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]}; padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
l0 = prog.add_instruction(op::pad{padding, std::numeric_limits<float>::lowest()}, // MaxPool
l0); if(op.mode == "max")
{
l0 = prog.add_instruction(
op::pad{padding, std::numeric_limits<float>::lowest()}, l0);
}
// AveragePool
else
{
l0 = prog.add_instruction(op::pad{padding}, l0);
}
} }
else else
{ {
...@@ -427,6 +506,7 @@ struct onnx_parser
                op.padding[1] = padding[1];
            }
        }

        if(contains(attributes, "strides"))
        {
            copy(attributes["strides"].ints(), op.stride.begin());
...@@ -435,14 +515,11 @@ struct onnx_parser
        {
            copy(attributes["kernel_shape"].ints(), op.lengths.begin());
        }
if(contains(attributes, "auto_pad")) if(contains(attributes, "auto_pad"))
{ {
auto s = attributes["auto_pad"].s(); auto in_lens = args[0]->get_shape().lens();
if(s.find("SAME_UPPER") == std::string::npos) l0 = process_auto_pad_attribute(l0, attributes, op, in_lens);
{
MIGRAPHX_THROW("auto_pad only supports SAME_UPPER for pooling");
}
op.padding_mode = op::padding_mode_t::same;
} }
return prog.add_instruction(op, l0); return prog.add_instruction(op, l0);
...
averagepool_same_lower_test.onnx (new binary ONNX file): a single AveragePool node from input 'x' to output 'y' with auto_pad = "SAME_LOWER" and a kernel_shape attribute; generated by averagepool_same_lower_test() in the test script below.
averagepool_same_upper_test.onnx (new binary ONNX file): a single AveragePool node from input 'x' to output 'y' with auto_pad = "SAME_UPPER" and a kernel_shape attribute; generated by averagepool_same_upper_test() in the test script below.
...@@ -144,6 +144,50 @@ def atan_test():
    return ([node], [x], [y])
@onnx_test
def averagepool_notset_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 1, 1])
    node = onnx.helper.make_node('AveragePool',
                                 inputs=['x'],
                                 outputs=['y'],
                                 kernel_shape=[6, 6],
                                 strides=[2, 2],
                                 pads=[0, 0, 1, 1],
                                 auto_pad='NOTSET')

    return ([node], [x], [y])


@onnx_test
def averagepool_same_lower_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])
    node = onnx.helper.make_node('AveragePool',
                                 inputs=['x'],
                                 outputs=['y'],
                                 kernel_shape=[2, 2],
                                 auto_pad='SAME_LOWER')

    return ([node], [x], [y])


@onnx_test
def averagepool_same_upper_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])
    node = onnx.helper.make_node('AveragePool',
                                 inputs=['x'],
                                 outputs=['y'],
                                 kernel_shape=[2, 2],
                                 auto_pad='SAME_UPPER')

    return ([node], [x], [y])
@onnx_test
def cast_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [10])
...@@ -1020,6 +1064,36 @@ def max_test():
    return ([node], [a, b, c], [y])
@onnx_test
def maxpool_notset_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 1, 1])
    node = onnx.helper.make_node('MaxPool',
                                 inputs=['x'],
                                 outputs=['y'],
                                 kernel_shape=[6, 6],
                                 strides=[2, 2],
                                 pads=[0, 0, 1, 1],
                                 auto_pad='NOTSET')

    return ([node], [x], [y])


@onnx_test
def maxpool_same_upper_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])
    node = onnx.helper.make_node('MaxPool',
                                 inputs=['x'],
                                 outputs=['y'],
                                 kernel_shape=[2, 2],
                                 auto_pad='SAME_UPPER')

    return ([node], [x], [y])
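
As a side check (not part of the commit), the same model that maxpool_same_upper_test() generates can be built inline and run through onnxruntime to confirm that SAME_UPPER padding preserves the 5x5 spatial shape; this snippet is illustrative and assumes numpy, onnx, and onnxruntime are installed.

import numpy as np
from onnx import helper, TensorProto
import onnxruntime

# Same node definition as maxpool_same_upper_test above.
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])
node = helper.make_node('MaxPool',
                        inputs=['x'],
                        outputs=['y'],
                        kernel_shape=[2, 2],
                        auto_pad='SAME_UPPER')
model = helper.make_model(helper.make_graph([node], 'maxpool_same_upper', [x], [y]))

sess = onnxruntime.InferenceSession(model.SerializeToString(),
                                    providers=['CPUExecutionProvider'])
out = sess.run(None, {'x': np.random.rand(1, 1, 5, 5).astype(np.float32)})[0]
print(out.shape)  # expected (1, 1, 5, 5): SAME padding keeps the spatial dims
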
@onnx_test
def min_test():
    a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
...
maxpool_same_upper_test.onnx (new binary ONNX file): a single MaxPool node from input 'x' to output 'y' with auto_pad = "SAME_UPPER" and a kernel_shape attribute; generated by maxpool_same_upper_test() above.
...@@ -120,6 +120,51 @@ TEST_CASE(atan_test)
    EXPECT(p == prog);
}
TEST_CASE(averagepool_notset_test)
{
    migraphx::program p;
    auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
    std::vector<int64_t> pads = {0, 0, 0, 0, 0, 0, 1, 1};
    auto ins_pad              = p.add_instruction(migraphx::op::pad{pads}, input);
    p.add_instruction(migraphx::op::pooling{"average", {0, 0}, {2, 2}, {6, 6}}, ins_pad);

    auto prog = optimize_onnx("averagepool_notset_test.onnx");
    EXPECT(p == prog);
}

TEST_CASE(averagepool_same_lower_test)
{
    migraphx::program p;
    auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
    std::vector<int64_t> pads = {0, 0, 1, 1, 0, 0, 0, 0};
    auto ins_pad              = p.add_instruction(migraphx::op::pad{pads}, input);
    p.add_instruction(
        migraphx::op::pooling{
            "average", {0, 0}, {1, 1}, {2, 2}, migraphx::op::padding_mode_t::same},
        ins_pad);

    auto prog = optimize_onnx("averagepool_same_lower_test.onnx");
    EXPECT(p == prog);
}

TEST_CASE(averagepool_same_upper_test)
{
    migraphx::program p;
    auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
    std::vector<int64_t> pads = {0, 0, 0, 0, 0, 0, 1, 1};
    auto ins_pad              = p.add_instruction(migraphx::op::pad{pads}, input);
    p.add_instruction(
        migraphx::op::pooling{
            "average", {0, 0}, {1, 1}, {2, 2}, migraphx::op::padding_mode_t::same},
        ins_pad);

    auto prog = optimize_onnx("averagepool_same_upper_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(cast_test)
{
    migraphx::program p;
...@@ -764,6 +809,36 @@ TEST_CASE(max_test)
    optimize_onnx("max_test.onnx");
}
TEST_CASE(maxpool_notset_test)
{
    migraphx::program p;
    auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
    std::vector<int64_t> pads = {0, 0, 0, 0, 0, 0, 1, 1};
    float val                 = std::numeric_limits<float>::lowest();
    auto ins_pad              = p.add_instruction(migraphx::op::pad{pads, val}, input);
    p.add_instruction(migraphx::op::pooling{"max", {0, 0}, {2, 2}, {6, 6}}, ins_pad);

    auto prog = optimize_onnx("maxpool_notset_test.onnx");
    EXPECT(p == prog);
}

TEST_CASE(maxpool_same_upper_test)
{
    migraphx::program p;
    auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
    std::vector<int64_t> pads = {0, 0, 0, 0, 0, 0, 1, 1};
    float val                 = std::numeric_limits<float>::lowest();
    auto ins_pad              = p.add_instruction(migraphx::op::pad{pads, val}, input);
    p.add_instruction(
        migraphx::op::pooling{"max", {0, 0}, {1, 1}, {2, 2}, migraphx::op::padding_mode_t::same},
        ins_pad);

    auto prog = optimize_onnx("maxpool_same_upper_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(min_test)
{
    migraphx::program p;
...