Commit ab6cd9d3 authored by Paul

Merge branch 'develop' into cppcheck

parents ae9ecf1c 2d0d96e8
@@ -16,6 +16,13 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace op {
+enum padding_mode_t
+{
+    default_, // NOLINT
+    same,
+    valid
+};
 struct not_computable
 {
     argument compute(const shape&, const std::vector<argument>&) const
@@ -58,12 +65,7 @@ struct convolution
     std::array<std::size_t, 2> padding  = {{0, 0}};
     std::array<std::size_t, 2> stride   = {{1, 1}};
     std::array<std::size_t, 2> dilation = {{1, 1}};
-    enum padding_mode_t
-    {
-        default_, // NOLINT
-        same,
-        valid
-    };
     padding_mode_t padding_mode = default_;
     int group                   = 1;
@@ -138,12 +140,7 @@ struct im2col
     std::array<std::size_t, 2> padding  = {{0, 0}};
     std::array<std::size_t, 2> stride   = {{1, 1}};
     std::array<std::size_t, 2> dilation = {{1, 1}};
-    enum padding_mode_t
-    {
-        default_, // NOLINT
-        same,
-        valid
-    };
     padding_mode_t padding_mode = default_;
     template <class Self, class F>
@@ -189,12 +186,14 @@ struct pooling
     std::array<std::size_t, 2> padding = {{0, 0}};
     std::array<std::size_t, 2> stride  = {{1, 1}};
     std::array<std::size_t, 2> lengths = {{1, 1}};
+    padding_mode_t padding_mode        = default_;
     template <class Self, class F>
     static auto reflect(Self& self, F f)
     {
         return pack(f(self.mode, "mode"),
                     f(self.padding, "padding"),
+                    f(self.padding_mode, "padding_mode"),
                     f(self.stride, "stride"),
                     f(self.lengths, "lengths"));
     }
@@ -211,7 +210,10 @@ struct pooling
         assert(lengths[0] <= (input.lens()[2] + 2 * padding[0]));
         assert(lengths[1] <= (input.lens()[3] + 2 * padding[1]));
-        return {t,
+        if(padding_mode == default_)
+        {
+            return {
+                t,
                 {
                     input.lens()[0],
                     input.lens()[1],
@@ -226,6 +228,39 @@ struct pooling
                                                 static_cast<float>(stride[1]))) +
                          1)),
                 }};
+        }
+        else if(padding_mode == same)
+        {
+            return {t,
+                    {input.lens()[0],
+                     input.lens()[1],
+                     static_cast<std::size_t>(
+                         std::ceil(static_cast<double>(input.lens()[2]) / stride[0])),
+                     static_cast<std::size_t>(
+                         std::ceil(static_cast<double>(input.lens()[3]) / stride[1]))}};
+        }
+        else if(padding_mode == valid)
+        {
+            return {t,
+                    {
+                        input.lens()[0],
+                        input.lens()[1],
+                        std::size_t(std::max<std::ptrdiff_t>(
+                            1,
+                            std::ptrdiff_t(std::floor((input.lens()[2] - lengths[0]) /
+                                                      static_cast<float>(stride[0]))) +
+                                1)),
+                        std::size_t(std::max<std::ptrdiff_t>(
+                            1,
+                            std::ptrdiff_t(std::floor((input.lens()[3] - lengths[1]) /
+                                                      static_cast<float>(stride[1]))) +
+                                1)),
+                    }};
+        }
+        else
+        {
+            MIGRAPHX_THROW("Invalid padding mode");
+        }
     }
 };
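The three branches above differ only in how the output spatial size is derived: default_ applies the explicit padding (the usual floor((H + 2P - L) / S) + 1 form, clamped to at least 1), same keeps the output at ceil(H / S) regardless of window length, and valid uses no padding at all. A minimal standalone sketch (values chosen for illustration, not taken from the diff) evaluating all three for a 63-element axis, a 2-wide window, and stride 2:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main()
{
    const std::ptrdiff_t h = 63, l = 2, s = 2, p = 0;
    // default_: floor((h + 2p - l) / s) + 1, clamped to at least 1
    // (integer division equals floor here since the operands are non-negative)
    auto def = std::max<std::ptrdiff_t>(1, (h + 2 * p - l) / s + 1); // 31
    // same: ceil(h / s), independent of the window length
    auto same = (h + s - 1) / s; // 32
    // valid: floor((h - l) / s) + 1, clamped to at least 1, no padding
    auto valid = std::max<std::ptrdiff_t>(1, (h - l) / s + 1); // 31
    std::printf("default=%td same=%td valid=%td\n", def, same, valid);
    return 0;
}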
@@ -614,13 +649,13 @@ struct pad
 {
     std::vector<int64_t> pads;
     float value = 0.0f;
-    enum padding_mode_t
+    enum pad_op_mode_t
     {
         constant_pad,
         reflect_pad,
         edge_pad
     };
-    padding_mode_t mode = constant_pad;
+    pad_op_mode_t mode = constant_pad;
     template <class Self, class F>
     static auto reflect(Self& self, F f)
......
@@ -215,24 +215,30 @@ struct onnx_parser
     parse_conv(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
     {
         op::convolution op;
+        auto l0 = args[0];
         if(contains(attributes, "pads"))
         {
             if(contains(attributes, "auto_pad"))
             {
                 MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
             }
-            std::vector<std::size_t> padding(4);
-            copy(attributes["pads"].ints(), padding.begin());
+            std::vector<std::int64_t> padding;
+            copy(attributes["pads"].ints(), std::back_inserter(padding));
             if(padding.size() != 4)
             {
                 MIGRAPHX_THROW("padding should have 4 values");
             }
             if(padding[0] != padding[2] || padding[1] != padding[3])
             {
-                MIGRAPHX_THROW("migraphx does not support asymetric padding");
+                // insert zeros for pad op (args[0] has 4 dims)
+                padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
+                l0      = prog.add_instruction(op::pad{padding}, l0);
+            }
+            else
+            {
+                op.padding[0] = padding[0];
+                op.padding[1] = padding[1];
             }
-            op.padding[0] = padding[0];
-            op.padding[1] = padding[1];
         }
         if(contains(attributes, "strides"))
         {
@@ -252,7 +258,7 @@ struct onnx_parser
             if(s.find("SAME") != std::string::npos)
             {
-                op.padding_mode = op::convolution::same;
+                op.padding_mode = op::padding_mode_t::same;
             }
         }
         if(contains(attributes, "group"))
@@ -266,7 +272,7 @@ struct onnx_parser
             auto l2 = prog.add_instruction(op::broadcast{axis, l1->get_shape()}, args[2]);
             return prog.add_instruction(op::add{}, l1, l2);
         }
-        return prog.add_instruction(op, args);
+        return prog.add_instruction(op, l0, args[1]);
     }
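When the four ONNX pads values are asymmetric, parse_conv now routes the input through an explicit op::pad instead of rejecting the model. The remapping packs the four spatial values into the eight per-dimension begin/end values the pad op takes on a 4-D NCHW tensor, never padding N or C. A sketch of that mapping as a standalone helper (the function name is illustrative, not from the diff):

#include <cstdint>
#include <vector>

// ONNX "pads" for a 2-D conv/pool is {top, left, bottom, right}; op::pad
// takes all per-dimension begin values followed by all end values.
std::vector<std::int64_t> to_pad_op_pads(const std::vector<std::int64_t>& onnx_pads)
{
    return {0, 0, onnx_pads[0], onnx_pads[1],  // begins: N, C, H, W
            0, 0, onnx_pads[2], onnx_pads[3]}; // ends:   N, C, H, W
}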
     instruction_ref parse_pooling(const std::string& name,
@@ -274,6 +280,7 @@ struct onnx_parser
                                   std::vector<instruction_ref> args)
     {
         op::pooling op{ends_with(name, "MaxPool") ? "max" : "average"};
+        auto l0 = args[0];
         if(starts_with(name, "Global"))
         {
             auto lens = args.front()->get_shape().lens();
@@ -281,18 +288,23 @@ struct onnx_parser
         }
         if(contains(attributes, "pads"))
         {
-            std::vector<std::size_t> padding(4);
-            copy(attributes["pads"].ints(), padding.begin());
+            std::vector<std::int64_t> padding;
+            copy(attributes["pads"].ints(), std::back_inserter(padding));
             if(padding.size() != 4)
             {
                 MIGRAPHX_THROW("padding should have 4 values");
             }
             if(padding[0] != padding[2] || padding[1] != padding[3])
             {
-                MIGRAPHX_THROW("migraphx does not support asymetric padding");
+                // insert zeros for pad op (args[0] has 4 dims)
+                padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
+                l0      = prog.add_instruction(op::pad{padding}, l0);
+            }
+            else
+            {
+                op.padding[0] = padding[0];
+                op.padding[1] = padding[1];
             }
-            op.padding[0] = padding[0];
-            op.padding[1] = padding[1];
         }
         if(contains(attributes, "strides"))
         {
@@ -305,13 +317,14 @@ struct onnx_parser
         if(contains(attributes, "auto_pad"))
         {
             auto s = attributes["auto_pad"].s();
-            if(to_upper(s) != "NOTSET")
+            if(s.find("SAME_UPPER") == std::string::npos)
             {
-                MIGRAPHX_THROW("auto_pad is not supported for pooling");
+                MIGRAPHX_THROW("auto_pad only supports SAME_UPPER for pooling");
             }
+            op.padding_mode = op::padding_mode_t::same;
         }
-        return prog.add_instruction(op, std::move(args));
+        return prog.add_instruction(op, l0);
     }
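parse_pooling now accepts exactly one auto_pad setting, SAME_UPPER, and records it as padding_mode_t::same; the actual pad amounts are implied by the ceil-based output shape rather than stored on the op. For reference, a hedged sketch of the standard SAME_UPPER split, where any odd leftover element goes to the end of the axis (helper name illustrative, not from the diff):

#include <algorithm>
#include <cstddef>
#include <utility>

// Total padding so that output == ceil(h / s); SAME_UPPER puts the extra
// element (when the total is odd) at the end of the axis.
std::pair<std::size_t, std::size_t> same_upper_pads(std::size_t h, std::size_t l, std::size_t s)
{
    std::size_t out      = (h + s - 1) / s; // ceil(h / s)
    std::ptrdiff_t total = std::max<std::ptrdiff_t>(
        0, std::ptrdiff_t((out - 1) * s + l) - std::ptrdiff_t(h));
    return {total / 2, total - total / 2}; // {pad_begin, pad_end}
}

For the 63/2/2 example above this yields {0, 1}: a single trailing element of padding gives the 32-element output the same shape rule promises.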
     instruction_ref
......
@@ -953,6 +953,22 @@ struct test_pad
     }
 };
+struct test_pooling_autopad
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        migraphx::shape s0{migraphx::shape::float_type, {1, 3, 63, 63}};
+        auto l0 = p.add_parameter("x", s0);
+        migraphx::op::pooling op{"max"};
+        op.padding_mode = migraphx::op::padding_mode_t::same;
+        op.lengths      = {2, 2};
+        op.stride       = {2, 2};
+        p.add_instruction(op, l0);
+        return p;
+    }
+};
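Given the same shape rule above, this test's 1x3x63x63 input with 2x2 windows and stride 2 should produce a 1x3x32x32 output (ceil(63 / 2) = 32), exercising the ceil-based branch end to end.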
 struct test_gather
 {
     migraphx::program create_program() const
@@ -1070,6 +1086,7 @@ struct test_conv_bn_relu_pooling2
 int main()
 {
+    verify_program<test_pooling_autopad>();
     verify_program<test_abs>();
     verify_program<test_concat>();
     verify_program<test_concat2>();
......