Unverified Commit dced4d4b authored by Shucai Xiao, committed by GitHub

Average pooling asymmetric padding (#553)



* initial progress

* formatting

* add pooling changes

* formatting

* change eliminate_pad

* formatting

* rename var

* formatting

* update op shape test and compute

* formatting

* revert conv constructor

* formatting

* change initializer

* formatting

* fix tidy

* change quant conv and shape check

* add tests and fixes

* formatting

* fix type

* fix conv test

* formatting

* add pooling and bn tests

* formatting

* add inconsistent attr tests

* fix padding issue

* formatting

* progress on 1d to 2d

* formatting

* change compute and compile functions

* formatting

* fix duplicate

* fix conflict

* fix issue with 1d conv

* formatting

* add check for 3d limit

* rename function

* formatting

* update to MIOpen 2.3

* add support for nd pooling

* formatting

* test miopen 2.4

* change function name

* rename functions

* formatting

* add op_shape test

* add gpu ops tests

* formatting

* add pkg-config

* add support for asymmetric padding of averagepool

* clang format

* fix bug for average pooling

* clang format

* fix a bug

* add unit tests for the asymmetric padding of averagepool

* clang format

* change functions

* formatting

* additional code refinement

* clang format

* change to copy_backward

* formatting

* remove an optimization for pooling

* clang format

* add and fix unit tests

* clang format

* test diff miopen version

* add pooling shape tests

* temp disable test

* fix cppcheck error

* fix cppcheck error

* revert to miopen 2.4

* fix review comments

* fix review comments

* clang format

* fixed review comments

* clang format

* fix cppcheck error
Co-authored-by: Khalique <15948690+kahmed10@users.noreply.github.com>
Co-authored-by: mvermeulen <5479696+mvermeulen@users.noreply.github.com>
parent e66968a2
@@ -26,7 +26,7 @@ void eliminate_pad::apply(program& p) const
         else if(op_name == "im2col")
             update_op(op::im2col{}, input, ins, p);
         else if(op_name == "pooling")
-            update_op(op::pooling{}, input, ins, p);
+            update_pooling(input, ins, p);
     }
 }
@@ -54,5 +54,32 @@ void eliminate_pad::update_op(T,
     p.replace_instruction(ins, op, new_inputs);
 }

+void eliminate_pad::update_pooling(const instruction_ref& input,
+                                   const instruction_ref& ins,
+                                   program& p) const
+{
+    auto pad_op = any_cast<op::pad>(input->get_operator());
+    if(!pad_op.symmetric())
+        return;
+
+    auto kdims    = input->get_shape().lens().size() - 2;
+    auto kdims_it = pad_op.pads.begin() + 2;
+    std::vector<size_t> new_pads(kdims_it, kdims_it + kdims);
+
+    auto op = any_cast<op::pooling>(ins->get_operator());
+    if(op.mode == "average")
+    {
+        return;
+    }
+    op.padding = new_pads;
+
+    std::vector<instruction_ref> new_inputs{ins->inputs()};
+    new_inputs.front() = input->inputs().front();
+    p.replace_instruction(ins, op, new_inputs);
+}
+
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
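For context, a minimal sketch (not part of this commit) of the rewrite update_pooling performs, written in the style of the rewrite_test hunk further down and assuming its run_pass(p) helper. Only a pooling fed by a symmetric pad is rewritten, and average pooling is now left alone, presumably because folding an explicit zero pad into the pooling op would change the averaging result:

// Sketch only: assumes the run_pass helper used by the existing eliminate_pad tests.
migraphx::program p;
auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
// symmetric pad of 1 on both spatial dims: {n, c, h, w} begins, then ends
auto pad = p.add_instruction(migraphx::op::pad{{0, 0, 1, 1, 0, 0, 1, 1}}, x);
p.add_instruction(migraphx::op::pooling{"max", {0, 0}, {1, 1}, {2, 2}}, pad);
run_pass(p);
// afterwards the pooling reads x directly and carries the padding itself:
//   pooling{"max", {1, 1}, {1, 1}, {2, 2}}(x)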
@@ -22,6 +22,8 @@ struct eliminate_pad
     void apply(program& p) const;

     template <class T>
     void update_op(T, const instruction_ref& input, const instruction_ref& ins, program& p) const;
+    void update_pooling(const instruction_ref& input, const instruction_ref& ins, program& p) const;
 };

 } // namespace MIGRAPHX_INLINE_NS
...
@@ -320,7 +320,7 @@ struct onnx_parser
         return curr_ins;
     }

-    bool is_asym_padding(const std::vector<int64_t>& padding)
+    static bool is_asym_padding(const std::vector<int64_t>& padding)
     {
         assert(padding.size() % 2 == 0);
         size_t pad_ndims = padding.size() / 2;
@@ -339,13 +339,14 @@ struct onnx_parser
     void check_asym_padding(instruction_ref& ins,
                             const std::vector<int64_t>& padding,
                             Op& op,
+                            int count_include_pad = 0,
                             float pad_val = 0)
     {
         size_t pad_ndims  = padding.size() / 2;
         auto left_pad_it  = padding.begin();
         auto right_pad_it = left_pad_it + pad_ndims;
-        if(is_asym_padding(padding))
+        if(is_asym_padding(padding) or count_include_pad == 1)
         {
             std::vector<int64_t> asym_pads{0, 0, 0, 0}; // don't pad N and C
             // add left pads
@@ -447,47 +448,6 @@ struct onnx_parser
         }
     }

-    template <class Op>
-    instruction_ref process_auto_pad_attribute(instruction_ref ins,
-                                               node_info info,
-                                               Op& op,
-                                               std::vector<std::size_t> k_lens,
-                                               std::vector<std::size_t> dilation,
-                                               const std::vector<std::size_t>& in_lens,
-                                               float value = 0.0f)
-    {
-        size_t kdims = in_lens.size() - 2;
-        assert(k_lens.size() == kdims and dilation.size() == kdims);
-
-        if(!contains(info.attributes, "auto_pad"))
-        {
-            return ins;
-        }
-
-        auto auto_pad = info.attributes["auto_pad"].s();
-        if(auto_pad.find("SAME") != std::string::npos)
-        {
-            op.padding_mode    = op::padding_mode_t::same;
-            bool is_same_upper = (auto_pad.find("SAME_UPPER") != std::string::npos);
-            std::vector<int64_t> padding(2 * kdims);
-
-            for(size_t i = 0; i < padding.size() / 2; i++)
-            {
-                calculate_padding(i,
-                                  padding,
-                                  in_lens[i + 2],
-                                  op.stride[i],
-                                  dilation[i],
-                                  k_lens[i],
-                                  is_same_upper);
-            }
-
-            check_asym_padding(ins, padding, op, value);
-        }
-
-        return ins;
-    }
-
     void calc_reflect_indices(std::vector<int>& indices, const int64_t num_dims)
     {
         int k = 0;
@@ -593,6 +553,56 @@ struct onnx_parser
         }
     }

+    template <class Op>
+    static void cal_auto_padding_size(node_info info,
+                                      Op& op,
+                                      const std::vector<std::size_t>& k_lens,
+                                      const std::vector<std::size_t>& dilation,
+                                      const std::vector<std::size_t>& in_lens,
+                                      std::vector<int64_t>& paddings)
+    {
+        size_t kdims = in_lens.size() - 2;
+        assert(k_lens.size() == kdims and dilation.size() == kdims);
+
+        if(!contains(info.attributes, "auto_pad"))
+        {
+            return;
+        }
+
+        auto auto_pad = info.attributes["auto_pad"].s();
+        if(auto_pad.find("SAME") != std::string::npos)
+        {
+            op.padding_mode    = op::padding_mode_t::same;
+            bool is_same_upper = (auto_pad.find("SAME_UPPER") != std::string::npos);
+            paddings.resize(2 * kdims);
+
+            for(size_t i = 0; i < paddings.size() / 2; i++)
+            {
+                calculate_padding(i,
+                                  paddings,
+                                  in_lens[i + 2],
+                                  op.stride[i],
+                                  dilation[i],
+                                  k_lens[i],
+                                  is_same_upper);
+            }
+        }
+    }
+
+    static void check_padding_mode(node_info info, const std::string& op_name)
+    {
+        // ensure pads available only when auto_pad is "NOT_SET"
+        if(contains(info.attributes, "pads") and contains(info.attributes, "auto_pad"))
+        {
+            auto s = info.attributes["auto_pad"].s();
+            if(to_upper(s) != "NOTSET")
+            {
+                MIGRAPHX_THROW("PARSE_" + op_name +
+                               ": auto_pad and padding cannot be specified simultaneously");
+            }
+        }
+    }
+
     template <class Op>
     instruction_ref
     parse_conv(const std::string&, node_info info, std::vector<instruction_ref> args)
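cal_auto_padding_size leaves the per-dimension arithmetic to the pre-existing calculate_padding helper, which is not part of this diff. For reference, a standalone sketch of the standard ONNX SAME_UPPER/SAME_LOWER rule that helper is assumed to implement (the helper above remains the source of truth):

#include <algorithm>
#include <cstdint>
#include <iostream>

// Assumed ONNX "SAME" rule: output = ceil(in / stride), total pad chosen so the
// dilated kernel covers that many positions; SAME_LOWER puts the extra pixel first.
void same_padding(int64_t in, int64_t stride, int64_t dilation, int64_t kernel,
                  bool same_upper, int64_t& pad_begin, int64_t& pad_end)
{
    int64_t out   = (in + stride - 1) / stride;      // ceil(in / stride)
    int64_t eff_k = (kernel - 1) * dilation + 1;     // dilated kernel extent
    int64_t total = std::max<int64_t>(0, (out - 1) * stride + eff_k - in);
    pad_begin     = same_upper ? total / 2 : total - total / 2;
    pad_end       = total - pad_begin;
}

int main()
{
    int64_t b = 0, e = 0;
    // 5-wide input, 2-wide kernel, stride 1, SAME_LOWER: total pad 1, all on the left,
    // which matches the pads {1, 1, 0, 0} expected by the averagepool_sl_cip tests below.
    same_padding(5, 1, 1, 2, /*same_upper=*/false, b, e);
    std::cout << b << " " << e << "\n"; // prints "1 0"
}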
@@ -604,23 +614,9 @@ struct onnx_parser
         assert(in_lens.size() > 2);
         auto kdims = in_lens.size() - 2;

-        std::vector<int64_t> padding;
-        if(contains(info.attributes, "pads"))
-        {
-            if(contains(info.attributes, "auto_pad"))
-            {
-                auto s = info.attributes["auto_pad"].s();
-                if(contains(info.attributes, "pads") and to_upper(s) != "NOTSET")
-                {
-                    MIGRAPHX_THROW(
-                        "PARSE_CONV: auto_pad and padding cannot be specified simultaneously");
-                }
-            }
-            op.padding.clear();
-            copy(info.attributes["pads"].ints(), std::back_inserter(padding));
-            check_attr_sizes(kdims, padding.size() / 2, "PARSE_CONV: inconsistent paddings");
-            check_asym_padding(l0, padding, op);
-        }
+        // ensure pads available only when auto_pad is "NOT_SET"
+        check_padding_mode(info, "CONV");
+
         if(contains(info.attributes, "strides"))
         {
             op.stride.clear();
@@ -633,13 +629,24 @@ struct onnx_parser
             copy(info.attributes["dilations"].ints(), std::back_inserter(op.dilation));
             check_attr_sizes(kdims, op.dilation.size(), "PARSE_CONV: inconsistent dilations");
         }
+
+        std::vector<int64_t> padding;
+        if(contains(info.attributes, "pads"))
+        {
+            op.padding.clear();
+            copy(info.attributes["pads"].ints(), std::back_inserter(padding));
+            check_attr_sizes(kdims, padding.size() / 2, "PARSE_CONV: inconsistent paddings");
+        }
+
         if(contains(info.attributes, "auto_pad"))
         {
             auto weight_lens = weights->get_shape().lens();
             std::vector<std::size_t> k_lens(weight_lens.begin() + 2, weight_lens.end());
-            l0 = process_auto_pad_attribute(l0, info, op, k_lens, op.dilation, in_lens);
+            cal_auto_padding_size(info, op, k_lens, op.dilation, in_lens, padding);
         }
+        check_asym_padding(l0, padding, op);
+
         if(contains(info.attributes, "group"))
         {
             op.group = parse_value(info.attributes.at("group")).at<int>();
@@ -662,17 +669,11 @@ struct onnx_parser
         assert(in_lens.size() > 2);
         auto kdims = in_lens.size() - 2;

+        // ensure pads available only when auto_pad is "NOT_SET"
+        check_padding_mode(info, "CONV_TRANSPOSE");
+
         if(contains(info.attributes, "pads"))
         {
-            if(contains(info.attributes, "auto_pad"))
-            {
-                auto s = info.attributes["auto_pad"].s();
-                if(contains(info.attributes, "pads") and to_upper(s) != "NOTSET")
-                {
-                    MIGRAPHX_THROW("PARSE_CONV_TRANSPOSE: auto_pad and padding cannot be specified "
-                                   "simultaneously");
-                }
-            }
             copy(info.attributes["pads"].ints(), std::back_inserter(padding));
             asym_padding = is_asym_padding(padding);
@@ -778,6 +779,49 @@ struct onnx_parser
         return add_bias(args, l1, 1);
     }

+    static void
+    tune_padding_to_symmetric(int64_t& left, int64_t& right, const int stride, int64_t& s_start)
+    {
+        s_start = 0;
+        if(left > right)
+        {
+            right = left;
+        }
+        else if(left < right)
+        {
+            auto diff = right - left;
+            s_start   = (diff + stride - 1) / stride;
+            left      = left + s_start * stride;
+            right     = left;
+        }
+    }
+
+    static void tune_padding_size(const op::pooling& op,
+                                  std::vector<int64_t>& padding,
+                                  int count_include_pad,
+                                  std::vector<int64_t>& s_start)
+    {
+        // maxpooling or count_include_pad is 1, no change is required.
+        if(op.mode == "max" or count_include_pad == 1)
+        {
+            return;
+        }
+
+        // if padding is symmetric, return directly
+        if(!is_asym_padding(padding))
+        {
+            return;
+        }
+
+        // asymmetric padding, make it symmetric
+        std::size_t n_dims = padding.size() / 2;
+        s_start.resize(n_dims);
+        for(std::size_t i = 0; i < n_dims; ++i)
+        {
+            tune_padding_to_symmetric(padding[i], padding[i + n_dims], op.stride[i], s_start[i]);
+        }
+    }
+
     instruction_ref
     parse_pooling(const std::string& name, node_info info, std::vector<instruction_ref> args)
     {
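To make the tuning concrete, here is a small standalone check (not part of the commit) of the arithmetic for the averagepool_notset_test case used further down: ONNX pads [0, 0, 1, 1], i.e. no padding before and one element of padding after each spatial dim, with stride 2:

#include <cassert>
#include <cstdint>

// Mirrors tune_padding_to_symmetric from the diff above, for a single dimension.
static void tune_dim(int64_t& left, int64_t& right, int stride, int64_t& s_start)
{
    s_start = 0;
    if(left > right)
    {
        right = left;
    }
    else if(left < right)
    {
        auto diff = right - left;
        s_start   = (diff + stride - 1) / stride; // extra leading outputs to slice away
        left      = left + s_start * stride;
        right     = left;
    }
}

int main()
{
    int64_t left = 0, right = 1, s_start = 0; // pads per spatial dim: begin 0, end 1
    tune_dim(left, right, /*stride=*/2, s_start);
    assert(left == 2 && right == 2 && s_start == 1);
    // The parser therefore emits pooling with symmetric padding {2, 2} followed by
    // slice{axes {2, 3}, starts {1, 1}, ends {2, 2}}, which is exactly the IR the
    // averagepool_notset_test parse test below expects.
    return 0;
}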
@@ -792,27 +836,21 @@ struct onnx_parser
             op.lengths = std::vector<size_t>(in_lens.begin() + 2, in_lens.end());
         }

-        if(contains(info.attributes, "pads"))
-        {
-            if(contains(info.attributes, "auto_pad"))
-            {
-                auto s = info.attributes["auto_pad"].s();
-                if(to_upper(s) != "NOTSET")
-                {
-                    MIGRAPHX_THROW(
-                        "PARSE_POOLING: auto_pad and padding cannot be specified simultaneously");
-                }
-            }
-            op.padding.clear();
-            std::vector<int64_t> padding;
-            copy(info.attributes["pads"].ints(), std::back_inserter(padding));
-            check_attr_sizes(kdims, padding.size() / 2, "PARSE_POOLING: inconsistent paddings");
-
-            float pad_val = 0;
-            if(op.mode == "max")
-                pad_val = std::numeric_limits<float>::lowest();
-            check_asym_padding(l0, padding, op, pad_val);
-            in_lens = l0->get_shape().lens();
-        }
+        // does not support ceil_mode
+        if(contains(info.attributes, "ceil_mode"))
+        {
+            if(info.attributes.at("ceil_mode").i() == 1)
+            {
+                MIGRAPHX_THROW("PARSE_POOLING: pool does not support ceil_mode");
+            }
+        }
+
+        // count include padding; if count_include_pad is 1, we always use an
+        // explicit pad
+        int count_include_pad = 0;
+        if(contains(info.attributes, "count_include_pad"))
+        {
+            count_include_pad = info.attributes.at("count_include_pad").i();
+        }

         if(contains(info.attributes, "strides"))
@@ -828,18 +866,30 @@ struct onnx_parser
             check_attr_sizes(kdims, op.lengths.size(), "PARSE_POOLING: inconsistent lengths");
         }

-        if(contains(info.attributes, "auto_pad"))
-        {
-            op.padding.clear();
-            float val = 0.0f;
-            // MaxPool
-            if(op.mode == "max")
-            {
-                val = std::numeric_limits<float>::lowest();
-            }
-            l0      = process_auto_pad_attribute(l0, info, op, op.lengths, {1, 1}, in_lens, val);
-            in_lens = l0->get_shape().lens();
-        }
+        // ensure pads available only when auto_pad is "NOT_SET"
+        check_padding_mode(info, "POOLING");
+
+        std::vector<int64_t> paddings;
+        float pad_val = ((op.mode == "max") ? std::numeric_limits<float>::lowest() : 0.0f);
+        if(contains(info.attributes, "pads"))
+        {
+            op.padding.clear();
+            copy(info.attributes["pads"].ints(), std::back_inserter(paddings));
+            check_attr_sizes(
+                kdims, paddings.size() / 2, "PARSE_POOLING: inconsistent explicit paddings");
+        }
+
+        if(contains(info.attributes, "auto_pad"))
+        {
+            op.padding.clear();
+            // the returned paddings could be empty; default to 0 (no padding) below
+            cal_auto_padding_size(info, op, op.lengths, {1, 1}, in_lens, paddings);
+        }
+
+        if(paddings.size() != 2 * kdims)
+        {
+            paddings.resize(kdims * 2);
+            std::fill_n(paddings.begin(), 2 * kdims, 0);
+        }

         if(op.padding.size() != kdims)
@@ -847,19 +897,56 @@
             op.padding.resize(kdims);
             std::fill_n(op.padding.begin(), kdims, 0);
         }

         if(op.stride.size() != kdims)
         {
             op.stride.resize(kdims);
             std::fill_n(op.stride.begin(), kdims, 1);
         }

+        // used to calculate the supposed output shape
+        std::vector<int64_t> orig_padding(paddings.begin(), paddings.end());
+
+        std::vector<int64_t> slice_start;
+        std::vector<int64_t> slice_end;
+        tune_padding_size(op, paddings, count_include_pad, slice_start);
+
+        if(!slice_start.empty())
+        {
+            // calculate expected output shape
+            orig_padding.insert(orig_padding.begin() + kdims, 2, 0);
+            orig_padding.insert(orig_padding.begin(), 2, 0);
+            op::pad pad{orig_padding, 0.0f};
+            shape padded_shape = pad.compute_shape({l0->get_shape()});
+            auto out_lens      = op.compute_shape({padded_shape}).lens();
+
+            // compute slice_end information
+            slice_end.resize(slice_start.size());
+            std::transform(out_lens.begin() + 2,
+                           out_lens.end(),
+                           slice_start.begin(),
+                           slice_end.begin(),
+                           [](auto i, auto j) { return i + j; });
+        }
+
+        check_asym_padding(l0, paddings, op, count_include_pad, pad_val);
+        in_lens = l0->get_shape().lens();
+
         for(size_t i = 0; i < kdims; i++)
         {
             if(op.lengths[i] > in_lens[i + 2] + 2 * op.padding[i])
+            {
                 MIGRAPHX_THROW("PARSE_POOLING: kernel shape is too large");
+            }
         }

-        return prog.add_instruction(op, l0);
+        auto l1 = prog.add_instruction(op, l0);
+        if(!slice_start.empty())
+        {
+            std::vector<int64_t> axes(kdims);
+            std::iota(axes.begin(), axes.end(), 2);
+            l1 = prog.add_instruction(op::slice{axes, slice_start, slice_end}, l1);
+        }
+
+        return l1;
     }

     instruction_ref
...
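The reason count_include_pad has to flow into check_asym_padding is semantic: the pooling op's own average excludes padded positions from the divisor, so when count_include_pad is 1 even symmetric padding is turned into an explicit op::pad, making the zeros part of the data and the divisor the full kernel area. The two CPU tests at the end bear this out (300/25 = 12 vs 300/36 ≈ 8.33). A small illustrative sketch of the difference, with made-up values that are not taken from the tests:

#include <iostream>
#include <vector>

// 1-D average over a window of width k that may overlap `pad_left` zeros of left
// padding; include_pad controls whether those zeros count in the divisor.
float avg_window(const std::vector<float>& in, int pad_left, int start, int k, bool include_pad)
{
    float sum = 0;
    int count = 0;
    for(int i = start; i < start + k; ++i)
    {
        int j       = i - pad_left; // index into the unpadded input
        bool inside = (j >= 0 and j < static_cast<int>(in.size()));
        if(inside)
            sum += in[j];
        if(inside or include_pad)
            ++count;
    }
    return sum / count;
}

int main()
{
    std::vector<float> in = {1, 2, 3, 4, 5};
    // window of width 2 starting on the single padded element to the left
    std::cout << avg_window(in, 1, 0, 2, false) << "\n"; // 1.0: divide by the 1 real element
    std::cout << avg_window(in, 1, 0, 2, true) << "\n";  // 0.5: the padded zero is counted
}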
@@ -52,7 +52,7 @@ TEST_CASE(rewrite_test)
     auto l0 = create_im2col(padded_img, channels, p);
     auto l1 = create_conv(padded_img, channels, p);
-    auto l2 = p.add_instruction(migraphx::op::pooling{}, padded_img);
+    auto l2 = p.add_instruction(migraphx::op::pooling{"max"}, padded_img);
     p.add_instruction(migraphx::op::identity{}, l0, l1, l2);

     run_pass(p);
...
[Binary file added: averagepool_sl_cip_test.onnx — an AveragePool node with auto_pad=SAME_LOWER, count_include_pad=1 and kernel_shape=[2, 2] over a 1x1x5x5 float input, produced by the averagepool_sl_cip_test() generator added below. No newline at end of file.]
@@ -228,6 +228,23 @@ def averagepool_notset_test():
     return ([node], [x], [y])


+@onnx_test
+def averagepool_nt_cip_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 1, 1])
+
+    node = onnx.helper.make_node('AveragePool',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 kernel_shape=[6, 6],
+                                 strides=[2, 2],
+                                 pads=[0, 0, 1, 1],
+                                 auto_pad='NOTSET',
+                                 count_include_pad=1)
+
+    return ([node], [x], [y])
+
+
 @onnx_test
 def averagepool_same_lower_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
@@ -242,6 +259,21 @@ def averagepool_same_lower_test():
     return ([node], [x], [y])


+@onnx_test
+def averagepool_sl_cip_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])
+
+    node = onnx.helper.make_node('AveragePool',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 kernel_shape=[2, 2],
+                                 auto_pad='SAME_LOWER',
+                                 count_include_pad=1)
+
+    return ([node], [x], [y])
+
+
 @onnx_test
 def averagepool_same_upper_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
...
@@ -177,30 +177,57 @@ TEST_CASE(averagepool_3d_test)
 }

 TEST_CASE(averagepool_notset_test)
+{
+    migraphx::program p;
+    auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
+    auto ins   = p.add_instruction(migraphx::op::pooling{"average", {2, 2}, {2, 2}, {6, 6}}, input);
+    auto ret   = p.add_instruction(migraphx::op::slice{{2, 3}, {1, 1}, {2, 2}}, ins);
+    p.add_return({ret});
+
+    auto prog = migraphx::parse_onnx("averagepool_notset_test.onnx");
+
+    EXPECT(p == prog);
+}
+
+TEST_CASE(averagepool_nt_cip_test)
 {
     migraphx::program p;
     auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
     std::vector<int64_t> pads = {0, 0, 0, 0, 0, 0, 1, 1};
     auto ins_pad = p.add_instruction(migraphx::op::pad{pads}, input);
-    p.add_instruction(migraphx::op::pooling{"average", {0, 0}, {2, 2}, {6, 6}}, ins_pad);
+    auto ret = p.add_instruction(migraphx::op::pooling{"average", {0, 0}, {2, 2}, {6, 6}}, ins_pad);
+    p.add_return({ret});

-    auto prog = optimize_onnx("averagepool_notset_test.onnx");
+    auto prog = migraphx::parse_onnx("averagepool_nt_cip_test.onnx");

     EXPECT(p == prog);
 }

 TEST_CASE(averagepool_same_lower_test)
+{
+    migraphx::program p;
+    auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
+    auto ins   = p.add_instruction(
+        migraphx::op::pooling{
+            "average", {1, 1}, {1, 1}, {2, 2}, migraphx::op::padding_mode_t::same},
+        input);
+    auto ret = p.add_instruction(migraphx::op::slice{{2, 3}, {0, 0}, {5, 5}}, ins);
+    p.add_return({ret});
+
+    auto prog = migraphx::parse_onnx("averagepool_same_lower_test.onnx");
+
+    EXPECT(p == prog);
+}
+
+TEST_CASE(averagepool_sl_cip_test)
 {
     migraphx::program p;
     auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
     std::vector<int64_t> pads = {0, 0, 1, 1, 0, 0, 0, 0};
     auto ins_pad = p.add_instruction(migraphx::op::pad{pads}, input);
-    p.add_instruction(
+    auto ret = p.add_instruction(
         migraphx::op::pooling{
             "average", {0, 0}, {1, 1}, {2, 2}, migraphx::op::padding_mode_t::same},
         ins_pad);
+    p.add_return({ret});

-    auto prog = optimize_onnx("averagepool_same_lower_test.onnx");
+    auto prog = migraphx::parse_onnx("averagepool_sl_cip_test.onnx");

     EXPECT(p == prog);
 }

@@ -209,14 +236,13 @@ TEST_CASE(averagepool_same_upper_test)
 {
     migraphx::program p;
     auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
-    std::vector<int64_t> pads = {0, 0, 0, 0, 0, 0, 1, 1};
-    auto ins_pad = p.add_instruction(migraphx::op::pad{pads}, input);
-    p.add_instruction(
-        migraphx::op::pooling{
-            "average", {0, 0}, {1, 1}, {2, 2}, migraphx::op::padding_mode_t::same},
-        ins_pad);
+    auto ins = p.add_instruction(
+        migraphx::op::pooling{
+            "average", {1, 1}, {1, 1}, {2, 2}, migraphx::op::padding_mode_t::same},
+        input);
+    auto ret = p.add_instruction(migraphx::op::slice{{2, 3}, {1, 1}, {6, 6}}, ins);
+    p.add_return({ret});

-    auto prog = optimize_onnx("averagepool_same_upper_test.onnx");
+    auto prog = migraphx::parse_onnx("averagepool_same_upper_test.onnx");

     EXPECT(p == prog);
 }
...
@@ -39,6 +39,42 @@ TEST_CASE(instance_norm_test)
     EXPECT(migraphx::verify_range(result_vector, gold));
 }

+TEST_CASE(averagepool_notset_test)
+{
+    auto p = migraphx::parse_onnx("averagepool_notset_test.onnx");
+    p.compile(migraphx::cpu::target{});
+    std::vector<float> data_x = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+                                 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24};
+    migraphx::shape s_x{migraphx::shape::float_type, {1, 1, 5, 5}};
+    migraphx::program::parameter_map pp;
+    pp["x"] = migraphx::argument(s_x, data_x.data());
+
+    auto result = p.eval(pp).back();
+    std::vector<float> result_vector;
+    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold = {12};
+
+    EXPECT(migraphx::verify_range(result_vector, gold));
+}
+
+TEST_CASE(averagepool_nt_cip_test)
+{
+    auto p = migraphx::parse_onnx("averagepool_nt_cip_test.onnx");
+    p.compile(migraphx::cpu::target{});
+    std::vector<float> data_x = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+                                 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24};
+    migraphx::shape s_x{migraphx::shape::float_type, {1, 1, 5, 5}};
+    migraphx::program::parameter_map pp;
+    pp["x"] = migraphx::argument(s_x, data_x.data());
+
+    auto result = p.eval(pp).back();
+    std::vector<float> result_vector;
+    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold = {8.33333};
+
+    EXPECT(migraphx::verify_range(result_vector, gold));
+}
+
 TEST_CASE(gather_elements)
 {
     migraphx::program p = migraphx::parse_onnx("gather_elements_axis0_test.onnx");
...
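A quick sanity check (not part of the commit) of the two gold vectors above: the input is 0..24, so the full 5x5 sum is 300, and the single 6x6 window covers all 25 real elements plus 11 padded zeros.

#include <iostream>

int main()
{
    float sum = 0;
    for(int v = 0; v < 25; ++v)
        sum += v;                  // 300
    std::cout << sum / 25 << "\n"; // 12      -> averagepool_notset_test (padding excluded)
    std::cout << sum / 36 << "\n"; // 8.33333 -> averagepool_nt_cip_test (padding included)
}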