Unverified Commit 9a5e0c06 authored by kahmed10, committed by GitHub

Asym pad refactor (#791)



* alternative impl

* formatting

* add gpu pass to insert pad

* formatting

* update onnx test, still need cleanup

* formatting

* update tf_test

* modify existing tests

* formatting

* remove print

* code cleanup

* formatting

* code cleanup

* formatting

* fix tidy and cppcheck

* remove variable

* add test

* formatting

* add test and address comments

* formatting
Co-authored-by: Shucai Xiao <shucai@gmail.com>
Co-authored-by: mvermeulen <5479696+mvermeulen@users.noreply.github.com>
parent 98486807
@@ -185,7 +185,7 @@ MIGRAPHX_PRED_MATCHER(fusable_conv, instruction_ref ins)
     if(conv.algo == miopenConvolutionFwdAlgoWinograd and wei.lens()[2] != 3 and
        wei.lens()[3] != 3 and contains({{1, 1}}, op.stride))
         return false;
-    return contains({{0, 0}, {1, 1}, {2, 2}}, op.padding) and
+    return contains({{0, 0, 0, 0}, {1, 1, 1, 1}, {2, 2, 2, 2}}, op.padding) and
            contains({{0, 0}, {1, 1}}, op.stride) and contains({{1, 1}}, op.dilation);
 }
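With this refactor the padding attribute stores per-side values, {top, left, bottom, right} in the 2-D case, so a symmetric pad of 1 is now spelled {1, 1, 1, 1} rather than {1, 1}. A minimal sketch of the symmetry test this layout admits (the helper is ours, not part of the diff):

#include <algorithm>
#include <cstddef>
#include <vector>

// padding = {d0_begin, d1_begin, ..., d0_end, d1_end, ...}; it is symmetric
// exactly when the begin half equals the end half.
bool symmetric_padding(const std::vector<std::size_t>& padding)
{
    std::size_t kdims = padding.size() / 2;
    return std::equal(padding.begin(), padding.begin() + kdims, padding.begin() + kdims);
}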
@@ -568,7 +568,7 @@ struct miopen_conv_bias
     {
         check_shapes{inputs, *this}.has(5);
         // TODO: Check slices
-        return op.compute_shape({inputs.at(0), inputs.at(1)});
+        return op.normalize_compute_shape({inputs.at(0), inputs.at(1)});
     }
     argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
     {
@@ -615,7 +615,7 @@ struct miopen_conv_bias_relu
     {
         check_shapes{inputs, *this}.has(5);
         // TODO: Check slices
-        return op.compute_shape({inputs.at(0), inputs.at(1)});
+        return op.normalize_compute_shape({inputs.at(0), inputs.at(1)});
     }
     argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
     {
@@ -94,7 +94,7 @@ inline convolution_descriptor make_conv(const T& op)
     std::vector<int> stride(std::max(2, kdims), 1);
     std::vector<int> dilation(std::max(2, kdims), 1);
-    std::copy_backward(op.padding.begin(), op.padding.end(), padding.end());
+    std::copy_backward(op.padding.begin(), op.padding.begin() + kdims, padding.end());
     std::copy_backward(op.stride.begin(), op.stride.end(), stride.end());
     std::copy_backward(op.dilation.begin(), op.dilation.end(), dilation.end());
@@ -145,7 +145,7 @@ inline pooling_descriptor make_pooling(const migraphx::op::pooling& op)
     std::vector<int> stride(std::max(2, kdims), 1);
     std::vector<int> lengths(std::max(2, kdims), 1);
-    std::copy_backward(op.padding.begin(), op.padding.end(), padding.end());
+    std::copy_backward(op.padding.begin(), op.padding.begin() + kdims, padding.end());
    std::copy_backward(op.stride.begin(), op.stride.end(), stride.end());
    std::copy_backward(op.lengths.begin(), op.lengths.end(), lengths.end());
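std::copy_backward writes its source range right-aligned against the destination's end, which is how a kdims-sized attribute fills a descriptor vector of length max(2, kdims). The new upper bound op.padding.begin() + kdims copies only the begin-side half of the widened padding vector; by the time this runs, any asymmetric case should already have been peeled off into an explicit pad instruction, so the begin half suffices. A standalone illustration (not MIGraphX code):

#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
    // Per-side padding for a 2-D kernel after normalization: {top, left, bottom, right}.
    std::vector<int> op_padding = {1, 2, 1, 2};
    const int kdims = 2;
    std::vector<int> padding(std::max(2, kdims), 0);
    // Right-align the begin-side values into the MIOpen-style descriptor vector.
    std::copy_backward(op_padding.begin(), op_padding.begin() + kdims, padding.end());
    for(int p : padding)
        std::cout << p << ' '; // prints: 1 2
}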
@@ -10,7 +10,7 @@ shape miopen_pooling::compute_shape(const std::vector<shape>& inputs) const
     check_shapes{inputs, *this}.has(2).standard();
     std::vector<shape> pooling_input = {inputs.at(0)};
     check_shapes{pooling_input, *this}.max_ndims(5);
-    return op.compute_shape(pooling_input);
+    return op.normalize_compute_shape(pooling_input);
 }
 inline void reshape_if_1d(shape& input)
@@ -10,7 +10,7 @@ namespace gpu {
 shape miopen_quant_convolution::compute_shape(const std::vector<shape>& inputs) const
 {
     check_shapes{inputs, *this}.has(4).standard();
-    return op.compute_shape({inputs.at(0), inputs.at(1)});
+    return op.normalize_compute_shape({inputs.at(0), inputs.at(1)});
 }
 argument miopen_quant_convolution::compute(context& ctx,
                                            const shape& output_shape,
@@ -10,6 +10,7 @@
 #include <migraphx/eliminate_data_type.hpp>
 #include <migraphx/eliminate_identity.hpp>
 #include <migraphx/eliminate_pad.hpp>
+#include <migraphx/insert_pad.hpp>
 #include <migraphx/memory_coloring.hpp>
 #include <migraphx/normalize_ops.hpp>
 #include <migraphx/propagate_constant.hpp>
@@ -61,6 +62,8 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
         eliminate_identity{},
         eliminate_pad{},
         dead_code_elimination{},
+        insert_pad{},
+        dead_code_elimination{},
         rewrite_batchnorm{},
         dead_code_elimination{},
         rewrite_rnn{},
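The ordering appears deliberate: eliminate_pad first folds explicit pad instructions into each consumer's padding attribute, and insert_pad then re-materializes an explicit pad only where the padding came out asymmetric, with dead_code_elimination sweeping up after each step. The new tests below exercise the same pair standalone; a minimal usage sketch, assuming a module m:

migraphx::run_passes(m,
                     {migraphx::normalize_ops{},
                      migraphx::eliminate_pad{},
                      migraphx::dead_code_elimination{},
                      migraphx::insert_pad{},
                      migraphx::dead_code_elimination{}});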
@@ -205,7 +205,10 @@ struct ref_convolution : auto_register_op<ref_convolution<Op>>
     }
     std::string name() const { return "ref::" + op.name(); }
-    shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
+    shape compute_shape(const std::vector<shape>& inputs) const
+    {
+        return op.normalize_compute_shape(inputs);
+    }
     argument compute(context&, shape output_shape, std::vector<argument> args) const
     {
         argument result{output_shape};
@@ -370,7 +373,10 @@ struct ref_im2col
     }
     static std::string name() { return "ref::im2col"; }
-    shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
+    shape compute_shape(const std::vector<shape>& inputs) const
+    {
+        return op.normalize_compute_shape(inputs);
+    }
     argument compute(context&, const shape& output_shape, std::vector<argument> args) const
     {
@@ -471,7 +477,10 @@ struct ref_pooling : auto_register_op<ref_pooling<Op>>
     }
     std::string name() const { return "ref::pooling_" + Op::name(); }
-    shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
+    shape compute_shape(const std::vector<shape>& inputs) const
+    {
+        return op.normalize_compute_shape(inputs);
+    }
     argument compute(context&, const shape& output_shape, std::vector<argument> args) const
     {
         argument result{output_shape};
@@ -5,6 +5,8 @@
 #include <migraphx/pass.hpp>
 #include <migraphx/auto_contiguous.hpp>
 #include <migraphx/rewrite_rnn.hpp>
+#include <migraphx/eliminate_pad.hpp>
+#include <migraphx/insert_pad.hpp>
 #include <migraphx/dead_code_elimination.hpp>
 #include <migraphx/generate.hpp>
 #include <migraphx/normalize_ops.hpp>
@@ -18,6 +20,10 @@ std::string target::name() const { return "ref"; }
 std::vector<pass> target::get_passes(migraphx::context&, const compile_options&) const
 {
     return {normalize_ops{},
+            eliminate_pad{},
+            dead_code_elimination{},
+            insert_pad{},
+            dead_code_elimination{},
             rewrite_rnn{},
             dead_code_elimination{},
             auto_contiguous{},
@@ -62,16 +62,7 @@ struct parse_conv : op_parser<parse_conv>
             calculate_padding(0, pads, input_dims[2], op.stride[0], op.dilation[0], weight_h);
             calculate_padding(1, pads, input_dims[3], op.stride[1], op.dilation[1], weight_w);
-            if(pads[0] != pads[2] || pads[1] != pads[3])
-            {
-                std::vector<int64_t> padding = {0, 0, pads[0], pads[1], 0, 0, pads[2], pads[3]};
-                l0 = info.add_instruction(migraphx::make_op("pad", {{"pads", padding}}), l0);
-            }
-            else
-            {
-                op.padding[0] = pads[0];
-                op.padding[1] = pads[1];
-            }
+            op.padding = std::vector<size_t>(pads.begin(), pads.end());
         }
         else if(pad_mode.find("VALID") != std::string::npos)
         {
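The deleted branch documents the layout contract: per-side conv padding {top, left, bottom, right} corresponds to an explicit NCHW pad instruction whose pads attribute is {0, 0, top, left, 0, 0, bottom, right}. The parser now just stores the four raw values and leaves that rewrite to insert_pad. A sketch of the mapping (the helper name is ours):

#include <cstddef>
#include <cstdint>
#include <vector>

// Map per-side padding {top, left, bottom, right} to an 8-element NCHW pad
// attribute {n_begin, c_begin, h_begin, w_begin, n_end, c_end, h_end, w_end}.
std::vector<int64_t> to_pad_attr(const std::vector<std::size_t>& padding)
{
    return {0,
            0,
            static_cast<int64_t>(padding[0]),
            static_cast<int64_t>(padding[1]),
            0,
            0,
            static_cast<int64_t>(padding[2]),
            static_cast<int64_t>(padding[3])};
}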
@@ -57,20 +57,7 @@ struct parse_pooling : op_parser<parse_pooling>
             calculate_padding(0, pads, input_dims[2], op.stride[0], 1, op.lengths[0]);
             calculate_padding(1, pads, input_dims[3], op.stride[1], 1, op.lengths[1]);
-            if(pads[0] != pads[2] || pads[1] != pads[3])
-            {
-                std::vector<int64_t> padding = {0, 0, pads[0], pads[1], 0, 0, pads[2], pads[3]};
-                l0 = info.add_instruction(
-                    migraphx::make_op(
-                        "pad",
-                        {{"pads", padding}, {"value", std::numeric_limits<float>::lowest()}}),
-                    l0);
-            }
-            else
-            {
-                op.padding[0] = pads[0];
-                op.padding[1] = pads[1];
-            }
+            op.padding = std::vector<size_t>(pads.begin(), pads.end());
         }
     }
     return info.add_instruction(op, l0);
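Note the pooling variant padded with std::numeric_limits<float>::lowest() rather than zero: for max pooling the pad elements must never win the reduction. A tiny standalone illustration of why that value is safe:

#include <algorithm>
#include <iostream>
#include <limits>

int main()
{
    const float pad = std::numeric_limits<float>::lowest();
    float window[3] = {-5.0f, pad, -7.0f};
    // The pad element can never be the maximum, even for all-negative data.
    std::cout << *std::max_element(window, window + 3) << '\n'; // prints: -5
}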
 #include <migraphx/dead_code_elimination.hpp>
+#include <migraphx/normalize_ops.hpp>
 #include <migraphx/eliminate_pad.hpp>
 #include <migraphx/pass_manager.hpp>
 #include <migraphx/instruction.hpp>
@@ -10,7 +11,9 @@
 void run_pass(migraphx::module& m)
 {
-    migraphx::run_passes(m, {migraphx::eliminate_pad{}, migraphx::dead_code_elimination{}});
+    migraphx::run_passes(
+        m,
+        {migraphx::normalize_ops{}, migraphx::eliminate_pad{}, migraphx::dead_code_elimination{}});
 }
 migraphx::instruction_ref
@@ -66,15 +69,15 @@ TEST_CASE(rewrite_pad)
     auto om1 = l1->get_operator().to_value();
     auto om2 = l2->get_operator().to_value();
-    EXPECT(op0["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{1, 1});
-    EXPECT(om1["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{1, 1});
-    EXPECT(om2["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{1, 1});
+    EXPECT(op0["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{1, 1, 1, 1});
+    EXPECT(om1["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{1, 1, 1, 1});
+    EXPECT(om2["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{1, 1, 1, 1});
     EXPECT(std::none_of(
         m.begin(), m.end(), [](const migraphx::instruction& ins) { return ins.name() == "pad"; }));
 }

-TEST_CASE(rewrite_pad_im2col_asymetric)
+TEST_CASE(rewrite_pad_im2col_asymmetric)
 {
     migraphx::module m;
@@ -95,10 +98,10 @@ TEST_CASE(rewrite_pad_im2col_asymmetric)
     EXPECT(l0->get_shape() == s0);
     auto op0 = l0->get_operator().to_value();
-    EXPECT(op0["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{0, 0});
+    EXPECT(op0["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{0, 0, 2, 2});
     run_pass(m);
-    EXPECT(std::any_of(
+    EXPECT(std::none_of(
         m.begin(), m.end(), [](const migraphx::instruction& ins) { return ins.name() == "pad"; }));
 }
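The asymmetric im2col case now also expects the pad to be absorbed (none_of instead of any_of): with per-side padding, eliminate_pad can fold the pads of an explicit pad instruction straight into the consumer's padding attribute, giving the {0, 0, 2, 2} checked above. A hedged sketch of that fold, the inverse of the parser mapping (the helper and its guard are ours, not the pass source):

#include <cstddef>
#include <cstdint>
#include <vector>

// Fold an NCHW pad attribute into a consumer's per-side padding, refusing when
// the pad touches the batch or channel dimensions.
bool fold_pad(const std::vector<int64_t>& pads, std::vector<std::size_t>& padding)
{
    if(pads.size() != 8 or pads[0] != 0 or pads[1] != 0 or pads[4] != 0 or pads[5] != 0)
        return false;
    padding[0] += static_cast<std::size_t>(pads[2]); // top
    padding[1] += static_cast<std::size_t>(pads[3]); // left
    padding[2] += static_cast<std::size_t>(pads[6]); // bottom
    padding[3] += static_cast<std::size_t>(pads[7]); // right
    return true;
}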
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/normalize_ops.hpp>
#include <migraphx/insert_pad.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/instruction.hpp>
#include <basic_ops.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/make_op.hpp>
#include <test.hpp>
void run_pass(migraphx::module& m)
{
migraphx::run_passes(
m, {migraphx::normalize_ops{}, migraphx::insert_pad{}, migraphx::dead_code_elimination{}});
}
migraphx::instruction_ref
create_im2col(migraphx::instruction_ref& l_img, size_t channels, migraphx::module& m)
{
size_t f[2] = {1, 1};
std::vector<int32_t> weights(channels * f[0] * f[1]);
migraphx::shape s_weights{migraphx::shape::int32_type, {1, channels, f[0], f[1]}};
auto l_weights = m.add_literal(migraphx::literal{s_weights, weights});
return m.add_instruction(
migraphx::make_op("im2col", {{"padding", {0, 0, 1, 1}}}), l_img, l_weights);
}
migraphx::instruction_ref
create_conv(migraphx::instruction_ref& l_img,
size_t channels,
migraphx::module& m,
migraphx::op::padding_mode_t padding_mode = migraphx::op::padding_mode_t::default_)
{
migraphx::shape s_weights{migraphx::shape::int32_type, {4, channels, 3, 3}};
std::vector<int32_t> weights(4 * channels * 3 * 3);
auto l_weights = m.add_literal(migraphx::literal{s_weights, weights});
migraphx::op::convolution op;
op.padding_mode = padding_mode;
op.padding = {0, 0, 1, 1};
return m.add_instruction(op, l_img, l_weights);
}
TEST_CASE(rewrite_pad)
{
migraphx::module m;
size_t img_dim[2] = {2, 2};
size_t channels = 1;
std::vector<int32_t> input(channels * img_dim[0] * img_dim[1]);
std::iota(input.begin(), input.end(), 0);
migraphx::shape s_img{migraphx::shape::int32_type, {1, channels, img_dim[0], img_dim[1]}};
auto l_img = m.add_literal(migraphx::literal{s_img, input});
auto l0 = create_im2col(l_img, channels, m);
auto l1 = create_conv(l_img, channels, m);
auto l2 = m.add_instruction(
migraphx::make_op("pooling", {{"mode", "max"}, {"padding", {0, 0, 1, 1}}}), l_img);
m.add_instruction(migraphx::make_op("identity"), l0, l1, l2);
run_pass(m);
EXPECT(std::any_of(
m.begin(), m.end(), [](const migraphx::instruction& ins) { return ins.name() == "pad"; }));
}
TEST_CASE(rewrite_pad_symmetric)
{
migraphx::module m;
size_t img_dim[2] = {2, 2};
size_t channels = 1;
std::vector<int32_t> input(channels * img_dim[0] * img_dim[1]);
std::iota(input.begin(), input.end(), 0);
migraphx::shape s_img{migraphx::shape::int32_type, {1, channels, img_dim[0], img_dim[1]}};
auto l_img = m.add_literal(migraphx::literal{s_img, input});
m.add_instruction(migraphx::make_op("pooling", {{"mode", "max"}, {"padding", {1, 1, 1, 1}}}),
l_img);
run_pass(m);
EXPECT(std::none_of(
m.begin(), m.end(), [](const migraphx::instruction& ins) { return ins.name() == "pad"; }));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -75,6 +75,26 @@ TEST_CASE(gather_test_1)
     EXPECT(m1 == m2);
 }

+migraphx::module create_padded_op(const std::vector<size_t>& pad_vals)
+{
+    migraphx::module m;
+    migraphx::shape s{migraphx::shape::float_type, {2, 3, 4, 5}};
+    auto si = m.add_parameter("data", s);
+    auto r  = m.add_instruction(migraphx::make_op("pooling", {{"padding", pad_vals}}), si);
+    m.add_return({r});
+    return m;
+}
+
+TEST_CASE(padding_attr_test)
+{
+    migraphx::module m1 = create_padded_op({0, 1});
+    migraphx::module m2 = create_padded_op({0, 1, 0, 1});
+    run_pass(m1);
+    EXPECT(m1 == m2);
+}
+
 migraphx::module create_reduce_mean(const std::vector<int64_t>& axes)
 {
     migraphx::module m;
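padding_attr_test pins down the normalization rule: a kdims-sized padding {0, 1} is expanded to the per-side form {0, 1, 0, 1}, with the begin values mirrored to the end side. A minimal sketch of that expansion (our helper, assuming this reading of normalize_ops):

#include <cstddef>
#include <vector>

// Expand per-dimension padding {p0, p1, ...} to per-side {p0, p1, ..., p0, p1, ...};
// a vector that is already 2 * kdims long is returned unchanged.
std::vector<std::size_t> normalize_padding(const std::vector<std::size_t>& padding,
                                           std::size_t kdims)
{
    if(padding.size() != kdims)
        return padding;
    std::vector<std::size_t> result = padding;
    result.insert(result.end(), padding.begin(), padding.end());
    return result;
}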
@@ -182,7 +182,8 @@ TEST_CASE(averagepool_1d_test)
     auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5}});
     mm->add_instruction(
         migraphx::make_op(
-            "pooling", {{"mode", "average"}, {"padding", {0}}, {"stride", {1}}, {"lengths", {3}}}),
+            "pooling",
+            {{"mode", "average"}, {"padding", {0, 0}}, {"stride", {1}}, {"lengths", {3}}}),
         l0);
     auto prog = optimize_onnx("averagepool_1d_test.onnx");
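A 1-D pooling now carries a two-element padding, one value per side of its single spatial dimension. The expected shapes follow the usual formula; a quick standalone check (the standard formula, not lifted from this diff):

#include <cstdint>
#include <iostream>

// out = (in + pad_begin + pad_end - window) / stride + 1
int64_t pooled_len(int64_t in, int64_t pad_begin, int64_t pad_end, int64_t window, int64_t stride)
{
    return (in + pad_begin + pad_end - window) / stride + 1;
}

int main()
{
    // averagepool_1d_test: length-5 input, window 3, stride 1, padding {0, 0} -> 3
    std::cout << pooled_len(5, 0, 0, 3, 1) << '\n';
}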
@@ -196,7 +197,7 @@ TEST_CASE(averagepool_3d_test)
     auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5, 5}});
     mm->add_instruction(migraphx::make_op("pooling",
                                           {{"mode", "average"},
-                                           {"padding", {0, 0, 0}},
+                                           {"padding", {0, 0, 0, 0, 0, 0}},
                                            {"stride", {1, 1, 1}},
                                            {"lengths", {3, 3, 3}}}),
                         l0);
@@ -210,12 +211,13 @@ TEST_CASE(averagepool_notset_test)
     migraphx::program p;
     auto* mm = p.get_main_module();
     auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
-    auto ins = mm->add_instruction(
-        migraphx::make_op(
-            "pooling",
-            {{"mode", "average"}, {"padding", {2, 2}}, {"stride", {2, 2}}, {"lengths", {6, 6}}}),
-        input);
+    auto ins = mm->add_instruction(migraphx::make_op("pooling",
+                                                     {{"mode", "average"},
+                                                      {"padding", {2, 2, 2, 2}},
+                                                      {"stride", {2, 2}},
+                                                      {"lengths", {6, 6}}}),
+                                   input);
     auto ret = mm->add_instruction(
         migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {2, 2}}}), ins);
     mm->add_return({ret});
     auto prog = migraphx::parse_onnx("averagepool_notset_test.onnx");
@@ -230,11 +232,12 @@ TEST_CASE(averagepool_nt_cip_test)
     auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
     std::vector<int64_t> pads = {0, 0, 0, 0, 0, 0, 1, 1};
     auto ins_pad = mm->add_instruction(migraphx::make_op("pad", {{"pads", pads}}), input);
-    auto ret = mm->add_instruction(
-        migraphx::make_op(
-            "pooling",
-            {{"mode", "average"}, {"padding", {0, 0}}, {"stride", {2, 2}}, {"lengths", {6, 6}}}),
-        ins_pad);
+    auto ret = mm->add_instruction(migraphx::make_op("pooling",
+                                                     {{"mode", "average"},
+                                                      {"padding", {0, 0, 0, 0}},
+                                                      {"stride", {2, 2}},
+                                                      {"lengths", {6, 6}}}),
+                                   ins_pad);
     mm->add_return({ret});
     auto prog = migraphx::parse_onnx("averagepool_nt_cip_test.onnx");
@@ -246,12 +249,13 @@ TEST_CASE(averagepool_same_lower_test)
     migraphx::program p;
     auto* mm = p.get_main_module();
     auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
-    auto ins = mm->add_instruction(
-        migraphx::make_op(
-            "pooling",
-            {{"mode", "average"}, {"padding", {1, 1}}, {"stride", {1, 1}}, {"lengths", {2, 2}}}),
-        input);
+    auto ins = mm->add_instruction(migraphx::make_op("pooling",
+                                                     {{"mode", "average"},
+                                                      {"padding", {1, 1, 1, 1}},
+                                                      {"stride", {1, 1}},
+                                                      {"lengths", {2, 2}}}),
+                                   input);
     auto ret = mm->add_instruction(
         migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {0, 0}}, {"ends", {5, 5}}}), ins);
     mm->add_return({ret});
     auto prog = migraphx::parse_onnx("averagepool_same_lower_test.onnx");
@@ -266,11 +270,12 @@ TEST_CASE(averagepool_sl_cip_test)
     auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
     std::vector<int64_t> pads = {0, 0, 1, 1, 0, 0, 0, 0};
     auto ins_pad = mm->add_instruction(migraphx::make_op("pad", {{"pads", pads}}), input);
-    auto ret = mm->add_instruction(
-        migraphx::make_op(
-            "pooling",
-            {{"mode", "average"}, {"padding", {0, 0}}, {"stride", {1, 1}}, {"lengths", {2, 2}}}),
-        ins_pad);
+    auto ret = mm->add_instruction(migraphx::make_op("pooling",
+                                                     {{"mode", "average"},
+                                                      {"padding", {0, 0, 0, 0}},
+                                                      {"stride", {1, 1}},
+                                                      {"lengths", {2, 2}}}),
+                                   ins_pad);
     mm->add_return({ret});
     auto prog = migraphx::parse_onnx("averagepool_sl_cip_test.onnx");
@@ -282,12 +287,13 @@ TEST_CASE(averagepool_same_upper_test)
     migraphx::program p;
     auto* mm = p.get_main_module();
     auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
-    auto ins = mm->add_instruction(
-        migraphx::make_op(
-            "pooling",
-            {{"mode", "average"}, {"padding", {1, 1}}, {"stride", {1, 1}}, {"lengths", {2, 2}}}),
-        input);
+    auto ins = mm->add_instruction(migraphx::make_op("pooling",
+                                                     {{"mode", "average"},
+                                                      {"padding", {1, 1, 1, 1}},
+                                                      {"stride", {1, 1}},
+                                                      {"lengths", {2, 2}}}),
+                                   input);
     auto ret = mm->add_instruction(
         migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {6, 6}}}), ins);
     mm->add_return({ret});
     auto prog = migraphx::parse_onnx("averagepool_same_upper_test.onnx");
@@ -606,7 +612,7 @@ TEST_CASE(conv_autopad_same_test)
     auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}});
     auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
     migraphx::op::convolution op;
-    op.padding      = {1, 1};
+    op.padding      = {1, 1, 1, 1};
     op.padding_mode = migraphx::op::padding_mode_t::same;
     mm->add_instruction(op, l0, l1);
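For convolution the analogous output-length formula includes dilation; with the widened padding {1, 1, 1, 1} and a 3x3 kernel the 32x32 input keeps its size, which is what SAME mode demands. A standalone check (again the standard formula, not from this diff):

#include <cstdint>
#include <iostream>

// out = (in + pad_begin + pad_end - dilation * (kernel - 1) - 1) / stride + 1
int64_t conv_len(int64_t in, int64_t pad_begin, int64_t pad_end,
                 int64_t kernel, int64_t stride, int64_t dilation)
{
    return (in + pad_begin + pad_end - dilation * (kernel - 1) - 1) / stride + 1;
}

int main()
{
    // conv_autopad_same_test: (32 + 1 + 1 - 1 * (3 - 1) - 1) / 1 + 1 = 32
    std::cout << conv_len(32, 1, 1, 3, 1, 1) << '\n';
}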
@@ -644,8 +650,9 @@ TEST_CASE(conv_bn_relu_maxpool_test)
     auto p5 = mm->add_parameter("5", {migraphx::shape::float_type, {1}});
     auto p6 = mm->add_parameter("6", {migraphx::shape::float_type, {1}});
     uint64_t axis = 1;
-    auto l3 = mm->add_instruction(migraphx::make_op("convolution"), l0, l1);
-    auto l4 = mm->add_instruction(
+    auto l3 =
+        mm->add_instruction(migraphx::make_op("convolution", {{"padding", {0, 0, 0, 0}}}), l0, l1);
+    auto l4 = mm->add_instruction(
         migraphx::make_op("broadcast", {{"axis", axis}, {"dims", l3->get_shape().lens()}}), l2);
     auto l5 = mm->add_instruction(migraphx::make_op("add"), l3, l4);
     auto l6 = mm->add_instruction(
@@ -654,7 +661,7 @@ TEST_CASE(conv_bn_relu_maxpool_test)
     mm->add_instruction(
         migraphx::make_op(
             "pooling",
-            {{"mode", "max"}, {"padding", {0, 0}}, {"stride", {2, 2}}, {"lengths", {2, 2}}}),
+            {{"mode", "max"}, {"padding", {0, 0, 0, 0}}, {"stride", {2, 2}}, {"lengths", {2, 2}}}),
         l7);
     auto prog = optimize_onnx("conv_bn_relu_maxpool_test.onnx");
@@ -669,15 +676,16 @@ TEST_CASE(conv_relu_maxpool_test)
     auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 5, 5}});
     auto l2 = mm->add_parameter("2", {migraphx::shape::float_type, {1}});
     uint64_t axis = 1;
-    auto l3 = mm->add_instruction(migraphx::make_op("convolution"), l0, l1);
-    auto l4 = mm->add_instruction(
+    auto l3 =
+        mm->add_instruction(migraphx::make_op("convolution", {{"padding", {0, 0, 0, 0}}}), l0, l1);
+    auto l4 = mm->add_instruction(
         migraphx::make_op("broadcast", {{"axis", axis}, {"dims", l3->get_shape().lens()}}), l2);
     auto l5 = mm->add_instruction(migraphx::make_op("add"), l3, l4);
     auto l6 = mm->add_instruction(migraphx::make_op("relu"), l5);
     mm->add_instruction(
         migraphx::make_op(
             "pooling",
-            {{"mode", "max"}, {"padding", {0, 0}}, {"stride", {2, 2}}, {"lengths", {2, 2}}}),
+            {{"mode", "max"}, {"padding", {0, 0, 0, 0}}, {"stride", {2, 2}}, {"lengths", {2, 2}}}),
         l6);
     auto prog = optimize_onnx("conv_relu_maxpool_test.onnx");
@@ -692,20 +700,22 @@ TEST_CASE(conv_relu_maxpool_x2_test)
     auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {5, 3, 5, 5}});
     auto l2 = mm->add_parameter("2", {migraphx::shape::float_type, {5}});
     uint64_t axis = 1;
-    auto l3 = mm->add_instruction(migraphx::make_op("convolution"), l0, l1);
-    auto l4 = mm->add_instruction(
+    auto l3 =
+        mm->add_instruction(migraphx::make_op("convolution", {{"padding", {0, 0, 0, 0}}}), l0, l1);
+    auto l4 = mm->add_instruction(
         migraphx::make_op("broadcast", {{"axis", axis}, {"dims", l3->get_shape().lens()}}), l2);
     auto l5 = mm->add_instruction(migraphx::make_op("add"), l3, l4);
     auto l6 = mm->add_instruction(migraphx::make_op("relu"), l5);
     auto l7 = mm->add_instruction(
         migraphx::make_op(
             "pooling",
-            {{"mode", "max"}, {"padding", {0, 0}}, {"stride", {2, 2}}, {"lengths", {2, 2}}}),
+            {{"mode", "max"}, {"padding", {0, 0, 0, 0}}, {"stride", {2, 2}}, {"lengths", {2, 2}}}),
         l6);
     auto l8 = mm->add_parameter("3", {migraphx::shape::float_type, {1, 5, 5, 5}});
     auto l9 = mm->add_parameter("4", {migraphx::shape::float_type, {1}});
-    auto l10 = mm->add_instruction(migraphx::make_op("convolution"), l7, l8);
+    auto l10 =
+        mm->add_instruction(migraphx::make_op("convolution", {{"padding", {0, 0, 0, 0}}}), l7, l8);
     auto l11 = mm->add_instruction(
         migraphx::make_op("broadcast", {{"axis", axis}, {"dims", l10->get_shape().lens()}}), l9);
     auto l12 = mm->add_instruction(migraphx::make_op("add"), l10, l11);
@@ -713,7 +723,7 @@ TEST_CASE(conv_relu_maxpool_x2_test)
     mm->add_instruction(
         migraphx::make_op(
             "pooling",
-            {{"mode", "max"}, {"padding", {0, 0}}, {"stride", {2, 2}}, {"lengths", {2, 2}}}),
+            {{"mode", "max"}, {"padding", {0, 0, 0, 0}}, {"stride", {2, 2}}, {"lengths", {2, 2}}}),
         l13);
     auto prog = optimize_onnx("conv_relu_maxpool_x2_test.onnx");
@@ -825,7 +835,8 @@ TEST_CASE(deconv_input_pads_asymm_1d_test)
     auto l0 = mm->add_parameter("x", {migraphx::shape::float_type, {1, 1, 3}});
     auto l1 = mm->add_parameter("w", {migraphx::shape::float_type, {1, 2, 3}});
     auto l2 = mm->add_instruction(
-        migraphx::make_op("deconvolution", {{"padding", {0}}, {"stride", {2}}, {"dilation", {1}}}),
+        migraphx::make_op("deconvolution",
+                          {{"padding", {0, 0}}, {"stride", {2}}, {"dilation", {1}}}),
         l0,
         l1);
     mm->add_instruction(migraphx::make_op("slice", {{"axes", {2}}, {"starts", {0}}, {"ends", {6}}}),
@@ -1117,9 +1128,10 @@ migraphx::program create_external_data_prog()
     std::vector<float> weight_data(1210, 1);
     std::vector<float> bias_data(10, 1);
     auto bias = mm->add_literal(migraphx::literal({migraphx::shape::float_type, {10}}, bias_data));
     auto weights = mm->add_literal(migraphx::literal(s2, weight_data));
     auto param = mm->add_parameter("input", s);
-    auto conv = mm->add_instruction(migraphx::make_op("convolution"), param, weights);
+    auto conv = mm->add_instruction(
+        migraphx::make_op("convolution", {{"padding", {0, 0, 0, 0}}}), param, weights);
     auto bias_bcast = mm->add_instruction(
         migraphx::make_op("broadcast", {{"axis", 1}, {"dims", {1, 10, 214, 214}}}), bias);
     mm->add_instruction(migraphx::make_op("add"), conv, bias_bcast);
@@ -1300,6 +1312,7 @@ TEST_CASE(globalavgpool_test)
     auto op = migraphx::op::pooling{"average"};
     auto lens = input->get_shape().lens();
     op.lengths = {lens[2], lens[3]};
+    op.padding = {0, 0, 0, 0};
     mm->add_instruction(op, input);
     auto prog = optimize_onnx("globalavgpool_test.onnx");
@@ -1316,6 +1329,7 @@ TEST_CASE(globalmaxpool_test)
     auto op = migraphx::op::pooling{"max"};
     auto lens = input->get_shape().lens();
     op.lengths = {lens[2], lens[3]};
+    op.padding = {0, 0, 0, 0};
     mm->add_instruction(op, input);
     auto prog = optimize_onnx("globalmaxpool_test.onnx");
@@ -1995,15 +2009,11 @@ TEST_CASE(maxpool_notset_test)
     migraphx::program p;
     auto* mm = p.get_main_module();
     auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
-    std::vector<int64_t> pads = {0, 0, 0, 0, 0, 0, 1, 1};
-    float val = std::numeric_limits<float>::lowest();
-    auto ins_pad =
-        mm->add_instruction(migraphx::make_op("pad", {{"pads", pads}, {"value", val}}), input);
     mm->add_instruction(
         migraphx::make_op(
             "pooling",
-            {{"mode", "max"}, {"padding", {0, 0}}, {"stride", {2, 2}}, {"lengths", {6, 6}}}),
-        ins_pad);
+            {{"mode", "max"}, {"padding", {0, 0, 1, 1}}, {"stride", {2, 2}}, {"lengths", {6, 6}}}),
+        input);
     auto prog = optimize_onnx("maxpool_notset_test.onnx");
@@ -2015,15 +2025,11 @@ TEST_CASE(maxpool_same_upper_test)
     migraphx::program p;
     auto* mm = p.get_main_module();
     auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
-    std::vector<int64_t> pads = {0, 0, 0, 0, 0, 0, 1, 1};
-    float val = std::numeric_limits<float>::lowest();
-    auto ins_pad =
-        mm->add_instruction(migraphx::make_op("pad", {{"pads", pads}, {"value", val}}), input);
     mm->add_instruction(
         migraphx::make_op(
             "pooling",
-            {{"mode", "max"}, {"padding", {0, 0}}, {"stride", {1, 1}}, {"lengths", {2, 2}}}),
-        ins_pad);
+            {{"mode", "max"}, {"padding", {0, 0, 1, 1}}, {"stride", {1, 1}}, {"lengths", {2, 2}}}),
+        input);
     auto prog = optimize_onnx("maxpool_same_upper_test.onnx");
@@ -301,7 +301,7 @@ migraphx::program create_conv()
     migraphx::op::convolution op;
     op.padding_mode = migraphx::op::padding_mode_t::same;
-    op.padding      = {1, 1};
+    op.padding      = {1, 1, 1, 1};
     op.stride       = {1, 1};
     op.dilation     = {1, 1};
     auto l2 = mm->add_instruction(migraphx::make_op("transpose", {{"dims", {3, 2, 0, 1}}}), l1);
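The tf tests keep padding_mode same alongside the now four-element padding. TensorFlow's SAME rule is what makes asymmetric padding unavoidable: the total pad for a dimension is split in two, and any odd unit lands on the end (bottom/right) side. A hedged sketch of that computation (our reading of TF semantics, not calculate_padding itself):

#include <algorithm>
#include <cstdint>
#include <utility>

// Returns {pad_begin, pad_end} so that out == ceil(in / stride); any odd
// remainder goes to the end side, which is where the asymmetry comes from.
std::pair<int64_t, int64_t> same_padding(int64_t in, int64_t stride, int64_t kernel, int64_t dilation)
{
    int64_t effective = dilation * (kernel - 1) + 1;
    int64_t out       = (in + stride - 1) / stride;
    int64_t total     = std::max<int64_t>(0, (out - 1) * stride + effective - in);
    return {total / 2, total - total / 2};
}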