Unverified Commit 19bd9c49 authored by Mirza Halilčević, committed by GitHub

Add support for the dilations attribute to Pooling ops (#2105)

Introduces the dilations attribute to the pooling operators' reference implementation.
parent c7bae543
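Editor's note: the whole change is built around one formula — a kernel of length k with dilation d covers 1 + d * (k - 1) input elements. A minimal standalone sketch of that arithmetic (illustrative only; only `dilate_dim` mirrors code added in this commit):

    #include <cstddef>
    #include <iostream>

    // Mirrors the dilate_dim() helper added in this commit: the effective
    // extent of a pooling window once dilation is applied.
    std::size_t dilate_dim(std::size_t dim, std::size_t dilation)
    {
        return 1 + dilation * (dim - 1);
    }

    int main()
    {
        // The 1-D test cases below use input length 3, kernel 2, stride 1,
        // pads {1, 1}, dilation 3. The dilated window spans 4 elements, so
        // the output length is (3 + 1 + 1 - 4) / 1 + 1 = 2.
        std::size_t span = dilate_dim(2, 3);
        std::size_t out  = (3 + 1 + 1 - span) / 1 + 1;
        std::cout << span << " " << out << "\n"; // prints "4 2"
    }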
@@ -70,7 +70,8 @@ struct pooling
     // 2 smaller than the input tensor rank (NCHW layout)
     std::vector<std::size_t> lengths = {1, 1};
-    // Dilations are not supported at this time.
+    // Spacing between the elements of the pooling kernel. Must be the same ndim as lengths.
+    std::vector<std::size_t> dilations = {1, 1};
     // ceiling mode is a flag affecting output size
     // or equivalently, placements of the pooling kernel.
...@@ -99,6 +100,7 @@ struct pooling ...@@ -99,6 +100,7 @@ struct pooling
f(self.padding_mode, "padding_mode"), f(self.padding_mode, "padding_mode"),
f(self.stride, "stride"), f(self.stride, "stride"),
f(self.lengths, "lengths"), f(self.lengths, "lengths"),
f(self.dilations, "dilations"),
f(self.ceil_mode, "ceil_mode"), f(self.ceil_mode, "ceil_mode"),
f(self.lp_order, "lp_order"), f(self.lp_order, "lp_order"),
f(self.dyn_global, "dyn_global")); f(self.dyn_global, "dyn_global"));
@@ -112,14 +114,17 @@ struct pooling
             return;
         if((padding_mode != default_ and padding.size() != stride.size() and
             (padding.size()) != stride.size() * 2) or
-           stride.size() != lengths.size())
+           stride.size() != lengths.size() or dilations.size() != lengths.size())
         {
             MIGRAPHX_THROW("POOLING: inconsistent attribute sizes");
         }
-        if(std::any_of(lengths.begin(), lengths.end(), [&](auto i) { return (i == 0); }) or
-           std::any_of(stride.begin(), stride.end(), [&](auto i) { return (i == 0); }))
+        const auto is_zero = [](auto el) { return el == 0; };
+        if(std::any_of(lengths.begin(), lengths.end(), is_zero) or
+           std::any_of(stride.begin(), stride.end(), is_zero) or
+           std::any_of(dilations.begin(), dilations.end(), is_zero))
         {
-            MIGRAPHX_THROW("POOLING: size 0 pooling kernel or stride");
+            MIGRAPHX_THROW("POOLING: size 0 pooling kernel or stride or dilations");
         }
         // TODO: update lowering to run the reference
@@ -142,6 +147,11 @@ struct pooling
     value attributes() const { return {{"normalize_padding", "padding"}}; }

+    inline std::size_t dilate_dim(std::size_t dim, std::size_t dilation) const
+    {
+        return 1 + dilation * (dim - 1);
+    }
+
     std::vector<std::size_t> calc_spatial_dim_out(const std::vector<std::size_t>& input_lens,
                                                   std::size_t kdims) const
     {
@@ -151,8 +161,9 @@ struct pooling
             std::size_t padding_factor = 2 * padding[i];
             if(padding.size() == 2 * kdims)
                 padding_factor = padding[i] + padding[i + kdims];
+            std::size_t dilated_length = dilate_dim(lengths[i], dilations[i]);
             std::size_t dim_size;
-            if(input_lens[i + 2] + padding_factor < lengths[i])
+            if(input_lens[i + 2] + padding_factor < dilated_length)
             {
                 if(padding_mode == default_)
                     MIGRAPHX_THROW("POOLING: not enough padding for the given kernel size");
@@ -162,7 +173,7 @@ struct pooling
             }
             else
             {
-                dim_size = input_lens[i + 2] + padding_factor - lengths[i];
+                dim_size = input_lens[i + 2] + padding_factor - dilated_length;
             }
             std::size_t len =
                 (ceil_mode)
@@ -331,6 +342,7 @@ struct pooling
                     int start = static_cast<int>(idx_o[dim] * stride[d_2]) -
                                 static_cast<int>(padding_vals[d_2]);
                     int end;
+                    std::size_t dilated_kernel_dim = dilate_dim(kernel_dims[d_2], dilations[d_2]);
                     // NOLINT
                     if(count_include_pad and ceil_mode and (mode != pooling_mode::max))
                     {
@@ -340,15 +352,14 @@ struct pooling
                         // padding. Clip out-of-bounds indexes but not padding.
                         // Check if this kernel extends beyond the padding at end of dimension
-                        end = std::min(start + kernel_dims[d_2],
+                        end = std::min(start + dilated_kernel_dim,
                                        in_lens[dim] + static_cast<int>(padding_vals[d_2]));
                     }
                     else
                     {
                         // In non-ceiling mode, when
                         // count_include_pad is false, or for max pooling, clip off padding.
-                        end = std::min(start + kernel_dims[d_2], in_lens[dim]);
-                        start = std::max(start, 0);
+                        end = std::min(start + dilated_kernel_dim, in_lens[dim]);
                     }
                     win_start.push_back(start);
                     if(end < start)
@@ -366,6 +377,16 @@ struct pooling
                 // for each element in the window...
                 shape_for_each(win_shape, [&](const auto& idx_w) {
+                    // Skip elements that belong to the dilated area
+                    for(size_t axis = 0; axis < idx_w.size(); ++axis)
+                    {
+                        if(idx_w[axis] % dilations[axis])
+                        {
+                            pool_size -= 1;
+                            return;
+                        }
+                    }
+
                     // the coordinates of this element
                     auto idx = idx_o;
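Editor's note: to make the skip condition above concrete — within a window of dilated extent, only offsets that are multiples of the dilation are real kernel taps; everything else is a gap and is removed from the pool size. A small standalone sketch (illustrative values, not the operator code):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main()
    {
        // Kernel length 2 with dilation 3 spans window offsets 0..3;
        // only offsets where (offset % dilation == 0) are kernel taps.
        std::size_t dilation = 3;
        std::size_t span     = 4; // 1 + 3 * (2 - 1)
        std::vector<std::size_t> taps;
        for(std::size_t offset = 0; offset < span; ++offset)
            if(offset % dilation == 0)
                taps.push_back(offset); // keeps offsets 0 and 3
        for(auto t : taps)
            std::cout << t << " "; // prints "0 3"
    }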
@@ -390,7 +411,15 @@ struct pooling
                         // this is a padding element. Padding locations
                         // don't contribute to average or max pooling total but can play in
                         // lpnorm pooling.
-                        output_val = op(output_val, 0);
+                        if(mode == pooling_mode::lpnorm)
+                        {
+                            output_val = op(output_val, op.template init<Type>());
+                        }
+                        if(mode == pooling_mode::average)
+                        {
+                            // Ignore padding
+                            pool_size -= 1;
+                        }
                     }
                 });
                 output[i] = Type(op.final(output_val, pool_size));
...
@@ -26,6 +26,7 @@
 #include <string>
 #include <migraphx/config.hpp>
+#include <migraphx/instruction_ref.hpp>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
...
@@ -91,6 +91,14 @@ struct parse_pooling : op_parser<parse_pooling>
                 kdims, values["lengths"].size(), "PARSE_POOLING: inconsistent lengths");
         }

+        if(contains(info.attributes, "dilations"))
+        {
+            values["dilations"].clear();
+            copy(info.attributes["dilations"].ints(), std::back_inserter(values["dilations"]));
+            check_attr_sizes(
+                kdims, values["dilations"].size(), "PARSE_POOLING: inconsistent dilations");
+        }
+
         // lp_order attribute
         if(contains(info.attributes, "p"))
         {
@@ -169,10 +177,15 @@ struct parse_pooling : op_parser<parse_pooling>
             std::fill_n(values["stride"].begin(), kdims, 1);
         }

+        if(values["dilations"].size() != kdims)
+        {
+            values["dilations"].resize(kdims);
+            std::fill_n(values["dilations"].begin(), kdims, 1);
+        }
+
         // used to calculate the supposed output shape
         std::vector<int64_t> orig_padding = paddings;
-        // TODO: add parsing for dilations
         if(contains(info.attributes, "auto_pad") and
            to_upper(info.attributes["auto_pad"].s()) != "NOTSET")
         {
@@ -189,11 +202,10 @@ struct parse_pooling : op_parser<parse_pooling>
             else
             {
                 // Calculate auto padding
-                // dilations (argument 4) not supported; default to all 1's
                 cal_auto_padding_size(info,
                                       values,
                                       values["lengths"].to_vector<std::size_t>(),
-                                      std::vector<size_t>(in_shape.ndim() - 2, 1),
+                                      values["dilations"].to_vector<std::size_t>(),
                                       in_shape.lens(),
                                       paddings);
                 values["padding"] = paddings;
...
@@ -35,6 +35,110 @@
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {

+static void replace_with_reduce(module& m, instruction_ref ins)
+{
+    auto&& s  = ins->inputs().front()->get_shape();
+    auto&& op = any_cast<op::pooling>(ins->get_operator());
+    auto lens = s.lens();
+    std::vector<std::int64_t> axes(lens.size() - 2);
+    std::iota(axes.begin(), axes.end(), 2);
+    // average pooling
+    if(op.mode == op::pooling_mode::average)
+    {
+        m.replace_instruction(ins, make_op("reduce_mean", {{"axes", axes}}), ins->inputs());
+    }
+    // max pooling
+    else
+    {
+        m.replace_instruction(ins, make_op("reduce_max", {{"axes", axes}}), ins->inputs());
+    }
+}
+
+static void replace_dilations_with_gather_pooling(module& m, instruction_ref ins)
+{
+    // TODO remove this when MIOpen supports dilated pooling
+    auto&& s  = ins->inputs().front()->get_shape();
+    auto&& op = any_cast<op::pooling>(ins->get_operator());
+    // Ignore N, C axes
+    std::vector<size_t> dims = {s.lens().cbegin() + 2, s.lens().cend()};
+    bool default_padding =
+        std::all_of(op.padding.cbegin(), op.padding.cend(), [](auto i) { return i == 0; });
+    if(not default_padding)
+    {
+        for(size_t idx{0}; idx < op.padding.size(); ++idx)
+        {
+            // We need to pad both ends
+            dims[idx] += op.padding.at(idx) * 2;
+        }
+    }
+    std::vector<size_t> kernels   = op.lengths;
+    std::vector<size_t> strides   = op.stride;
+    std::vector<size_t> dilations = op.dilations;
+    std::vector<std::vector<int>> axis_indices;
+    axis_indices.resize(dims.size());
+    for(auto idx{0}; idx < dims.size(); ++idx)
+    {
+        // Only consider window positions where the dilated window still fits
+        for(size_t stride{0}; stride < dims.at(idx) - dilations.at(idx) * (kernels.at(idx) - 1);
+            stride += strides.at(idx))
+        {
+            for(size_t step{0}; step < kernels.at(idx); ++step)
+            {
+                axis_indices.at(idx).push_back(stride + dilations.at(idx) * step);
+            }
+        }
+    }
+    auto elements = ins->inputs().front();
+    if(not default_padding)
+    {
+        // Pad supports asymmetric padding, so we need to provide both ends
+        std::vector<size_t> padding(2 * s.lens().size(), 0);
+        // Format will be e.g. {N, C, P1, P2, N, C, P1, P2}
+        for(size_t idx{0}; idx < op.padding.size(); ++idx)
+        {
+            // Ignore N, C axes
+            padding.at(2 + idx)                   = op.padding.at(idx);
+            padding.at(2 + idx + s.lens().size()) = op.padding.at(idx);
+        }
+        // Default value needed for Max pooling
+        elements = m.insert_instruction(
+            ins,
+            make_op("pad", {{"pads", padding}, {"value", std::numeric_limits<float>::lowest()}}),
+            elements);
+    }
+    for(auto idx{0}; idx < axis_indices.size(); ++idx)
+    {
+        migraphx::shape s_indices{migraphx::shape::int32_type, {axis_indices.at(idx).size()}};
+        auto indices = m.add_literal(migraphx::literal{s_indices, axis_indices.at(idx)});
+        elements     = m.insert_instruction(
+            ins, make_op("gather", {{"axis", idx + 2 /*ignore N,C*/}}), elements, indices);
+    }
+    // Ignore padding
+    std::vector<size_t> new_padding(kernels.size(), 0);
+    // The kernel window elements are placed next to each other, e.g. {x1, y1, x2, y2, ...}
+    // We need to step over them so the windows do not overlap
+    std::vector<size_t> new_strides(kernels);
+    // Ignore dilations
+    std::vector<size_t> new_dilations(kernels.size(), 1);
+    m.replace_instruction(ins,
+                          make_op("pooling",
+                                  {{"mode", op.mode},
+                                   {"padding", new_padding},
+                                   {"stride", new_strides},
+                                   {"lengths", kernels},
+                                   {"dilations", new_dilations}}),
+                          elements);
+}
+
 void rewrite_pooling::apply(module& m) const
 {
     for(auto ins : iterator_for(m))
@@ -43,26 +147,36 @@ void rewrite_pooling::apply(module& m) const
             continue;
         if(ins->inputs().empty())
             continue;
         auto&& s  = ins->inputs().front()->get_shape();
         auto&& op = any_cast<op::pooling>(ins->get_operator());
-        if(not std::all_of(op.padding.begin(), op.padding.end(), [](auto i) { return i == 0; }))
-            continue;
-        if(not std::all_of(op.stride.begin(), op.stride.end(), [](auto i) { return i == 1; }))
-            continue;
-        auto lens = s.lens();
-        if(not std::equal(lens.begin() + 2, lens.end(), op.lengths.begin(), op.lengths.end()))
-            continue;
-        std::vector<std::int64_t> axes(lens.size() - 2);
-        std::iota(axes.begin(), axes.end(), 2);
-        // average pooling
-        if(op.mode == op::pooling_mode::average)
-        {
-            m.replace_instruction(ins, make_op("reduce_mean", {{"axes", axes}}), ins->inputs());
-        }
-        // max pooling
-        else
-        {
-            m.replace_instruction(ins, make_op("reduce_max", {{"axes", axes}}), ins->inputs());
-        }
+        bool same_kernel_as_shape = std::equal(
+            s.lens().cbegin() + 2, s.lens().cend(), op.lengths.cbegin(), op.lengths.cend());
+        bool default_strides =
+            std::all_of(op.stride.cbegin(), op.stride.cend(), [](auto i) { return i == 1; });
+        bool default_padding =
+            std::all_of(op.padding.cbegin(), op.padding.cend(), [](auto i) { return i == 0; });
+        bool default_dilations =
+            std::all_of(op.dilations.cbegin(), op.dilations.cend(), [](auto i) { return i == 1; });
+        if(same_kernel_as_shape and default_strides and default_padding and default_dilations)
+        {
+            replace_with_reduce(m, ins);
+        }
+        else if(not default_dilations)
+        {
+            // Dilated AvgPool with padding is not supported
+            if(not default_padding and op.mode == op::pooling_mode::average)
+            {
+                continue;
+            }
+            auto size =
+                std::accumulate(s.lens().cbegin(), s.lens().cend(), 1, std::multiplies<size_t>());
+            // Can't handle too large an input because of the resulting literal size
+            if(size > 100000)
+            {
+                continue;
+            }
+            replace_dilations_with_gather_pooling(m, ins);
+        }
     }
 }
...
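Editor's note: the workaround above turns a dilated pooling into a gather (which lays each window's taps out contiguously) followed by an ordinary pooling whose stride equals the kernel length. A standalone sketch of the index construction for one axis (illustrative values, not target code):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main()
    {
        // One spatial axis: dim 5, kernel 2, stride 1, dilation 3.
        // The window at position p reads taps {p, p + 3}; collecting them
        // contiguously gives {0, 3, 1, 4}, so a plain pooling with
        // lengths = 2 and stride = 2 over the gathered axis reproduces
        // the dilated result.
        std::size_t dim = 5, kernel = 2, stride = 1, dilation = 3;
        std::vector<int> indices;
        for(std::size_t pos = 0; pos < dim - dilation * (kernel - 1); pos += stride)
            for(std::size_t step = 0; step < kernel; ++step)
                indices.push_back(pos + dilation * step);
        for(int i : indices)
            std::cout << i << " "; // prints "0 3 1 4"
    }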
@@ -34,23 +34,32 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace cpu {

-struct dnnl_pooling : dnnl_extend_op<dnnl_pooling, dnnl::pooling_forward, op::pooling>
+struct dnnl_pooling : dnnl_extend_op<dnnl_pooling, dnnl::pooling_v2_forward, op::pooling>
 {
     std::vector<int> arg_map(int) const { return {MIGRAPHX_DNNL_PREFIX(ARG_SRC)}; }

-    dnnl::pooling_forward::desc get_desc(const std::unordered_map<int, dnnl::memory::desc>& m) const
+    dnnl::pooling_v2_forward::desc
+    get_desc(const std::unordered_map<int, dnnl::memory::desc>& m) const
     {
         auto algo  = op.mode == op::pooling_mode::max ? dnnl::algorithm::pooling_max
                                                       : dnnl::algorithm::pooling_avg;
         auto kdims = op.kdims();
         std::vector<size_t> padding_l(op.padding.begin(), op.padding.begin() + kdims);
         std::vector<size_t> padding_r(op.padding.begin() + kdims, op.padding.end());
+        // Note: It is not documented, but the default dilation seems to be 0 instead of 1,
+        // so we need to offset the dilations by -1.
+        std::vector<size_t> dilations;
+        std::transform(op.dilations.cbegin(),
+                       op.dilations.cend(),
+                       std::back_inserter(dilations),
+                       [](size_t d) { return d - 1; });
         return {dnnl::prop_kind::forward_inference,
                 algo,
                 m.at(MIGRAPHX_DNNL_PREFIX(ARG_SRC)),
                 m.at(MIGRAPHX_DNNL_PREFIX(ARG_DST)),
                 to_dnnl_dims(op.stride),
                 to_dnnl_dims(op.lengths),
+                to_dnnl_dims(dilations),
                 to_dnnl_dims(padding_l),
                 to_dnnl_dims(padding_r)};
     }
...
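Editor's note: per the comment in the hunk above, oneDNN's `pooling_v2_forward` appears to treat dilation 0 as "no dilation", whereas MIGraphX and ONNX use 1 for that, hence the `d - 1` transform. A minimal standalone sketch of just the convention conversion (the zero-based convention is an assumption taken from the commit's own note, not from oneDNN documentation):

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <iterator>
    #include <vector>

    int main()
    {
        // ONNX-style dilations: 1 means adjacent taps.
        std::vector<std::size_t> onnx_dilations = {3, 1};
        // Assumed oneDNN-style dilations: 0 means adjacent taps, so subtract 1.
        std::vector<std::size_t> dnnl_dilations;
        std::transform(onnx_dilations.cbegin(),
                       onnx_dilations.cend(),
                       std::back_inserter(dnnl_dilations),
                       [](std::size_t d) { return d - 1; });
        for(auto d : dnnl_dilations)
            std::cout << d << " "; // prints "2 0"
    }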
@@ -211,6 +211,12 @@ inline pooling_descriptor make_pooling(const migraphx::op::pooling& op)
         ss << op.mode;
         MIGRAPHX_THROW(ss.str());
     }
+    if(not std::all_of(
+           op.dilations.cbegin(), op.dilations.cend(), [](std::size_t d) { return d == 1; }))
+    {
+        MIGRAPHX_THROW("Unsupported dilations for pooling: [" + to_string_range(op.dilations) +
+                       "]");
+    }
     auto p = make_obj<pooling_descriptor>(&miopenCreatePoolingDescriptor);
     int kdims = op.kdims();
...
(new binary file: averagepool_dilate_test.onnx — an AveragePool node named averagepool_dilate_test with dilations, kernel_shape, pads, and strides attributes; generated by gen_onnx.py below)
@@ -276,6 +276,22 @@ def averagepool_1d_test():
     return ([node], [x], [out])


+@onnx_test()
+def averagepool_dilate_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4, 3])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4, 2])
+    node = onnx.helper.make_node('AveragePool',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 kernel_shape=[2],
+                                 strides=[1],
+                                 pads=[1, 1],
+                                 dilations=[3])
+
+    return ([node], [x], [y])
+
+
 @onnx_test()
 def averagepool_3d_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5, 5])
@@ -4882,6 +4898,22 @@ def maxpool_notset_test():
     return ([node], [x], [y])


+@onnx_test()
+def maxpool_dilate_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4, 3])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4, 2])
+    node = onnx.helper.make_node('MaxPool',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 kernel_shape=[2],
+                                 strides=[1],
+                                 pads=[1, 1],
+                                 dilations=[3])
+
+    return ([node], [x], [y])
+
+
 @onnx_test()
 def maxpool_same_upper_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
...
(new binary file: maxpool_dilate_test.onnx — a MaxPool node named maxpool_dilate_test with dilations, kernel_shape, pads, and strides attributes; generated by gen_onnx.py above)
@@ -296,13 +296,32 @@ TEST_CASE(averagepool_1d_test)
                          {{"mode", migraphx::op::pooling_mode::average},
                           {"padding", {0, 0}},
                           {"stride", {1}},
-                          {"lengths", {3}}}),
+                          {"lengths", {3}},
+                          {"dilations", {1}}}),
         l0);

     auto prog = optimize_onnx("averagepool_1d_test.onnx");
     EXPECT(p == prog);
 }

+TEST_CASE(averagepool_dilate_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 4, 3}});
+    mm->add_instruction(migraphx::make_op("pooling",
+                                          {{"mode", migraphx::op::pooling_mode::average},
+                                           {"padding", {1, 1}},
+                                           {"stride", {1}},
+                                           {"lengths", {2}},
+                                           {"dilations", {3}}}),
+                        input);
+
+    auto prog = optimize_onnx("averagepool_dilate_test.onnx");
+    EXPECT(p == prog);
+}
+
 TEST_CASE(averagepool_3d_test)
 {
     migraphx::program p;
@@ -312,7 +331,8 @@ TEST_CASE(averagepool_3d_test)
                          {{"mode", migraphx::op::pooling_mode::average},
                           {"padding", {0, 0, 0, 0, 0, 0}},
                           {"stride", {1, 1, 1}},
-                          {"lengths", {3, 3, 3}}}),
+                          {"lengths", {3, 3, 3}},
+                          {"dilations", {1, 1, 1}}}),
         l0);

     auto prog = optimize_onnx("averagepool_3d_test.onnx");
@@ -332,6 +352,7 @@ TEST_CASE(averagepool_dyn_test)
                              {"mode", migraphx::op::pooling_mode::average},
                              {"stride", {2, 2, 2}},
                              {"lengths", {3, 3, 3}},
+                             {"dilations", {1, 1, 1}},
                              {"padding", {1, 1, 1, 1, 1, 1}},
                              {"padding_mode", 0},
                          }),
@@ -357,6 +378,7 @@ TEST_CASE(averagepool_dyn_autopad_test)
                              {"mode", migraphx::op::pooling_mode::average},
                              {"stride", {2, 2, 2}},
                              {"lengths", {3, 3, 3}},
+                             {"dilations", {1, 1, 1}},
                              {"padding", {0, 0, 0, 0, 0, 0}},
                              {"padding_mode", migraphx::op::padding_mode_t::same_upper},
                          }),
@@ -394,7 +416,8 @@ TEST_CASE(averagepool_notset_test)
                          {{"mode", migraphx::op::pooling_mode::average},
                           {"padding", {2, 2, 2, 2}},
                           {"stride", {2, 2}},
-                          {"lengths", {6, 6}}}),
+                          {"lengths", {6, 6}},
+                          {"dilations", {1, 1}}}),
         input);
     auto ret = mm->add_instruction(
         migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {2, 2}}}), ins);
@@ -415,7 +438,8 @@ TEST_CASE(averagepool_nt_cip_test)
                          {{"mode", migraphx::op::pooling_mode::average},
                           {"padding", {0, 0, 0, 0}},
                           {"stride", {2, 2}},
-                          {"lengths", {6, 6}}}),
+                          {"lengths", {6, 6}},
+                          {"dilations", {1, 1}}}),
         ins_pad);
     mm->add_return({ret});
@@ -437,6 +461,7 @@ TEST_CASE(averagepool_same_lower_test)
                           {"padding", {1, 1, 1, 1}},
                           {"stride", {1, 1}},
                           {"lengths", {2, 2}},
+                          {"dilations", {1, 1}},
                           {"padding_mode", migraphx::op::padding_mode_t::default_},
                          }),
         input);
@@ -459,7 +484,8 @@ TEST_CASE(averagepool_sl_cip_test)
                          {{"mode", migraphx::op::pooling_mode::average},
                           {"padding", {0, 0, 0, 0}},
                           {"stride", {1, 1}},
-                          {"lengths", {2, 2}}}),
+                          {"lengths", {2, 2}},
+                          {"dilations", {1, 1}}}),
         ins_pad);
     mm->add_return({ret});
     auto prog = migraphx::parse_onnx("averagepool_sl_cip_test.onnx");
@@ -476,7 +502,8 @@ TEST_CASE(averagepool_same_upper_test)
                          {{"mode", migraphx::op::pooling_mode::average},
                           {"padding", {1, 1, 1, 1}},
                           {"stride", {1, 1}},
-                          {"lengths", {2, 2}}}),
+                          {"lengths", {2, 2}},
+                          {"dilations", {1, 1}}}),
         input);
     auto ret = mm->add_instruction(
         migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {6, 6}}}), ins);
@@ -1307,7 +1334,8 @@ TEST_CASE(conv_bn_relu_maxpool_test)
                          {{"mode", migraphx::op::pooling_mode::max},
                           {"padding", {0, 0, 0, 0}},
                           {"stride", {2, 2}},
-                          {"lengths", {2, 2}}}),
+                          {"lengths", {2, 2}},
+                          {"dilations", {1, 1}}}),
         l7);

     auto prog = optimize_onnx("conv_bn_relu_maxpool_test.onnx");
@@ -1505,7 +1533,8 @@ TEST_CASE(conv_relu_maxpool_test)
                          {{"mode", migraphx::op::pooling_mode::max},
                           {"padding", {0, 0, 0, 0}},
                           {"stride", {2, 2}},
-                          {"lengths", {2, 2}}}),
+                          {"lengths", {2, 2}},
+                          {"dilations", {1, 1}}}),
         l6);

     auto prog = optimize_onnx("conv_relu_maxpool_test.onnx");
@@ -1530,7 +1559,8 @@ TEST_CASE(conv_relu_maxpool_x2_test)
                          {{"mode", migraphx::op::pooling_mode::max},
                           {"padding", {0, 0, 0, 0}},
                           {"stride", {2, 2}},
-                          {"lengths", {2, 2}}}),
+                          {"lengths", {2, 2}},
+                          {"dilations", {1, 1}}}),
         l6);
     auto l8 = mm->add_parameter("3", {migraphx::shape::float_type, {1, 5, 5, 5}});
@@ -1546,7 +1576,8 @@ TEST_CASE(conv_relu_maxpool_x2_test)
                          {{"mode", migraphx::op::pooling_mode::max},
                           {"padding", {0, 0, 0, 0}},
                           {"stride", {2, 2}},
-                          {"lengths", {2, 2}}}),
+                          {"lengths", {2, 2}},
+                          {"dilations", {1, 1}}}),
         l13);

     auto prog = optimize_onnx("conv_relu_maxpool_x2_test.onnx");
@@ -4245,6 +4276,7 @@ TEST_CASE(lppool_l1_test)
                           {"padding", {0, 0}},
                           {"stride", {1}},
                           {"lengths", {3}},
+                          {"dilations", {1}},
                           {"lp_order", 1}}),
         l0);
     auto prog = optimize_onnx("lppool_l1_test.onnx");
@@ -4261,6 +4293,7 @@ TEST_CASE(lppool_l2_test)
                           {"padding", {0, 0}},
                           {"stride", {1}},
                           {"lengths", {3}},
+                          {"dilations", {1}},
                           {"lp_order", 2}}),
         l0);
     auto prog = optimize_onnx("lppool_l2_test.onnx");
@@ -4513,7 +4546,8 @@ TEST_CASE(maxpool_notset_test)
                          {{"mode", migraphx::op::pooling_mode::max},
                           {"padding", {0, 0, 1, 1}},
                           {"stride", {2, 2}},
-                          {"lengths", {6, 6}}}),
+                          {"lengths", {6, 6}},
+                          {"dilations", {1, 1}}}),
         input);

     auto prog = optimize_onnx("maxpool_notset_test.onnx");
@@ -4521,6 +4555,24 @@ TEST_CASE(maxpool_notset_test)
     EXPECT(p == prog);
 }

+TEST_CASE(maxpool_dilate_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 4, 3}});
+    mm->add_instruction(migraphx::make_op("pooling",
+                                          {{"mode", migraphx::op::pooling_mode::max},
+                                           {"padding", {1, 1}},
+                                           {"stride", {1}},
+                                           {"lengths", {2}},
+                                           {"dilations", {3}}}),
+                        input);
+
+    auto prog = optimize_onnx("maxpool_dilate_test.onnx");
+    EXPECT(p == prog);
+}
+
 TEST_CASE(maxpool_same_upper_test)
 {
     migraphx::program p;
@@ -4530,7 +4582,8 @@ TEST_CASE(maxpool_same_upper_test)
                          {{"mode", migraphx::op::pooling_mode::max},
                           {"padding", {0, 0, 1, 1}},
                           {"stride", {1, 1}},
-                          {"lengths", {2, 2}}}),
+                          {"lengths", {2, 2}},
+                          {"dilations", {1, 1}}}),
         input);

     auto prog = optimize_onnx("maxpool_same_upper_test.onnx");
...
@@ -2202,7 +2202,8 @@ TEST_CASE(pooling_shape0)
                                    {{"mode", migraphx::op::pooling_mode::max},
                                     {"padding", {1}},
                                     {"stride", {0}},
-                                    {"lengths", {1}}}),
+                                    {"lengths", {1}},
+                                    {"dilations", {1}}}),
                  input);
 }
@@ -2215,7 +2216,8 @@ TEST_CASE(pooling_shape1)
                                    {{"mode", migraphx::op::pooling_mode::max},
                                     {"padding", {0, 0}},
                                     {"stride", {3, 3}},
-                                    {"lengths", {1, 1}}}),
+                                    {"lengths", {1, 1}},
+                                    {"dilations", {1, 1}}}),
                  input);
 }
@@ -2229,6 +2231,7 @@ TEST_CASE(pooling_shape2)
                                     {"padding", {0, 0}},
                                     {"stride", {3, 3}},
                                     {"lengths", {1, 1}},
+                                    {"dilations", {1, 1}},
                                     {"ceil_mode", true}}),
                  input);
 }
@@ -2243,6 +2246,7 @@ TEST_CASE(pooling_shape3)
                                     {"padding", {2, 2}},
                                     {"stride", {3, 3}},
                                     {"lengths", {3, 3}},
+                                    {"dilations", {1, 1}},
                                     {"ceil_mode", true}}),
                  input);
 }
@@ -2254,6 +2258,63 @@ TEST_CASE(pooling_shape4)
                  tiny_input);
 }
+TEST_CASE(pooling_shape5)
+{
+    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
+    migraphx::shape output{migraphx::shape::float_type, {4, 3, 1, 1}};
+    expect_shape(output,
+                 migraphx::make_op("pooling",
+                                   {{"mode", migraphx::op::pooling_mode::max},
+                                    {"padding", {0, 0}},
+                                    {"stride", {1, 1}},
+                                    {"lengths", {2, 2}},
+                                    {"dilations", {2, 2}}}),
+                 input);
+}
+
+TEST_CASE(pooling_shape6)
+{
+    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
+    migraphx::shape output{migraphx::shape::float_type, {4, 3, 2, 2}};
+    expect_shape(output,
+                 migraphx::make_op("pooling",
+                                   {{"mode", migraphx::op::pooling_mode::max},
+                                    {"padding", {0, 0}},
+                                    {"stride", {2, 2}},
+                                    {"lengths", {1, 1}},
+                                    {"dilations", {2, 2}}}),
+                 input);
+}
+
+TEST_CASE(pooling_shape7)
+{
+    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
+    migraphx::shape output{migraphx::shape::float_type, {4, 3, 2, 2}};
+    expect_shape(output,
+                 migraphx::make_op("pooling",
+                                   {{"mode", migraphx::op::pooling_mode::max},
+                                    {"padding", {0, 0}},
+                                    {"stride", {3, 3}},
+                                    {"lengths", {1, 1}},
+                                    {"dilations", {3, 3}},
+                                    {"ceil_mode", true}}),
+                 input);
+}
+
+TEST_CASE(pooling_shape8)
+{
+    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
+    migraphx::shape output{migraphx::shape::float_type, {4, 3, 3, 3}};
+    expect_shape(output,
+                 migraphx::make_op("pooling",
+                                   {{"mode", migraphx::op::pooling_mode::max},
+                                    {"padding", {2, 2}},
+                                    {"stride", {1, 1}},
+                                    {"lengths", {3, 3}},
+                                    {"dilations", {2, 2}}}),
+                 input);
+}
+
 TEST_CASE(pooling_dyn_shape0)
 {
     migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {3, 3, {3}}, {3, 3, {3}}, {3, 3}}};
@@ -2261,7 +2322,8 @@ TEST_CASE(pooling_dyn_shape0)
                                    {{"mode", migraphx::op::pooling_mode::max},
                                     {"padding", {1}},
                                     {"stride", {0}},
-                                    {"lengths", {1}}}),
+                                    {"lengths", {1}},
+                                    {"dilations", {1}}}),
                  input);
 }
@@ -2274,7 +2336,8 @@ TEST_CASE(pooling_dyn_shape1)
                                    {{"mode", migraphx::op::pooling_mode::max},
                                     {"padding", {0, 0}},
                                     {"stride", {3, 3}},
-                                    {"lengths", {1, 1}}}),
+                                    {"lengths", {1, 1}},
+                                    {"dilations", {1, 1}}}),
                  input);
 }
@@ -2288,6 +2351,7 @@ TEST_CASE(pooling_dyn_shape2)
                                     {"padding", {0, 0}},
                                     {"stride", {3, 3}},
                                     {"lengths", {1, 1}},
+                                    {"dilations", {1, 1}},
                                     {"ceil_mode", true}}),
                  input);
 }
@@ -2302,7 +2366,8 @@ TEST_CASE(pooling_dyn_shape3)
                                    {{"mode", migraphx::op::pooling_mode::max},
                                     {"padding", {0, 0}},
                                     {"stride", {3, 3}},
-                                    {"lengths", {1, 1}}}),
+                                    {"lengths", {1, 1}},
+                                    {"dilations", {1, 1}}}),
                  input);
 }
@@ -2317,6 +2382,7 @@ TEST_CASE(pooling_dyn_shape4)
                                     {"padding", {2, 2}},
                                     {"stride", {3, 3}},
                                     {"lengths", {3, 3}},
+                                    {"dilations", {1, 1}},
                                     {"ceil_mode", true}}),
                  input);
 }
...
@@ -576,6 +576,8 @@ def disabled_tests_onnx_1_9_0(backend_test):
     backend_test.exclude(r'test_gru_batchwise_cpu')
     backend_test.exclude(r'test_simple_rnn_batchwise_cpu')
     # from OnnxBackendPyTorchConvertedModelTest
+    # MaxPool dilation is partially supported on GPU by a workaround,
+    # but these tests require too-large allocations to work properly
     backend_test.exclude(r'test_MaxPool1d_stride_padding_dilation_cpu')
     backend_test.exclude(r'test_MaxPool2d_stride_padding_dilation_cpu')
...
@@ -788,6 +788,7 @@ TEST_CASE(conv_pooling_dot)
                            {"padding", {0, 0, 0, 0}},
                            {"stride", {1, 1}},
                            {"lengths", {7, 7}},
+                           {"dilations", {1, 1}},
                            {"ceil_mode", 0}}),
         a1);
     auto fl = m1.add_instruction(migraphx::make_op("flatten", {{"axis", 1}}), ap);
@@ -835,6 +836,7 @@ TEST_CASE(conv_pooling_dot)
                            {"padding", {0, 0, 0, 0}},
                            {"stride", {1, 1}},
                            {"lengths", {7, 7}},
+                           {"dilations", {1, 1}},
                            {"ceil_mode", 0}}),
         a1);
     auto fl = m2.add_instruction(migraphx::make_op("flatten", {{"axis", 1}}), ap);
@@ -896,6 +898,7 @@ TEST_CASE(mobilenet_snippet)
                            {"padding", {0, 0, 0, 0}},
                            {"stride", {1, 1}},
                            {"lengths", {7, 7}},
+                           {"dilations", {1, 1}},
                            {"ceil_mode", 0}}),
         d6);
     auto q3 = add_quantize_op(mm, "quantizelinear", ap, scale, zero);
...
@@ -35,7 +35,7 @@ struct test_avg_pooling_1d : verify_program<test_avg_pooling_1d>
         auto* mm = p.get_main_module();
         auto input =
             mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 3, 5}});
-        auto op = migraphx::op::pooling{migraphx::op::pooling_mode::average, {0}, {1}, {3}};
+        auto op = migraphx::op::pooling{migraphx::op::pooling_mode::average, {0}, {1}, {3}, {1}};
         mm->add_instruction(op, input);
         return p;
     }
...
@@ -36,7 +36,7 @@ struct test_avg_pooling_3d : verify_program<test_avg_pooling_3d>
         auto input =
             mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 3, 5, 5, 5}});
         auto op = migraphx::op::pooling{
-            migraphx::op::pooling_mode::average, {1, 1, 1}, {3, 3, 3}, {3, 3, 3}};
+            migraphx::op::pooling_mode::average, {1, 1, 1}, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}};
         mm->add_instruction(op, input);
         return p;
     }
...
@@ -36,7 +36,7 @@ struct test_avg_pooling_3d_opt : verify_program<test_avg_pooling_3d_opt>
         auto input =
             mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 2, 3, 3, 3}});
         auto op = migraphx::op::pooling{
-            migraphx::op::pooling_mode::average, {0, 0, 0}, {1, 1, 1}, {3, 3, 3}};
+            migraphx::op::pooling_mode::average, {0, 0, 0}, {1, 1, 1}, {3, 3, 3}, {1, 1, 1}};
         mm->add_instruction(op, input);
         return p;
     }
...
@@ -37,7 +37,7 @@ struct test_avg_pooling_ceil_3d : verify_program<test_avg_pooling_ceil_3d>
         auto input =
             mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 3, 5, 5, 5}});
         auto op = migraphx::op::pooling{
-            migraphx::op::pooling_mode::average, {1, 1, 1}, {3, 3, 3}, {3, 3, 3}, true};
+            migraphx::op::pooling_mode::average, {1, 1, 1}, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}, true};
         mm->add_instruction(op, input);
         return p;
     }
...
@@ -36,7 +36,7 @@ struct test_avg_pooling_pad : verify_program<test_avg_pooling_pad>
         auto* mm = p.get_main_module();
         auto input =
             mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 3, 7}});
-        auto op = migraphx::op::pooling{migraphx::op::pooling_mode::average, {2}, {1}, {3}};
+        auto op = migraphx::op::pooling{migraphx::op::pooling_mode::average, {2}, {1}, {3}, {1}};
         mm->add_instruction(op, input);
         return p;
     }
...