Commit 343a5774 authored by Shucai Xiao

merge from develop

parents 179fa5b2 2d0d96e8
...@@ -16,6 +16,13 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
enum padding_mode_t
{
default_, // NOLINT
same,
valid
};
struct not_computable
{
argument compute(const shape&, const std::vector<argument>&) const
...@@ -58,12 +65,7 @@ struct convolution
std::array<std::size_t, 2> padding = {{0, 0}};
std::array<std::size_t, 2> stride = {{1, 1}};
std::array<std::size_t, 2> dilation = {{1, 1}};
enum padding_mode_t
{
default_, // NOLINT
same,
valid
};
padding_mode_t padding_mode = default_;
int group = 1;
...@@ -138,12 +140,7 @@ struct im2col
std::array<std::size_t, 2> padding = {{0, 0}};
std::array<std::size_t, 2> stride = {{1, 1}};
std::array<std::size_t, 2> dilation = {{1, 1}};
enum padding_mode_t
{
default_, // NOLINT
same,
valid
};
padding_mode_t padding_mode = default_;
template <class Self, class F>
...@@ -189,12 +186,14 @@ struct pooling
std::array<std::size_t, 2> padding = {{0, 0}};
std::array<std::size_t, 2> stride = {{1, 1}};
std::array<std::size_t, 2> lengths = {{1, 1}};
padding_mode_t padding_mode = default_;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.mode, "mode"),
f(self.padding, "padding"),
f(self.padding, "padding_mode"),
f(self.stride, "stride"),
f(self.lengths, "lengths"));
}
...@@ -211,7 +210,10 @@ struct pooling
assert(lengths[0] <= (input.lens()[2] + 2 * padding[0]));
assert(lengths[1] <= (input.lens()[3] + 2 * padding[1]));
return {t,
if(padding_mode == default_)
{
return {
t,
{
input.lens()[0],
input.lens()[1],
...@@ -226,6 +228,39 @@ struct pooling
static_cast<float>(stride[1]))) +
1)),
}};
}
else if(padding_mode == same)
{
return {t,
{input.lens()[0],
input.lens()[1],
static_cast<std::size_t>(
std::ceil(static_cast<double>(input.lens()[2]) / stride[0])),
static_cast<std::size_t>(
std::ceil(static_cast<double>(input.lens()[3]) / stride[1]))}};
}
else if(padding_mode == valid)
{
return {t,
{
input.lens()[0],
input.lens()[1],
std::size_t(std::max<std::ptrdiff_t>(
1,
std::ptrdiff_t(std::floor((input.lens()[2] - lengths[0]) /
static_cast<float>(stride[0]))) +
1)),
std::size_t(std::max<std::ptrdiff_t>(
1,
std::ptrdiff_t(std::floor((input.lens()[3] - lengths[1]) /
static_cast<float>(stride[1]))) +
1)),
}};
}
else
{
MIGRAPHX_THROW("Invalid padding mode");
}
}
};
...@@ -614,6 +649,42 @@ struct reshape
int output_alias(const std::vector<shape>&) const { return 0; }
};
struct pad
{
std::vector<int64_t> pads;
float value = 0.0f;
enum pad_op_mode_t
{
constant_pad,
reflect_pad,
edge_pad
};
pad_op_mode_t mode = constant_pad;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.mode, "mode"), f(self.pads, "pads"), f(self.value, "value"));
}
std::string name() const { return "pad"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this}.has(1);
auto&& idims = inputs.front().lens();
std::vector<std::size_t> rdims(idims.begin(), idims.end());
std::size_t num_dims = rdims.size();
for(std::size_t i = 0; i < num_dims; i++)
{
rdims[i] += pads[i] + pads[i + num_dims];
}
shape s{inputs.front().type(), rdims};
return s;
}
};
struct as_shape
{
shape s;
...
...@@ -87,6 +87,7 @@ struct onnx_parser
add_mem_op("ConstantFill", &onnx_parser::parse_constant_fill);
add_mem_op("Transpose", &onnx_parser::parse_transpose);
add_mem_op("RNN", &onnx_parser::parse_rnn);
add_mem_op("Pad", &onnx_parser::parse_pad);
// init the activation function map
init_actv_func();
...@@ -228,24 +229,30 @@ struct onnx_parser
parse_conv(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
{
op::convolution op;
auto l0 = args[0];
if(contains(attributes, "pads"))
{
if(contains(attributes, "auto_pad"))
{
MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
}
std::vector<std::size_t> padding(4);
std::vector<std::int64_t> padding;
copy(attributes["pads"].ints(), padding.begin());
copy(attributes["pads"].ints(), std::back_inserter(padding));
if(padding.size() != 4)
{
MIGRAPHX_THROW("padding should have 4 values");
}
if(padding[0] != padding[2] || padding[1] != padding[3])
{
MIGRAPHX_THROW("migraphx does not support asymetric padding");
// insert zeros for pad op (args[0] has 4 dims)
padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
l0 = prog.add_instruction(op::pad{padding}, l0);
}
else
{
op.padding[0] = padding[0];
op.padding[1] = padding[1];
}
op.padding[0] = padding[0];
op.padding[1] = padding[1];
}
if(contains(attributes, "strides"))
{
...@@ -265,7 +272,7 @@ struct onnx_parser
if(s.find("SAME") != std::string::npos)
{
op.padding_mode = op::convolution::same;
op.padding_mode = op::padding_mode_t::same;
}
}
if(contains(attributes, "group"))
...@@ -279,7 +286,7 @@ struct onnx_parser
auto l2 = prog.add_instruction(op::broadcast{axis, l1->get_shape()}, args[2]);
return prog.add_instruction(op::add{}, l1, l2);
}
return prog.add_instruction(op, args);
return prog.add_instruction(op, l0, args[1]);
}
instruction_ref parse_pooling(const std::string& name,
...@@ -287,6 +294,7 @@ struct onnx_parser
std::vector<instruction_ref> args)
{
op::pooling op{ends_with(name, "MaxPool") ? "max" : "average"};
auto l0 = args[0];
if(starts_with(name, "Global"))
{
auto lens = args.front()->get_shape().lens();
...@@ -294,18 +302,23 @@ struct onnx_parser
}
if(contains(attributes, "pads"))
{
std::vector<std::size_t> padding(4);
std::vector<std::int64_t> padding;
copy(attributes["pads"].ints(), padding.begin());
copy(attributes["pads"].ints(), std::back_inserter(padding));
if(padding.size() != 4)
{
MIGRAPHX_THROW("padding should have 4 values");
}
if(padding[0] != padding[2] || padding[1] != padding[3])
{
MIGRAPHX_THROW("migraphx does not support asymetric padding");
// insert zeros for pad op (args[0] has 4 dims)
padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
l0 = prog.add_instruction(op::pad{padding}, l0);
}
else
{
op.padding[0] = padding[0];
op.padding[1] = padding[1];
}
op.padding[0] = padding[0];
op.padding[1] = padding[1];
}
if(contains(attributes, "strides"))
{
...@@ -318,13 +331,14 @@ struct onnx_parser
if(contains(attributes, "auto_pad"))
{
auto s = attributes["auto_pad"].s();
if(to_upper(s) != "NOTSET")
if(s.find("SAME_UPPER") == std::string::npos)
{
MIGRAPHX_THROW("auto_pad is not supported for pooling");
MIGRAPHX_THROW("auto_pad only supports SAME_UPPER for pooling");
}
op.padding_mode = op::padding_mode_t::same;
}
return prog.add_instruction(op, std::move(args));
return prog.add_instruction(op, l0);
}
instruction_ref
...@@ -562,6 +576,28 @@ struct onnx_parser
return prog.add_instruction(migraphx::op::transpose{perm}, args.front());
}
instruction_ref
parse_pad(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
{
std::vector<int64_t> pads{};
float value = 0.0f;
if(contains(attributes, "pads"))
{
auto&& pad_vals = attributes["pads"].ints();
pads = std::vector<int64_t>(pad_vals.begin(), pad_vals.end());
}
if(contains(attributes, "value"))
{
value = parse_value(attributes.at("value")).at<float>();
}
if(contains(attributes, "mode"))
{
auto mode = attributes.at("mode").s();
if(mode != "constant")
MIGRAPHX_THROW("migraphx currently only supports constant padding");
}
return prog.add_instruction(migraphx::op::pad{pads, value}, args.front());
}
// Use a literal instruction to replace the shape since, output of
// shape operator are literals in migraphx
instruction_ref
...
...@@ -298,6 +298,32 @@ struct cpu_contiguous
}
};
struct cpu_pad
{
op::pad op;
std::string name() const { return "cpu::pad"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
argument compute(context&, const shape& output_shape, std::vector<argument> args) const
{
assert(output_shape.standard());
argument result{output_shape};
result.visit([&](auto output) { std::fill(output.begin(), output.end(), op.value); });
visit_all(result, args[0])([&](auto output, auto input) {
shape_for_each(input.get_shape(), [&](const auto& idx) {
std::vector<std::size_t> new_idx(idx.size());
std::transform(
idx.begin(), idx.end(), op.pads.begin(), new_idx.begin(), [](auto i, auto j) {
return i + j;
});
output(new_idx.begin(), new_idx.end()) = input(idx.begin(), idx.end());
});
});
return result;
}
};
struct cpu_concat
{
op::concat op;
...@@ -663,6 +689,7 @@ struct cpu_apply
apply_map["batch_norm_inference"] =
extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
apply_map["pad"] = extend_op<cpu_pad, op::pad>();
apply_map["concat"] = extend_op<cpu_concat, op::concat>(); apply_map["concat"] = extend_op<cpu_concat, op::concat>();
apply_map["gather"] = extend_op<cpu_gather, op::gather>(); apply_map["gather"] = extend_op<cpu_gather, op::gather>();
apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>(); apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>();
......
...@@ -28,6 +28,7 @@ add_library(migraphx_device ...@@ -28,6 +28,7 @@ add_library(migraphx_device
device/contiguous.cpp device/contiguous.cpp
device/mul.cpp device/mul.cpp
device/concat.cpp device/concat.cpp
device/pad.cpp
device/gather.cpp
)
set_target_properties(migraphx_device PROPERTIES EXPORT_NAME device)
...@@ -57,6 +58,7 @@ add_library(migraphx_gpu
sigmoid.cpp
abs.cpp
elu.cpp
pad.cpp
gather.cpp
)
set_target_properties(migraphx_gpu PROPERTIES EXPORT_NAME gpu)
...
...@@ -313,6 +313,12 @@ void nary_impl(hipStream_t stream, F f, argument result, Arguments... args)
nary_nonstandard_impl(stream, f, result, args...);
}
template <class F>
void nary_impl(hipStream_t stream, F f, argument result)
{
nary_standard_impl(stream, f, result);
}
template <class... Arguments>
auto nary_nonstandard(hipStream_t stream, argument result, Arguments... args)
{
...
#include <migraphx/shape.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/gpu/device/nary.hpp>
#include <migraphx/gpu/device/pad.hpp>
#include <migraphx/gpu/device/tensor.hpp>
#include <migraphx/gpu/device/launch.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
argument
pad(hipStream_t stream, argument result, argument arg1, float value, std::vector<std::int64_t> pads)
{
std::size_t nelements = arg1.get_shape().elements();
nary(stream, result)([=] { return value; });
visit_all(result, arg1)([&](auto output, auto input) {
visit_tensor_size(result.get_shape().lens().size(), [&](auto ndim) {
std::size_t offsets[ndim];
std::copy(pads.begin(), pads.begin() + ndim, offsets);
auto* outptr = output.data();
const auto* inptr = input.data();
hip_tensor_descriptor<ndim> desc_input(input.get_shape());
hip_tensor_descriptor<ndim> desc_output(output.get_shape());
gs_launch(stream, nelements)([=](auto i) {
auto idx = desc_input.multi(i);
for(std::size_t j = 0; j < ndim; j++)
{
idx[j] += offsets[j];
}
outptr[desc_output.linear(idx)] = inptr[i];
});
});
});
return result;
}
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_PAD_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_PAD_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
argument pad(hipStream_t stream,
argument result,
argument arg1,
float value,
std::vector<std::int64_t> pads);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_PAD_HPP
#define MIGRAPHX_GUARD_RTGLIB_PAD_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/pad.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct hip_pad
{
op::pad op;
std::string name() const { return "gpu::pad"; }
shape compute_shape(std::vector<shape> inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
...@@ -40,6 +40,7 @@
#include <migraphx/gpu/pooling.hpp>
#include <migraphx/gpu/gemm.hpp>
#include <migraphx/gpu/concat.hpp>
#include <migraphx/gpu/pad.hpp>
#include <migraphx/gpu/gather.hpp>
#include <utility>
#include <functional>
...@@ -94,6 +95,8 @@ struct miopen_apply
add_extend_op<hip_concat, op::concat>("concat");
add_extend_op<miopen_softmax, op::softmax>("softmax");
add_extend_op<hip_gather, op::gather>("gather");
add_extend_op<hip_pad, op::pad>("pad");
add_convolution_op();
add_pooling_op();
add_batch_norm_inference_op();
...
#include <migraphx/gpu/pad.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/device/pad.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
shape hip_pad::compute_shape(std::vector<shape> inputs) const
{
inputs.pop_back();
return op.compute_shape(inputs);
}
argument hip_pad::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
return device::pad(ctx.get_stream().get(), args.back(), args.front(), op.value, op.pads);
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
...@@ -1802,4 +1802,18 @@ TEST_CASE(rnn_bidirectional)
}
}
TEST_CASE(pad_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l0 = p.add_literal(migraphx::literal{s, {1, 2, 3, 4}});
p.add_instruction(migraphx::op::pad{{1, 1, 1, 1}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(16);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4, 0, 0, 0, 0, 0};
EXPECT(migraphx::verify_range(results_vector, gold));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
...@@ -934,6 +934,41 @@ struct test_concat_relu
}
};
struct test_pad
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s0{migraphx::shape::int32_type, {1, 96, 165, 165}};
std::vector<int64_t> pads0 = {0, 0, 0, 0, 0, 0, 1, 1};
std::vector<int64_t> pads1 = {0, 0, 0, 0, 1, 1, 1, 1};
std::vector<int64_t> pads2 = {1, 1, 1, 1, 0, 0, 0, 0};
std::vector<int64_t> pads3 = {1, 0, 1, 0, 1, 0, 2, 0};
auto l0 = p.add_parameter("x", s0);
p.add_instruction(migraphx::op::pad{pads0}, l0);
p.add_instruction(migraphx::op::pad{pads1}, l0);
p.add_instruction(migraphx::op::pad{pads2}, l0);
p.add_instruction(migraphx::op::pad{pads3}, l0);
return p;
}
};
struct test_pooling_autopad
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s0{migraphx::shape::float_type, {1, 3, 63, 63}};
auto l0 = p.add_parameter("x", s0);
migraphx::op::pooling op{"max"};
op.padding_mode = migraphx::op::padding_mode_t::same;
op.lengths = {2, 2};
op.stride = {2, 2};
p.add_instruction(op, l0);
return p;
}
};
struct test_gather
{
migraphx::program create_program() const
...@@ -1391,10 +1426,12 @@ struct test_rnn_bidirectional10
int main()
{
verify_program<test_pooling_autopad>();
verify_program<test_abs>();
verify_program<test_concat>();
verify_program<test_concat2>();
verify_program<test_concat_relu>();
verify_program<test_pad>();
verify_program<test_add>();
verify_program<test_add_half>();
verify_program<test_mul>();
...
...@@ -718,4 +718,12 @@ TEST_CASE(group_conv_test)
migraphx::parse_onnx("group_conv_test.onnx");
}
TEST_CASE(pad_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 2}});
p.add_instruction(migraphx::op::pad{{1, 1, 1, 1}}, l0);
migraphx::parse_onnx("pad_test.onnx");
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }

(new binary file pad_test.onnx: a one-node ONNX graph named "test-pad" containing a single Pad operator with a "pads" attribute, input "0" and output "1")