"...resnet50_tensorflow.git" did not exist on "c894eafed190b1d7a4cec94a10240674a60fded8"
Commit 1e7457cb authored by Shucai Xiao

Merge from branch rnn_operator

parents 5fe0c226 7bab863d
@@ -16,6 +16,13 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
enum padding_mode_t
{
default_, // NOLINT
same,
valid
};
struct not_computable
{
argument compute(const shape&, const std::vector<argument>&) const
@@ -58,12 +65,7 @@ struct convolution
std::array<std::size_t, 2> padding = {{0, 0}};
std::array<std::size_t, 2> stride = {{1, 1}};
std::array<std::size_t, 2> dilation = {{1, 1}};
enum padding_mode_t
{
default_, // NOLINT
same,
valid
};
padding_mode_t padding_mode = default_;
int group = 1;
@@ -138,12 +140,7 @@ struct im2col
std::array<std::size_t, 2> padding = {{0, 0}};
std::array<std::size_t, 2> stride = {{1, 1}};
std::array<std::size_t, 2> dilation = {{1, 1}};
enum padding_mode_t
{
default_, // NOLINT
same,
valid
};
padding_mode_t padding_mode = default_;
template <class Self, class F>
@@ -189,12 +186,14 @@ struct pooling
std::array<std::size_t, 2> padding = {{0, 0}};
std::array<std::size_t, 2> stride = {{1, 1}};
std::array<std::size_t, 2> lengths = {{1, 1}};
padding_mode_t padding_mode = default_;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.mode, "mode"),
f(self.padding, "padding"),
f(self.padding, "padding_mode"),
f(self.stride, "stride"), f(self.stride, "stride"),
f(self.lengths, "lengths")); f(self.lengths, "lengths"));
} }
...@@ -211,7 +210,10 @@ struct pooling ...@@ -211,7 +210,10 @@ struct pooling
assert(lengths[0] <= (input.lens()[2] + 2 * padding[0])); assert(lengths[0] <= (input.lens()[2] + 2 * padding[0]));
assert(lengths[1] <= (input.lens()[3] + 2 * padding[1])); assert(lengths[1] <= (input.lens()[3] + 2 * padding[1]));
return {t, if(padding_mode == default_)
{
return {
t,
{ {
input.lens()[0], input.lens()[0],
input.lens()[1], input.lens()[1],
@@ -226,6 +228,39 @@ struct pooling
static_cast<float>(stride[1]))) +
1)),
}};
}
else if(padding_mode == same)
{
return {t,
{input.lens()[0],
input.lens()[1],
static_cast<std::size_t>(
std::ceil(static_cast<double>(input.lens()[2]) / stride[0])),
static_cast<std::size_t>(
std::ceil(static_cast<double>(input.lens()[3]) / stride[1]))}};
}
else if(padding_mode == valid)
{
return {t,
{
input.lens()[0],
input.lens()[1],
std::size_t(std::max<std::ptrdiff_t>(
1,
std::ptrdiff_t(std::floor((input.lens()[2] - lengths[0]) /
static_cast<float>(stride[0]))) +
1)),
std::size_t(std::max<std::ptrdiff_t>(
1,
std::ptrdiff_t(std::floor((input.lens()[3] - lengths[1]) /
static_cast<float>(stride[1]))) +
1)),
}};
}
else
{
MIGRAPHX_THROW("Invalid padding mode");
}
}
};
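Note: the three branches above are the standard pooling output-size formulas. A minimal standalone sketch of the same arithmetic (helper names are illustrative, not MIGraphX API; kernel <= in is assumed):
#include <algorithm>
#include <cmath>
#include <cstddef>
// default_: floor((in + 2*pad - kernel) / stride) + 1
std::size_t pool_out_default(std::size_t in, std::size_t pad, std::size_t kernel, std::size_t stride)
{
    return (in + 2 * pad - kernel) / stride + 1;
}
// same: ceil(in / stride); the needed padding is implied by the backend
std::size_t pool_out_same(std::size_t in, std::size_t stride)
{
    return static_cast<std::size_t>(std::ceil(static_cast<double>(in) / stride));
}
// valid: max(1, floor((in - kernel) / stride) + 1)
std::size_t pool_out_valid(std::size_t in, std::size_t kernel, std::size_t stride)
{
    return std::max<std::size_t>(1, (in - kernel) / stride + 1);
}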
@@ -395,7 +430,6 @@ struct concat
}
return result;
}
int output_alias(const std::vector<shape>&) const { return 0; }
};
struct slice
@@ -598,11 +632,7 @@ struct reshape
rdims[i] = missing_dim;
}
}
// if(dims.back() == -1)
//{
// rdims.pop_back();
// std::copy(idims.begin() + rdims.size(), idims.end(), std::back_inserter(rdims));
//}
shape s{inputs.front().type(), rdims};
if(s.elements() != inputs.front().elements())
MIGRAPHX_THROW("Wrong number of elements for reshape");
@@ -615,6 +645,42 @@ struct reshape
int output_alias(const std::vector<shape>&) const { return 0; }
};
struct pad
{
std::vector<int64_t> pads;
float value = 0.0f;
enum pad_op_mode_t
{
constant_pad,
reflect_pad,
edge_pad
};
pad_op_mode_t mode = constant_pad;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.mode, "mode"), f(self.pads, "pads"), f(self.value, "value"));
}
std::string name() const { return "pad"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this}.has(1);
auto&& idims = inputs.front().lens();
std::vector<std::size_t> rdims(idims.begin(), idims.end());
std::size_t num_dims = rdims.size();
for(std::size_t i = 0; i < num_dims; i++)
{
rdims[i] += pads[i] + pads[i + num_dims];
}
shape s{inputs.front().type(), rdims};
return s;
}
};
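Note: pads follows the ONNX layout, so for an n-dimensional input the first n entries pad the front of each dimension and the last n entries pad the back, and each output dimension grows by pads[i] + pads[i + n]. A standalone sketch of that shape arithmetic (illustrative only, not MIGraphX code):
#include <cstdint>
#include <vector>
// e.g. an NCHW tensor {1, 3, 5, 5} with pads {0, 0, 1, 1, 0, 0, 1, 1}
// becomes {1, 3, 7, 7}: one row/column of padding on each side of H and W.
std::vector<std::size_t> padded_dims(const std::vector<std::size_t>& dims,
                                     const std::vector<std::int64_t>& pads)
{
    std::vector<std::size_t> out(dims);
    for(std::size_t i = 0; i < dims.size(); i++)
        out[i] += pads[i] + pads[i + dims.size()];
    return out;
}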
struct as_shape
{
shape s;
@@ -698,8 +764,6 @@ struct gather
return result;
}
int output_alias(const std::vector<shape>&) const { return 0; }
};
struct dot
@@ -1098,7 +1162,7 @@ struct rnn
if(num_directions != hidden_dims[0])
{
MIGRAPHX_THROW("RNN: num_direction mismatch in attribute and input");
}
std::vector<std::size_t> out_dims(in_dims);
...
@@ -25,10 +25,10 @@ struct rewrite_rnn
program& prog,
instruction_ref ins,
instruction_ref input,
instruction_ref w,
instruction_ref r,
instruction_ref bias,
instruction_ref ih,
operation& actv_func) const;
};
...
@@ -88,6 +88,7 @@ struct onnx_parser
add_mem_op("Transpose", &onnx_parser::parse_transpose);
add_mem_op("RNN", &onnx_parser::parse_rnn);
add_mem_op("GRU", &onnx_parser::parse_gru);
add_mem_op("Pad", &onnx_parser::parse_pad);
// init the activation function map
init_actv_func();
@@ -229,24 +230,30 @@ struct onnx_parser
parse_conv(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
{
op::convolution op;
auto l0 = args[0];
if(contains(attributes, "pads")) if(contains(attributes, "pads"))
{ {
if(contains(attributes, "auto_pad")) if(contains(attributes, "auto_pad"))
{ {
MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously"); MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
} }
std::vector<std::size_t> padding(4); std::vector<std::int64_t> padding;
copy(attributes["pads"].ints(), padding.begin()); copy(attributes["pads"].ints(), std::back_inserter(padding));
if(padding.size() != 4) if(padding.size() != 4)
{ {
MIGRAPHX_THROW("padding should have 4 values"); MIGRAPHX_THROW("padding should have 4 values");
} }
if(padding[0] != padding[2] || padding[1] != padding[3]) if(padding[0] != padding[2] || padding[1] != padding[3])
{ {
MIGRAPHX_THROW("migraphx does not support asymetric padding"); // insert zeros for pad op (args[0] has 4 dims)
padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
l0 = prog.add_instruction(op::pad{padding}, l0);
}
else
{
op.padding[0] = padding[0];
op.padding[1] = padding[1];
}
}
if(contains(attributes, "strides"))
{
@@ -266,7 +273,7 @@ struct onnx_parser
if(s.find("SAME") != std::string::npos)
{
op.padding_mode = op::padding_mode_t::same;
}
}
if(contains(attributes, "group"))
@@ -280,7 +287,7 @@ struct onnx_parser
auto l2 = prog.add_instruction(op::broadcast{axis, l1->get_shape()}, args[2]);
return prog.add_instruction(op::add{}, l1, l2);
}
return prog.add_instruction(op, l0, args[1]);
}
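Note: with this change asymmetric ONNX padding no longer throws; the parser inserts an explicit op::pad in front of the convolution and keeps op.padding at zero, while symmetric padding is still folded into the convolution itself. A small sketch of the pad-vector construction used for an NCHW input (assumes the ONNX Conv pads order {top, left, bottom, right}; illustrative only):
#include <cstdint>
#include <vector>
// Build the 8-element op::pad vector from 4 ONNX Conv pads: no padding on N and C,
// leading pads {top, left} and trailing pads {bottom, right} on H and W.
std::vector<std::int64_t> to_pad_op_pads(const std::vector<std::int64_t>& conv_pads)
{
    return {0, 0, conv_pads[0], conv_pads[1], 0, 0, conv_pads[2], conv_pads[3]};
}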
instruction_ref parse_pooling(const std::string& name,
@@ -288,6 +295,7 @@ struct onnx_parser
std::vector<instruction_ref> args)
{
op::pooling op{ends_with(name, "MaxPool") ? "max" : "average"};
auto l0 = args[0];
if(starts_with(name, "Global")) if(starts_with(name, "Global"))
{ {
auto lens = args.front()->get_shape().lens(); auto lens = args.front()->get_shape().lens();
...@@ -295,18 +303,23 @@ struct onnx_parser ...@@ -295,18 +303,23 @@ struct onnx_parser
} }
if(contains(attributes, "pads")) if(contains(attributes, "pads"))
{ {
std::vector<std::size_t> padding(4); std::vector<std::int64_t> padding;
copy(attributes["pads"].ints(), padding.begin()); copy(attributes["pads"].ints(), std::back_inserter(padding));
if(padding.size() != 4) if(padding.size() != 4)
{ {
MIGRAPHX_THROW("padding should have 4 values"); MIGRAPHX_THROW("padding should have 4 values");
} }
if(padding[0] != padding[2] || padding[1] != padding[3]) if(padding[0] != padding[2] || padding[1] != padding[3])
{ {
MIGRAPHX_THROW("migraphx does not support asymetric padding"); // insert zeros for pad op (args[0] has 4 dims)
padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
l0 = prog.add_instruction(op::pad{padding}, l0);
}
else
{
op.padding[0] = padding[0];
op.padding[1] = padding[1];
}
}
if(contains(attributes, "strides"))
{
@@ -319,13 +332,14 @@ struct onnx_parser
if(contains(attributes, "auto_pad"))
{
auto s = attributes["auto_pad"].s();
if(s.find("SAME_UPPER") == std::string::npos)
{
MIGRAPHX_THROW("auto_pad only supports SAME_UPPER for pooling");
}
op.padding_mode = op::padding_mode_t::same;
}
return prog.add_instruction(op, l0);
}
instruction_ref
@@ -563,6 +577,28 @@ struct onnx_parser
return prog.add_instruction(migraphx::op::transpose{perm}, args.front());
}
instruction_ref
parse_pad(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
{
std::vector<int64_t> pads{};
float value = 0.0f;
if(contains(attributes, "pads"))
{
auto&& pad_vals = attributes["pads"].ints();
pads = std::vector<int64_t>(pad_vals.begin(), pad_vals.end());
}
if(contains(attributes, "value"))
{
value = parse_value(attributes.at("value")).at<float>();
}
if(contains(attributes, "mode"))
{
auto mode = attributes.at("mode").s();
if(mode != "constant")
MIGRAPHX_THROW("migraphx currently only supports constant padding");
}
return prog.add_instruction(migraphx::op::pad{pads, value}, args.front());
}
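Note: parse_pad maps the ONNX Pad attributes directly onto op::pad (pads becomes the pads vector, value becomes the fill value, and any mode other than constant is rejected). A minimal usage sketch of the resulting operator, mirroring the cpu test added further below (header set is illustrative):
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/program.hpp>
// Pad a 2x2 literal by one element on every side with a fill value of 0.
migraphx::program make_padded()
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {2, 2}};
    auto l0 = p.add_literal(migraphx::literal{s, {1, 2, 3, 4}});
    p.add_instruction(migraphx::op::pad{{1, 1, 1, 1}, 0.0f}, l0);
    return p;
}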
// Use a literal instruction to replace the shape since the output of
// the shape operator is a literal in migraphx
instruction_ref
@@ -664,11 +700,11 @@ struct onnx_parser
if(contains(attributes, "hidden_size"))
{
std::size_t hidden_size_att = parse_value(attributes.at("hidden_size")).at<int>();
if(hidden_size != hidden_size_att)
{
MIGRAPHX_THROW("RNN: hidden size mismatch in input and attribute");
}
}
// Handling of direction to be added later
@@ -699,12 +735,13 @@ struct onnx_parser
for_each(vec_names.begin(), vec_names.end(), [&](auto& fn) {
if(map_actv_funcs.count(fn) == 0)
{
MIGRAPHX_THROW("RNN: activation function " + std::string(fn) + " not supported");
}
});
// bidirectional case should have two activation functions.
// one is for forward, and the other is for reverse.
// if only one actv function is provided, we use it in both
// forward and reverse direction
if(dirct == op::rnn::bidirectional)
{
@@ -714,9 +751,9 @@ struct onnx_parser
}
}
std::vector<operation> vec_actv_funcs(vec_names.size());
std::transform(vec_names.begin(), vec_names.end(), vec_actv_funcs.begin(), [&](auto& fn) {
return map_actv_funcs[fn];
});
// To be added later
@@ -915,9 +952,8 @@ struct onnx_parser
// For RNN, LSTM, and GRU operators, one of the input arguments
// is prim::Undefined, and it is ignored by protobuf. We use a
// hack to ignore this argument for these three operators
const std::string& op_type = node.op_type();
if((op_type == "RNN" || op_type == "LSTM" || op_type == "GRU") && input.empty())
{
continue;
}
...
#include <migraphx/program.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/env.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/time.hpp>
@@ -134,6 +135,12 @@ instruction_ref program::replace_instruction(instruction_ref ins, instruction_ref rep)
assert(has_instruction(ins));
assert(has_instruction(rep));
assert(ins != rep);
if(ins == std::prev(this->end()))
{
return replace_instruction(ins, op::identity{}, rep);
}
// TODO: Should it be an error if the output is empty?
if(ins->outputs().empty())
{
...
@@ -10,7 +10,7 @@ inline namespace MIGRAPHX_INLINE_NS {
void rewrite_rnn::apply(program& prog) const
{
std::unordered_map<instruction_ref, instruction_ref> map_last_output;
for(auto ins : iterator_for(prog))
{
// rewrite rnn operator
@@ -27,7 +27,7 @@ void rewrite_rnn::apply(program& prog) const
std::size_t batch_size = seq_shape.lens()[1];
shape::type_t type = seq_shape.type();
migraphx::shape ih_shape{type, {1, batch_size, hidden_size}};
std::vector<float> data(ih_shape.elements(), 0);
auto rnn_op = any_cast<op::rnn>(ins->get_operator());
op::rnn::rnn_direction_t dicrt = rnn_op.direction;
@@ -85,19 +85,33 @@ void rewrite_rnn::apply(program& prog) const
ih_reverse,
rnn_op.actv_funcs.at(1));
auto concat_output =
prog.insert_instruction(ins, op::concat{1}, ret_forward[1], ret_reverse[1]);
auto last_output = prog.insert_instruction(ins, op::squeeze{{0}}, concat_output);
// The following logic is to ensure the last instruction rewritten from
// rnn operator is a concat instruction
// sequence len is 1
instruction_ref hidden_output{};
if(ret_forward[0] == prog.end())
{
hidden_output = prog.replace_instruction(
ins, op::concat{1}, ret_forward[1], ret_reverse[1]);
}
else
{
ret_forward[0] =
prog.insert_instruction(ins, op::concat{0}, ret_forward[0], ret_forward[1]);
ret_reverse[0] =
prog.insert_instruction(ins, op::concat{0}, ret_reverse[1], ret_reverse[0]);
hidden_output = prog.replace_instruction(
ins, op::concat{1}, {ret_forward[0], ret_reverse[0]});
}
map_last_output[hidden_output] = last_output;
}
else
{
bool is_forward = (dicrt == op::rnn::rnn_direction_t::forward);
// input weight matrix
auto w = args[1];
@@ -125,10 +139,24 @@ void rewrite_rnn::apply(program& prog) const
auto ret = rnn_cell(
is_forward, prog, ins, args[0], w, r, bias, ih, rnn_op.actv_funcs.at(0));
auto last_output = prog.insert_instruction(ins, op::squeeze{{0}}, ret[1]);
// following logic is to ensure the last instruction is a
// concat instruction
// sequence len is 1
instruction_ref hidden_output{};
if(ret[0] == prog.end())
{
hidden_output = prog.replace_instruction(ins, op::concat{0}, ret[1]);
}
else
{
auto concat_arg0 = is_forward ? ret[0] : ret[1];
auto concat_arg1 = is_forward ? ret[1] : ret[0];
hidden_output =
prog.replace_instruction(ins, op::concat{0}, concat_arg0, concat_arg1);
}
map_last_output[hidden_output] = last_output;
}
}
@@ -138,12 +166,15 @@ void rewrite_rnn::apply(program& prog) const
// so we can just use it as the output here
if(ins->name() == "rnn_last_output")
{
auto inputs = ins->inputs();
assert(inputs.size() == 1);
auto arg = inputs[0];
if(map_last_output.count(arg) == 0)
{
MIGRAPHX_THROW("RNN_LAST_OUTPUT: no related rnn operator as its input");
}
prog.replace_instruction(ins, map_last_output[arg]);
}
}
}
@@ -181,11 +212,12 @@ std::vector<instruction_ref> rewrite_rnn::rnn_cell(bool is_forward,
bias = prog.insert_instruction(ins, op::broadcast{1, sih->get_shape()}, b);
}
instruction_ref hidden_out = prog.end(), last_out;
last_out = prog.insert_instruction(ins, op::unsqueeze{{0, 1}}, sih);
std::size_t seq_len = input->get_shape().lens()[0];
for(std::size_t i = 0; i < seq_len; i++)
{
long seq_index = is_forward ? i : (seq_len - 1 - i);
auto xt = prog.insert_instruction(ins, op::slice{{0}, {seq_index}, {seq_index + 1}}, input);
xt = prog.insert_instruction(ins, op::squeeze{{0}}, xt);
auto xt_wi = prog.insert_instruction(ins, op::dot{}, xt, tran_sw);
@@ -205,29 +237,33 @@ std::vector<instruction_ref> rewrite_rnn::rnn_cell(bool is_forward,
ht = prog.insert_instruction(ins, actv_func, ht);
sih = ht;
// add the dimensions of sequence length (axis 0 for sequence length,
// axis 1 for num_directions
last_out = prog.insert_instruction(ins, op::unsqueeze{{0, 1}}, ht);
// concatenation for the last last_out is performed in the apply()
// function to ensure the last instruction is concat, then we have
// output inserted
if(i < seq_len - 1)
{
if(is_forward)
{
hidden_out =
(seq_index == 0)
? last_out
: prog.insert_instruction(ins, op::concat{0}, hidden_out, last_out);
}
else
{
hidden_out =
(seq_index == seq_len - 1)
? last_out
: prog.insert_instruction(ins, op::concat{0}, last_out, hidden_out);
}
}
}
return {hidden_out, last_out};
}
} // namespace MIGRAPHX_INLINE_NS
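Note: rnn_cell unrolls the vanilla RNN recurrence Ht = f(Xt*W^T + Ht-1*R^T + Wb + Rb) one time step at a time, slicing Xt from the sequence and concatenating the per-step hidden states. A standalone sketch of a single step, assuming a tanh activation and a combined bias b = Wb + Rb (illustrative only, not MIGraphX code):
#include <cmath>
#include <cstddef>
#include <vector>
// One step: h_t = tanh(x_t * W^T + h_{t-1} * R^T + b), row-major buffers.
// x_t is [batch x input], W is [hidden x input], R is [hidden x hidden], b is [hidden].
std::vector<float> rnn_step(const std::vector<float>& xt, const std::vector<float>& h,
                            const std::vector<float>& W, const std::vector<float>& R,
                            const std::vector<float>& b,
                            std::size_t batch, std::size_t input, std::size_t hidden)
{
    std::vector<float> out(batch * hidden, 0.0f);
    for(std::size_t n = 0; n < batch; n++)
        for(std::size_t j = 0; j < hidden; j++)
        {
            float acc = b[j];
            for(std::size_t k = 0; k < input; k++)
                acc += xt[n * input + k] * W[j * input + k];
            for(std::size_t k = 0; k < hidden; k++)
                acc += h[n * hidden + k] * R[j * hidden + k];
            out[n * hidden + j] = std::tanh(acc);
        }
    return out;
}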
...
@@ -14,7 +14,8 @@ bool is_nonstandard_reshaper(instruction_ref ins)
{
// clang-format off
static const std::unordered_set<std::string> names = {
"reshape",
"contiguous"
};
// clang-format on
return contains(names, ins->name()) and ins->inputs().front()->name() == "contiguous";
...
@@ -298,6 +298,32 @@ struct cpu_contiguous
}
};
struct cpu_pad
{
op::pad op;
std::string name() const { return "cpu::contiguous"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
argument compute(context&, const shape& output_shape, std::vector<argument> args) const
{
assert(output_shape.standard());
argument result{output_shape};
result.visit([&](auto output) { std::fill(output.begin(), output.end(), op.value); });
visit_all(result, args[0])([&](auto output, auto input) {
shape_for_each(input.get_shape(), [&](const auto& idx) {
std::vector<std::size_t> new_idx(idx.size());
std::transform(
idx.begin(), idx.end(), op.pads.begin(), new_idx.begin(), [](auto i, auto j) {
return i + j;
});
output(new_idx.begin(), new_idx.end()) = input(idx.begin(), idx.end());
});
});
return result;
}
};
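Note: cpu_pad first fills the whole output with op.value and then copies every input element to its index shifted by the leading pads. The same idea for a 1-D buffer (standalone sketch, illustrative only):
#include <cstddef>
#include <vector>
// Pad a 1-D buffer with `before` elements in front and `after` elements behind,
// both filled with `value`; the original data is shifted right by `before`.
std::vector<float> pad1d(const std::vector<float>& in, std::size_t before, std::size_t after, float value)
{
    std::vector<float> out(in.size() + before + after, value); // fill with the pad value
    for(std::size_t i = 0; i < in.size(); i++)
        out[i + before] = in[i];                                // copy at the shifted index
    return out;
}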
struct cpu_concat
{
op::concat op;
@@ -663,6 +689,7 @@ struct cpu_apply
apply_map["batch_norm_inference"] =
extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
apply_map["pad"] = extend_op<cpu_pad, op::pad>();
apply_map["concat"] = extend_op<cpu_concat, op::concat>(); apply_map["concat"] = extend_op<cpu_concat, op::concat>();
apply_map["gather"] = extend_op<cpu_gather, op::gather>(); apply_map["gather"] = extend_op<cpu_gather, op::gather>();
apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>(); apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>();
......
...@@ -28,6 +28,7 @@ add_library(migraphx_device ...@@ -28,6 +28,7 @@ add_library(migraphx_device
device/contiguous.cpp device/contiguous.cpp
device/mul.cpp device/mul.cpp
device/concat.cpp device/concat.cpp
device/pad.cpp
device/gather.cpp
)
set_target_properties(migraphx_device PROPERTIES EXPORT_NAME device)
@@ -57,6 +58,7 @@ add_library(migraphx_gpu
sigmoid.cpp
abs.cpp
elu.cpp
pad.cpp
gather.cpp
)
set_target_properties(migraphx_gpu PROPERTIES EXPORT_NAME gpu)
...
@@ -313,6 +313,12 @@ void nary_impl(hipStream_t stream, F f, argument result, Arguments... args)
nary_nonstandard_impl(stream, f, result, args...);
}
template <class F>
void nary_impl(hipStream_t stream, F f, argument result)
{
nary_standard_impl(stream, f, result);
}
template <class... Arguments>
auto nary_nonstandard(hipStream_t stream, argument result, Arguments... args)
{
...
#include <migraphx/shape.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/gpu/device/nary.hpp>
#include <migraphx/gpu/device/pad.hpp>
#include <migraphx/gpu/device/tensor.hpp>
#include <migraphx/gpu/device/launch.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
argument
pad(hipStream_t stream, argument result, argument arg1, float value, std::vector<std::int64_t> pads)
{
std::size_t nelements = arg1.get_shape().elements();
nary(stream, result)([=] { return value; });
visit_all(result, arg1)([&](auto output, auto input) {
visit_tensor_size(result.get_shape().lens().size(), [&](auto ndim) {
std::size_t offsets[ndim];
std::copy(pads.begin(), pads.begin() + ndim, offsets);
auto* outptr = output.data();
const auto* inptr = input.data();
hip_tensor_descriptor<ndim> desc_input(input.get_shape());
hip_tensor_descriptor<ndim> desc_output(output.get_shape());
gs_launch(stream, nelements)([=](auto i) {
auto idx = desc_input.multi(i);
for(std::size_t j = 0; j < ndim; j++)
{
idx[j] += offsets[j];
}
outptr[desc_output.linear(idx)] = inptr[i];
});
});
});
return result;
}
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
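Note: the device kernel maps input element i to the output by converting i to a multi-index, adding the per-dimension leading pads, and re-linearizing with the output strides. A host-side sketch of that mapping for the 2-D case (standalone, illustrative only):
#include <cstddef>
// Map input coordinate (r, c) of an R x C matrix into a padded output that is
// (R + pad_top + pad_bottom) x (C + pad_left + pad_right) wide, returning the
// linear offset of the element inside the padded output.
std::size_t padded_offset(std::size_t r, std::size_t c,
                          std::size_t out_cols, std::size_t pad_top, std::size_t pad_left)
{
    return (r + pad_top) * out_cols + (c + pad_left);
}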
@@ -107,6 +107,7 @@ argument miopen_gemm::compute(context& ctx,
ldc);
});
return args[2];
}
...
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_PAD_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_PAD_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
argument pad(hipStream_t stream,
argument result,
argument arg1,
float value,
std::vector<std::int64_t> pads);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_PAD_HPP
#define MIGRAPHX_GUARD_RTGLIB_PAD_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/pad.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct hip_pad
{
op::pad op;
std::string name() const { return "gpu::pad"; }
shape compute_shape(std::vector<shape> inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
@@ -40,6 +40,7 @@
#include <migraphx/gpu/pooling.hpp>
#include <migraphx/gpu/gemm.hpp>
#include <migraphx/gpu/concat.hpp>
#include <migraphx/gpu/pad.hpp>
#include <migraphx/gpu/gather.hpp>
#include <utility>
#include <functional>
@@ -54,6 +55,7 @@ struct miopen_apply
program* prog = nullptr;
context ctx{};
std::unordered_map<std::string, std::function<instruction_ref(instruction_ref)>> apply_map{};
instruction_ref last{};
void check_shape(shape x, instruction_ref i)
{
@@ -64,6 +66,7 @@ struct miopen_apply
void init()
{
this->last = instruction::get_output_alias(std::prev(prog->end()));
add_miopen_simple_op<miopen_relu>("relu", make_relu); add_miopen_simple_op<miopen_relu>("relu", make_relu);
add_miopen_simple_op<miopen_sigmoid>("sigmoid", make_sigmoid); add_miopen_simple_op<miopen_sigmoid>("sigmoid", make_sigmoid);
add_miopen_simple_op<miopen_abs>("abs", make_abs); add_miopen_simple_op<miopen_abs>("abs", make_abs);
...@@ -92,6 +95,8 @@ struct miopen_apply ...@@ -92,6 +95,8 @@ struct miopen_apply
add_extend_op<hip_concat, op::concat>("concat"); add_extend_op<hip_concat, op::concat>("concat");
add_extend_op<miopen_softmax, op::softmax>("softmax"); add_extend_op<miopen_softmax, op::softmax>("softmax");
add_extend_op<hip_gather, op::gather>("gather"); add_extend_op<hip_gather, op::gather>("gather");
add_extend_op<hip_pad, op::pad>("pad");
add_convolution_op();
add_pooling_op();
add_batch_norm_inference_op();
@@ -112,7 +117,7 @@ struct miopen_apply
instruction_ref insert_allocation(instruction_ref ins, const shape& s, std::string tag = "")
{
if(ins == last and tag.empty())
{
return prog->add_parameter("output", s);
}
...
#include <migraphx/gpu/pad.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/device/pad.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
shape hip_pad::compute_shape(std::vector<shape> inputs) const
{
inputs.pop_back();
return op.compute_shape(inputs);
}
argument hip_pad::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
return device::pad(ctx.get_stream().get(), args.back(), args.front(), op.value, op.pads);
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
@@ -33,18 +33,17 @@ std::vector<pass> target::get_passes(migraphx::context& gctx) const
dead_code_elimination{},
fwd_conv_batchnorm_rewrite{},
dead_code_elimination{},
rewrite_rnn{},
rewrite_gru{},
dead_code_elimination{},
common_subexpression_elimination{},
dead_code_elimination{},
simplify_algebra{},
dead_code_elimination{},
constant_propagate{},
dead_code_elimination{},
auto_contiguous{},
//simplify_reshapes{},
dead_code_elimination{},
lowering{ctx},
eliminate_concat{concat_gpu_optimization{}},
...
@@ -1347,101 +1347,473 @@ TEST_CASE(min_test)
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(rnn_forward)
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
std::vector<float> wf_data{0.4691,
0.3185,
-0.2227,
0.4423,
-0.0609,
-0.2803,
0.1744,
0.3146,
0.4049,
-0.3973,
-0.0890,
-0.1636};
std::vector<float> rf_data{-0.0456,
0.1061,
0.1574,
-0.4928,
-0.4300,
-0.1909,
-0.0225,
-0.2668,
0.1840,
-0.4453,
-0.4896,
0.1302,
-0.0929,
0.3545,
-0.4981,
0.0616};
std::vector<float> biasf_data{
-0.4938, 0.4355, -0.3186, 0.2094, 0.1037, -0.1071, 0.4504, -0.3990};
std::vector<float> input(seq_len * batch_size * input_size, 0);
input[0] = input[1] = 1.0;
float clip = 0.0f;
{
std::vector<float> ih_data(num_dirct * batch_size * hidden_size, 0);
migraphx::program p;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
auto seq = p.add_literal(migraphx::literal{in_shape, input});
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
auto ih = p.add_literal(migraphx::literal{ih_shape, ih_data});
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
auto w = p.add_literal(migraphx::literal{w_shape, wf_data});
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
auto r = p.add_literal(migraphx::literal{r_shape, rf_data});
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
auto bias = p.add_literal(migraphx::literal{b_shape, biasf_data});
p.add_instruction(migraphx::op::rnn{hidden_size,
{migraphx::op::tanh{}, migraphx::op::tanh{}},
migraphx::op::rnn::forward,
clip},
seq,
w,
r,
bias,
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
std::vector<float> hs_data_gold{0.37780784,
0.61055139,
0.55168478,
-0.5888475,
-0.37144644,
0.31708236,
0.13104209,
-0.18736027,
0.03445704,
0.19167931,
-0.3946827,
-0.30889652,
-0.22276389,
0.44193283,
-0.16477929,
-0.11893477};
EXPECT(migraphx::verify_range(hs_data, hs_data_gold));
}
{
std::vector<float> ih_data(num_dirct * batch_size * hidden_size, 0);
migraphx::program p;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
auto seq = p.add_literal(migraphx::literal{in_shape, input});
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
auto ih = p.add_literal(migraphx::literal{ih_shape, ih_data});
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
auto w = p.add_literal(migraphx::literal{w_shape, wf_data});
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
auto r = p.add_literal(migraphx::literal{r_shape, rf_data});
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
auto bias = p.add_literal(migraphx::literal{b_shape, biasf_data});
auto out_hs =
p.add_instruction(migraphx::op::rnn{hidden_size,
{migraphx::op::tanh{}, migraphx::op::tanh{}},
migraphx::op::rnn::forward,
clip},
seq,
w,
r,
bias,
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, out_hs);
p.compile(migraphx::cpu::target{});
auto last_output = p.eval({});
std::vector<float> last_output_data;
last_output.visit([&](auto out) { last_output_data.assign(out.begin(), out.end()); });
std::vector<float> last_output_data_gold{0.03445704,
0.19167931,
-0.3946827,
-0.30889652,
-0.22276389,
0.44193283,
-0.16477929,
-0.11893477};
EXPECT(migraphx::verify_range(last_output_data, last_output_data_gold));
}
}
TEST_CASE(rnn_reverse)
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
std::vector<float> wr_data{-0.0296,
-0.1341,
0.1761,
-0.2325,
-0.0717,
0.1852,
0.2720,
0.1471,
-0.1097,
0.3363,
-0.0587,
-0.2302};
std::vector<float> rr_data{0.2528,
-0.2333,
0.3973,
0.1593,
-0.0388,
0.1702,
0.3829,
-0.0712,
-0.1668,
0.3074,
-0.2854,
0.4049,
-0.3737,
-0.1051,
0.4482,
-0.2841};
std::vector<float> biasr_data{-0.3188, 0.1341, -0.4446, 0.1389, 0.3117, 0.3664, 0.2352, 0.2552};
std::vector<float> input(seq_len * batch_size * input_size, 0);
input[0] = input[1] = 1.0;
float clip = 0.0f;
{
std::vector<float> ih_data(num_dirct * batch_size * hidden_size, 0);
migraphx::program p;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
auto seq = p.add_literal(migraphx::literal{in_shape, input});
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
auto ih = p.add_literal(migraphx::literal{ih_shape, ih_data});
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
auto w = p.add_literal(migraphx::literal{w_shape, wr_data});
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
auto r = p.add_literal(migraphx::literal{r_shape, rr_data});
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
auto bias = p.add_literal(migraphx::literal{b_shape, biasr_data});
p.add_instruction(migraphx::op::rnn{hidden_size,
{migraphx::op::tanh{}, migraphx::op::tanh{}},
migraphx::op::rnn::reverse,
clip},
seq,
w,
r,
bias,
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
std::vector<float> hs_data_gold{-0.29385301,
0.16796815,
0.51075965,
0.40258689,
-0.13818839,
0.44124447,
0.14365635,
0.14803654,
-0.0070999,
0.46251031,
-0.20639211,
0.37488942,
-0.0070999,
0.46251031,
-0.20639211,
0.37488942};
EXPECT(migraphx::verify_range(hs_data, hs_data_gold));
}
{
std::vector<float> ih_data(num_dirct * batch_size * hidden_size, 0);
migraphx::program p;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
auto seq = p.add_literal(migraphx::literal{in_shape, input});
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
auto ih = p.add_literal(migraphx::literal{ih_shape, ih_data});
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
auto w = p.add_literal(migraphx::literal{w_shape, wr_data});
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
auto r = p.add_literal(migraphx::literal{r_shape, rr_data});
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
auto bias = p.add_literal(migraphx::literal{b_shape, biasr_data});
auto out_hs =
p.add_instruction(migraphx::op::rnn{hidden_size,
{migraphx::op::tanh{}, migraphx::op::tanh{}},
migraphx::op::rnn::reverse,
clip},
seq,
w,
r,
bias,
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, out_hs);
p.compile(migraphx::cpu::target{});
auto last_output = p.eval({});
std::vector<float> last_output_data;
last_output.visit([&](auto out) { last_output_data.assign(out.begin(), out.end()); });
std::vector<float> last_output_data_gold{-0.29385301,
0.16796815,
0.51075965,
0.40258689,
-0.13818839,
0.44124447,
0.14365635,
0.14803654};
EXPECT(migraphx::verify_range(last_output_data, last_output_data_gold));
}
}
TEST_CASE(rnn_bidirectional)
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 2;
std::vector<float> wf_data{0.4691,
0.3185,
-0.2227,
0.4423,
-0.0609,
-0.2803,
0.1744,
0.3146,
0.4049,
-0.3973,
-0.0890,
-0.1636};
std::vector<float> wr_data{-0.0296,
-0.1341,
0.1761,
-0.2325,
-0.0717,
0.1852,
0.2720,
0.1471,
-0.1097,
0.3363,
-0.0587,
-0.2302};
std::vector<float> rf_data{-0.0456,
0.1061,
0.1574,
-0.4928,
-0.4300,
-0.1909,
-0.0225,
-0.2668,
0.1840,
-0.4453,
-0.4896,
0.1302,
-0.0929,
0.3545,
-0.4981,
0.0616};
std::vector<float> rr_data{0.2528,
-0.2333,
0.3973,
0.1593,
-0.0388,
0.1702,
0.3829,
-0.0712,
-0.1668,
0.3074,
-0.2854,
0.4049,
-0.3737,
-0.1051,
0.4482,
-0.2841};
std::vector<float> biasf_data{
-0.4938, 0.4355, -0.3186, 0.2094, 0.1037, -0.1071, 0.4504, -0.3990};
std::vector<float> biasr_data{-0.3188, 0.1341, -0.4446, 0.1389, 0.3117, 0.3664, 0.2352, 0.2552};
std::vector<float> input(seq_len * batch_size * input_size, 0);
input[0] = input[1] = 1.0;
float clip = 0.0f;
{
std::vector<float> ih_data(num_dirct * batch_size * hidden_size, 0);
migraphx::program p;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
auto seq = p.add_literal(migraphx::literal{in_shape, input});
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
auto ih = p.add_literal(migraphx::literal{ih_shape, ih_data});
auto w_data = wf_data;
w_data.insert(w_data.end(), wr_data.begin(), wr_data.end());
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
auto w = p.add_literal(migraphx::literal{w_shape, w_data});
auto r_data = rf_data;
r_data.insert(r_data.end(), rr_data.begin(), rr_data.end());
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
auto r = p.add_literal(migraphx::literal{r_shape, r_data});
auto bias_data = biasf_data;
bias_data.insert(bias_data.end(), biasr_data.begin(), biasr_data.end());
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
auto bias = p.add_literal(migraphx::literal{b_shape, bias_data});
p.add_instruction(migraphx::op::rnn{hidden_size,
{migraphx::op::tanh{}, migraphx::op::tanh{}},
migraphx::op::rnn::bidirectional,
clip},
seq,
w,
r,
bias,
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
std::vector<float> hs_data_gold{
0.37780784, 0.61055139, 0.55168478, -0.5888475, -0.37144644, 0.31708236,
0.13104209, -0.18736027, -0.29385301, 0.16796815, 0.51075965, 0.40258689,
-0.13818839, 0.44124447, 0.14365635, 0.14803654, 0.03445704, 0.19167931,
-0.3946827, -0.30889652, -0.22276389, 0.44193283, -0.16477929, -0.11893477,
-0.0070999, 0.46251031, -0.20639211, 0.37488942, -0.0070999, 0.46251031,
-0.20639211, 0.37488942};
EXPECT(migraphx::verify_range(hs_data, hs_data_gold));
}
{
std::vector<float> ih_data(num_dirct * batch_size * hidden_size, 0);
migraphx::program p;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
auto seq = p.add_literal(migraphx::literal{in_shape, input});
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
auto ih = p.add_literal(migraphx::literal{ih_shape, ih_data});
auto w_data = wf_data;
w_data.insert(w_data.end(), wr_data.begin(), wr_data.end());
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
auto w = p.add_literal(migraphx::literal{w_shape, w_data});
auto r_data = rf_data;
r_data.insert(r_data.end(), rr_data.begin(), rr_data.end());
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
auto r = p.add_literal(migraphx::literal{r_shape, r_data});
auto bias_data = biasf_data;
bias_data.insert(bias_data.end(), biasr_data.begin(), biasr_data.end());
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
auto bias = p.add_literal(migraphx::literal{b_shape, bias_data});
auto out_hs =
p.add_instruction(migraphx::op::rnn{hidden_size,
{migraphx::op::tanh{}, migraphx::op::tanh{}},
migraphx::op::rnn::bidirectional,
clip},
seq,
w,
r,
bias,
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, out_hs);
p.compile(migraphx::cpu::target{});
auto last_output = p.eval({});
std::vector<float> last_output_data;
last_output.visit([&](auto out) { last_output_data.assign(out.begin(), out.end()); });
std::vector<float> last_output_data_gold{0.03445704,
0.19167931,
-0.3946827,
-0.30889652,
-0.22276389,
0.44193283,
-0.16477929,
-0.11893477,
-0.29385301,
0.16796815,
0.51075965,
0.40258689,
-0.13818839,
0.44124447,
0.14365635,
0.14803654};
EXPECT(migraphx::verify_range(last_output_data, last_output_data_gold));
}
}
TEST_CASE(pad_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l0 = p.add_literal(migraphx::literal{s, {1, 2, 3, 4}});
p.add_instruction(migraphx::op::pad{{1, 1, 1, 1}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(16);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4, 0, 0, 0, 0, 0};
EXPECT(migraphx::verify_range(results_vector, gold));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -934,6 +934,41 @@ struct test_concat_relu
}
};
struct test_pad
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s0{migraphx::shape::int32_type, {1, 96, 165, 165}};
std::vector<int64_t> pads0 = {0, 0, 0, 0, 0, 0, 1, 1};
std::vector<int64_t> pads1 = {0, 0, 0, 0, 1, 1, 1, 1};
std::vector<int64_t> pads2 = {1, 1, 1, 1, 0, 0, 0, 0};
std::vector<int64_t> pads3 = {1, 0, 1, 0, 1, 0, 2, 0};
auto l0 = p.add_parameter("x", s0);
p.add_instruction(migraphx::op::pad{pads0}, l0);
p.add_instruction(migraphx::op::pad{pads1}, l0);
p.add_instruction(migraphx::op::pad{pads2}, l0);
p.add_instruction(migraphx::op::pad{pads3}, l0);
return p;
}
};
struct test_pooling_autopad
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s0{migraphx::shape::float_type, {1, 3, 63, 63}};
auto l0 = p.add_parameter("x", s0);
migraphx::op::pooling op{"max"};
op.padding_mode = migraphx::op::padding_mode_t::same;
op.lengths = {2, 2};
op.stride = {2, 2};
p.add_instruction(op, l0);
return p;
}
};
struct test_gather
{
migraphx::program create_program() const
@@ -1049,12 +1084,354 @@ struct test_conv_bn_relu_pooling2
}
};
struct test_rnn_forward
{
migraphx::program create_program() const
{
std::size_t batch_size = 2;
std::size_t seq_len = 1;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
float clip = 0.0f;
migraphx::program p;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
auto seq = p.add_parameter("seq", in_shape);
auto w = p.add_parameter("w", w_shape);
auto r = p.add_parameter("r", r_shape);
auto bias = p.add_parameter("bias", b_shape);
auto ih = p.add_parameter("ih", ih_shape);
auto output =
p.add_instruction(migraphx::op::rnn{hidden_size,
{migraphx::op::tanh{}, migraphx::op::tanh{}},
migraphx::op::rnn::forward,
clip},
seq,
w,
r,
bias,
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, output);
return p;
}
};
struct test_rnn_forward10
{
migraphx::program create_program() const
{
std::size_t batch_size = 2;
std::size_t seq_len = 10;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
float clip = 0.0f;
migraphx::program p;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
auto seq = p.add_parameter("seq", in_shape);
auto w = p.add_parameter("w", w_shape);
auto r = p.add_parameter("r", r_shape);
auto bias = p.add_parameter("bias", b_shape);
auto ih = p.add_parameter("ih", ih_shape);
auto output =
p.add_instruction(migraphx::op::rnn{hidden_size,
{migraphx::op::tanh{}, migraphx::op::tanh{}},
migraphx::op::rnn::forward,
clip},
seq,
w,
r,
bias,
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, output);
return p;
}
};
struct test_rnn_reverse
{
migraphx::program create_program() const
{
std::size_t batch_size = 2;
std::size_t seq_len = 1;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
float clip = 0.0f;
migraphx::program p;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
auto seq = p.add_parameter("seq", in_shape);
auto w = p.add_parameter("w", w_shape);
auto r = p.add_parameter("r", r_shape);
auto bias = p.add_parameter("bias", b_shape);
auto ih = p.add_parameter("ih", ih_shape);
p.add_instruction(migraphx::op::rnn{hidden_size,
{migraphx::op::tanh{}, migraphx::op::tanh{}},
migraphx::op::rnn::reverse,
clip},
seq,
w,
r,
bias,
ih);
return p;
}
};
struct test_rnn_reverse2
{
migraphx::program create_program() const
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
float clip = 0.0f;
migraphx::program p;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
auto seq = p.add_parameter("seq", in_shape);
auto w = p.add_parameter("w", w_shape);
auto r = p.add_parameter("r", r_shape);
auto bias = p.add_parameter("bias", b_shape);
auto ih = p.add_parameter("ih", ih_shape);
p.add_instruction(migraphx::op::rnn{hidden_size,
{migraphx::op::tanh{}, migraphx::op::tanh{}},
migraphx::op::rnn::reverse,
clip},
seq,
w,
r,
bias,
ih);
return p;
}
};
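// RNN given only the three required inputs (seq, w, r); bias and initial hidden state are omitted.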
struct test_rnn_3args
{
migraphx::program create_program() const
{
std::size_t batch_size = 2;
std::size_t seq_len = 1;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
float clip = 0.0f;
migraphx::program p;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
auto seq = p.add_parameter("seq", in_shape);
auto w = p.add_parameter("w", w_shape);
auto r = p.add_parameter("r", r_shape);
p.add_instruction(migraphx::op::rnn{hidden_size,
{migraphx::op::tanh{}, migraphx::op::tanh{}},
migraphx::op::rnn::reverse,
clip},
seq,
w,
r);
return p;
}
};
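// RNN given four inputs: bias is supplied but the initial hidden state is omitted; reverse direction over five steps.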
struct test_rnn_4args
{
migraphx::program create_program() const
{
std::size_t batch_size = 2;
std::size_t seq_len = 5;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
float clip = 0.0f;
migraphx::program p;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
auto seq = p.add_parameter("seq", in_shape);
auto w = p.add_parameter("w", w_shape);
auto r = p.add_parameter("r", r_shape);
auto bias = p.add_parameter("bias", b_shape);
p.add_instruction(migraphx::op::rnn{hidden_size,
{migraphx::op::tanh{}, migraphx::op::tanh{}},
migraphx::op::rnn::reverse,
clip},
seq,
w,
r,
bias);
return p;
}
};
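// Forward RNN over ten steps with bias but no explicit initial hidden state; checks rnn_last_output.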
struct test_rnn_5args
{
migraphx::program create_program() const
{
std::size_t batch_size = 2;
std::size_t seq_len = 10;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
float clip = 0.0f;
migraphx::program p;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
auto seq = p.add_parameter("seq", in_shape);
auto w = p.add_parameter("w", w_shape);
auto r = p.add_parameter("r", r_shape);
auto bias = p.add_parameter("bias", b_shape);
auto output =
p.add_instruction(migraphx::op::rnn{hidden_size,
{migraphx::op::tanh{}, migraphx::op::tanh{}},
migraphx::op::rnn::forward,
clip},
seq,
w,
r,
bias);
p.add_instruction(migraphx::op::rnn_last_output{}, output);
return p;
}
};
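// Bidirectional RNN (num_dirct = 2) over a single time step; checks rnn_last_output.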
struct test_rnn_bidirectional
{
migraphx::program create_program() const
{
std::size_t batch_size = 2;
std::size_t seq_len = 1;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 2;
float clip = 0.0f;
migraphx::program p;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
auto seq = p.add_parameter("seq", in_shape);
auto w = p.add_parameter("w", w_shape);
auto r = p.add_parameter("r", r_shape);
auto bias = p.add_parameter("bias", b_shape);
auto ih = p.add_parameter("ih", ih_shape);
auto output =
p.add_instruction(migraphx::op::rnn{hidden_size,
{migraphx::op::tanh{}, migraphx::op::tanh{}},
migraphx::op::rnn::bidirectional,
clip},
seq,
w,
r,
bias,
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, output);
return p;
}
};
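// Bidirectional RNN over a 10-step sequence; checks rnn_last_output.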
struct test_rnn_bidirectional10
{
migraphx::program create_program() const
{
std::size_t batch_size = 2;
std::size_t seq_len = 10;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 2;
float clip = 0.0f;
migraphx::program p;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
auto seq = p.add_parameter("seq", in_shape);
auto w = p.add_parameter("w", w_shape);
auto r = p.add_parameter("r", r_shape);
auto bias = p.add_parameter("bias", b_shape);
auto ih = p.add_parameter("ih", ih_shape);
auto output =
p.add_instruction(migraphx::op::rnn{hidden_size,
{migraphx::op::tanh{}, migraphx::op::tanh{}},
migraphx::op::rnn::bidirectional,
clip},
seq,
w,
r,
bias,
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, output);
return p;
}
};
int main()
{
verify_program<test_pooling_autopad>();
    verify_program<test_abs>();
    verify_program<test_concat>();
    verify_program<test_concat2>();
    verify_program<test_concat_relu>();
verify_program<test_pad>();
    verify_program<test_add>();
    verify_program<test_add_half>();
    verify_program<test_mul>();
...@@ -1108,4 +1485,13 @@ int main()
    verify_program<test_slice>();
    verify_program<test_gather>();
    verify_program<test_gather_neg_axis>();
verify_program<test_rnn_forward>();
verify_program<test_rnn_forward10>();
verify_program<test_rnn_reverse>();
verify_program<test_rnn_reverse2>();
verify_program<test_rnn_3args>();
verify_program<test_rnn_4args>();
verify_program<test_rnn_5args>();
verify_program<test_rnn_bidirectional>();
verify_program<test_rnn_bidirectional10>();
}
...@@ -718,4 +718,12 @@ TEST_CASE(group_conv_test)
    migraphx::parse_onnx("group_conv_test.onnx");
}
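// Parses pad_test.onnx; the expected program pads a 2x2 input by one element on each side.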
TEST_CASE(pad_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 2}});
p.add_instruction(migraphx::op::pad{{1, 1, 1, 1}}, l0);
migraphx::parse_onnx("pad_test.onnx");
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
(binary file added, likely pad_test.onnx: ONNX graph "pad-example" containing a single Pad node "test-pad" with a pads attribute, input "0", output "1")