Commit 11f0f3f1 authored by Artur Wojcik

dnnl

parent b44e948d
@@ -95,7 +95,7 @@ template <class Derived, class Primitive>
 struct dnnl_op : auto_register_op<Derived>
 {
     std::vector<post_op> post_ops;
-    std::function<argument(context& ctx, const std::vector<argument>& args)> execute;
+    std::function<argument(context&, const std::vector<argument>&)> execute;
     template <class Self, class F>
     static auto reflect_base(Self& self, F f)
@@ -284,7 +284,7 @@ struct dnnl_op : auto_register_op<Derived>
     std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
     {
-        return shapes.size() - 1;
+        return static_cast<std::ptrdiff_t>(shapes.size() - 1);
     }
     value compile(context&, const shape& output_shape, std::vector<shape> inputs)
     {
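For context on the `output_alias` hunk above: `shapes.size()` returns the unsigned `std::size_t`, while the function returns the signed `std::ptrdiff_t`, so the bare subtraction relied on an implicit narrowing conversion; the added `static_cast` makes that conversion explicit, presumably to satisfy compiler or clang-tidy sign-conversion checks. A minimal standalone sketch of the same fix; `last_index` is a hypothetical name, not part of the patch:

#include <cstddef>
#include <vector>

// v.size() is std::size_t (unsigned); returning it from a function with a
// signed return type is an implicit conversion that -Wsign-conversion flags.
// The explicit cast documents that the narrowing is intentional.
std::ptrdiff_t last_index(const std::vector<int>& v)
{
    return static_cast<std::ptrdiff_t>(v.size() - 1);
}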
@@ -300,16 +300,60 @@ struct dnnl_op : auto_register_op<Derived>
     {
         // Compensate for allocation
         inputs.pop_back();
-        const auto& self = static_cast<const Derived&>(*this);
-        auto name        = self.name();
         auto md         = to_memory_desc(output_shape, inputs);
         auto prim       = get_primitive(md);
         auto arg_lookup = create_arg_map(inputs.size());
 #ifndef NDEBUG
-        auto prim_attr = get_primitive_attr(md);
+        // NOLINTNEXTLINE
+        execute = std::bind(&dnnl_op::internal,
+                            this,
+                            output_shape,
+                            inputs,
+                            md,
+                            prim,
+                            arg_lookup,
+                            std::placeholders::_1,
+                            std::placeholders::_2);
+#else
+        // NOLINTNEXTLINE
+        execute = std::bind(&dnnl_op::internal,
+                            this,
+                            md,
+                            prim,
+                            arg_lookup,
+                            std::placeholders::_1,
+                            std::placeholders::_2);
 #endif
-        execute = [=](context&, const std::vector<argument>& args) {
+    }
+    std::vector<shape> trim_post_op_inputs(const std::vector<shape>& inputs) const
+    {
+        auto prim_input_size = inputs.size() - this->get_extra_post_op_args();
+        return {inputs.begin(), inputs.begin() + prim_input_size};
+    }
+
+    private:
 #ifndef NDEBUG
+    argument internal(const shape& output_shape,
+                      const std::vector<shape>& inputs,
+                      std::unordered_map<int, dnnl::memory::desc> md,
+                      Primitive prim,
+                      std::vector<int> arg_lookup,
+                      context&,
+                      const std::vector<argument>& args)
+#else
+    argument internal(std::unordered_map<int, dnnl::memory::desc> md,
+                      Primitive prim,
+                      std::vector<int> arg_lookup,
+                      context&,
+                      const std::vector<argument>& args)
+#endif
+    {
+#ifndef NDEBUG
+        const auto& self = static_cast<const Derived&>(*this);
+        auto name        = self.name();
+        auto prim_attr   = get_primitive_attr(md);
         // Check that the memory descriptors have not changed
         auto debug_args = args;
         debug_args.pop_back();
@@ -332,8 +376,7 @@ struct dnnl_op : auto_register_op<Derived>
         {
             auto arg  = j + prim_input_size;
             auto kind = pos.kind(i);
-            std::string mesg =
-                "Post op " + std::to_string(i) + "@" + std::to_string(arg) + ": ";
+            std::string mesg = "Post op " + std::to_string(i) + "@" + std::to_string(arg) + ": ";
             try
             {
                 dnnl::algorithm algo;
@@ -345,8 +388,8 @@ struct dnnl_op : auto_register_op<Derived>
                 {
                     pos.get_params_binary(i, algo, mdesc);
                     if(mdesc != md.at(arg_lookup.at(arg)))
-                        MIGRAPHX_THROW(mesg +
-                                       "Memory descriptor doesn't match for binary post op");
+                        MIGRAPHX_THROW(mesg + "Memory descriptor doesn't match for binary "
+                                              "post op");
                     j++;
                 }
                 else if(kind == dnnl::primitive::kind::eltwise)
@@ -379,12 +422,6 @@ struct dnnl_op : auto_register_op<Derived>
             m[arg_lookup[i]] = to_dnnl_memory(md.at(arg_lookup[i]), args[i]);
         prim.execute(get_dnnl_context().stream, m);
         return args.back();
-        };
-    }
-    std::vector<shape> trim_post_op_inputs(const std::vector<shape>& inputs) const
-    {
-        auto prim_input_size = inputs.size() - this->get_extra_post_op_args();
-        return {inputs.begin(), inputs.begin() + prim_input_size};
     }
 };
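The bulk of this commit replaces the value-capturing lambda previously stored in `execute` with `std::bind` to a new private member function `internal`; the debug-only state (`self`, `name`, `prim_attr`) moves behind `#ifndef NDEBUG` inside `internal`, and the two `std::bind` calls differ only in the extra `output_shape`/`inputs` arguments the debug build needs. A minimal sketch of the same lambda-to-`std::bind` pattern, with simplified types; `op`, `compile`, and the bound state are hypothetical stand-ins:

#include <functional>
#include <iostream>
#include <vector>

struct op
{
    // Stored callable, analogous to the `execute` member in the patch.
    std::function<int(const std::vector<int>&)> execute;

    void compile(std::vector<int> inputs)
    {
        // Bind `this` plus precomputed state; the call-time argument is
        // forwarded through the placeholder, mirroring the patch's use of
        // std::placeholders::_1/_2.
        // NOLINTNEXTLINE
        execute = std::bind(&op::internal, this, inputs, std::placeholders::_1);
    }

    private:
    int internal(const std::vector<int>& inputs, const std::vector<int>& args)
    {
        return static_cast<int>(inputs.size() + args.size());
    }
};

int main()
{
    op o;
    o.compile({1, 2, 3});
    std::cout << o.execute({4, 5}) << "\n"; // prints 5
}

The NOLINTNEXTLINE comments in the patch suppress clang-tidy on the `std::bind` lines, since clang-tidy's modernize-avoid-bind check flags `std::bind` usage.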