"test/vscode:/vscode.git/clone" did not exist on "dd651742a9bfdf26ebab2d5f6fb55fc7633f1b22"
Commit 3ed217c9 authored by Paul

Ensure reflect methods for all operators

parent b2051bbc
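
For context: the pattern this commit adds to every operator is a static reflect method that hands each member field, paired with its serialized name, to a visitor f. A minimal sketch of the idiom (scale_op is a hypothetical operator; pack is the MIGraphX helper visible in the hunks below):

struct scale_op
{
    float scale      = 1.0f;
    std::size_t axis = 0;

    // Self is deduced as const or non-const, so one static template
    // serves both const and mutable visitation. pack(...) collects the
    // visited members, as in the diff below.
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.scale, "scale"), f(self.axis, "axis"));
    }
};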
@@ -39,6 +39,11 @@ struct undefined
struct unknown
{
std::string op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.op, "op"));
}
std::string name() const { return "unknown:" + op; }
shape compute_shape(std::vector<shape> input) const
{
......
@@ -19,6 +19,13 @@ namespace op {
struct concat
{
std::size_t axis = 0;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.axis, "axis"));
}
std::string name() const { return "concat"; }
std::vector<std::size_t> compute_offsets(const shape& output_shape,
const std::vector<argument>& args) const
......
@@ -18,19 +18,21 @@ namespace op {
struct leaky_relu
{
std::string name() const { return "leaky_relu"; }
float alpha;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.alpha, "alpha"));
}
std::string name() const { return "leaky_relu"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this}.has(1);
return inputs.front();
}
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.alpha, "alpha"));
}
};
} // namespace op
......
@@ -87,6 +87,8 @@ namespace operation_equal {
template <class T, class U>
auto operator==(const T& x, const U& y) -> decltype(x.name() == y.name())
{
static_assert(is_reflectable<T>{} or sizeof(T) <= 1,
"Missing equality operator or reflect method.");
if(x.name() != y.name())
return false;
const auto& yy = any_cast<T>(y);
@@ -175,7 +177,7 @@ auto is_context_free_op(const T& x) -> decltype(is_context_free_op(
}
template <class T>
std::ptrdiff_t output_alias_op(rank<0>, const T&, const std::vector<shape>&)
int output_alias_op(rank<0>, const T&, const std::vector<shape>&)
{
return -1;
}
@@ -188,7 +190,7 @@ auto output_alias_op(rank<1>, const T& x, const std::vector<shape>& shapes)
}
template <class T>
std::ptrdiff_t output_alias_op(const T& x, const std::vector<shape>& shapes)
int output_alias_op(const T& x, const std::vector<shape>& shapes)
{
return output_alias_op(rank<1>{}, x, shapes);
}
@@ -239,7 +241,7 @@ auto has_finalize_op(const T&) -> decltype(has_finalize_op(rank<1>{},
* std::string name() const;
* bool is_context_free() const;
* bool has_finalize() const;
* std::ptrdiff_t output_alias(const std::vector<shape>& input) const;
* int output_alias(const std::vector<shape>& input) const;
* void finalize(context& ctx,const shape& output,const std::vector<shape>& input) ;
* shape compute_shape(const std::vector<shape>& input) const;
* argument compute(context& ctx,const shape& output,const std::vector<argument>& input) const;
@@ -325,7 +327,7 @@ struct operation
return (*this).private_detail_te_get_handle().has_finalize();
}
std::ptrdiff_t output_alias(const std::vector<shape>& input) const
int output_alias(const std::vector<shape>& input) const
{
assert((*this).private_detail_te_handle_mem_var);
return (*this).private_detail_te_get_handle().output_alias(input);
@@ -380,10 +382,10 @@ struct operation
virtual std::shared_ptr<private_detail_te_handle_base_type> clone() const = 0;
virtual const std::type_info& type() const = 0;
virtual std::string name() const = 0;
virtual bool is_context_free() const = 0;
virtual bool has_finalize() const = 0;
virtual std::ptrdiff_t output_alias(const std::vector<shape>& input) const = 0;
virtual std::string name() const = 0;
virtual bool is_context_free() const = 0;
virtual bool has_finalize() const = 0;
virtual int output_alias(const std::vector<shape>& input) const = 0;
virtual void
finalize(context& ctx, const shape& output, const std::vector<shape>& input) = 0;
virtual shape compute_shape(const std::vector<shape>& input) const = 0;
@@ -432,7 +434,7 @@ struct operation
bool has_finalize() const override { return has_finalize_op(private_detail_te_value); }
std::ptrdiff_t output_alias(const std::vector<shape>& input) const override
int output_alias(const std::vector<shape>& input) const override
{
return output_alias_op(private_detail_te_value, input);
......
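
The static_assert added to operator== above is what makes these reflect methods load-bearing: once an operator exposes its fields, equality falls out generically by comparing the reflected members. A standalone sketch of that idea, using a stand-in pack built on std::make_tuple (not MIGraphX's actual implementation):

#include <string>
#include <tuple>
#include <utility>

// Stand-in for pack: collect the visitor results into a tuple.
template <class... Ts>
auto pack(Ts&&... xs)
{
    return std::make_tuple(std::forward<Ts>(xs)...);
}

struct my_op
{
    std::size_t axis = 0;
    std::string mode = "default";
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.axis, "axis"), f(self.mode, "mode"));
    }
};

// Generic equality: visit both objects with a selector that keeps the
// member value and drops the name, then compare the resulting tuples.
template <class T>
bool reflect_equal(const T& x, const T& y)
{
    auto select = [](auto&& member, const char*) { return member; };
    return T::reflect(x, select) == T::reflect(y, select);
}

// reflect_equal(my_op{1, "same"}, my_op{1, "same"}) evaluates to true.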
@@ -11,6 +11,15 @@ inline namespace MIGRAPHX_INLINE_NS {
namespace detail {
struct reflect_placeholder
{
template <class... Ts>
int operator()(Ts&&...) const
{
return 0;
}
};
template <class T, class Selector>
auto reflect_impl(rank<1>, T& x, Selector f) -> decltype(T::reflect(x, f))
{
@@ -23,8 +32,17 @@ auto reflect_impl(rank<0>, T&, Selector)
return pack();
}
template <class T>
auto reflectable_impl(rank<1>, T&& x) -> decltype(T::reflect(x, reflect_placeholder{}), std::true_type{});
template <class T>
auto reflectable_impl(rank<0>, T&&) -> decltype(std::false_type{});
} // namespace detail
template <class T>
using is_reflectable = decltype(detail::reflectable_impl(rank<1>{}, std::declval<T>()));
template <class T, class Selector>
auto reflect(T& x, Selector f)
{
......
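
Because f is invoked on each member while the pack expression is evaluated, the same hook supports traversal, not just detection. A usage sketch (print_fields is hypothetical and assumes each reflected member is streamable with operator<<):

#include <iostream>

// Print every reflected field of an operator; the visitor returns 0
// only so the surrounding pack expression has something to collect.
template <class T>
void print_fields(const T& x)
{
    migraphx::reflect(x, [](auto&& member, const char* name) {
        std::cout << name << " = " << member << "\n";
        return 0;
    });
}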
@@ -48,6 +48,12 @@ struct cpu_batch_norm_inference
{
op::batch_norm_inference op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "cpu::batch_norm_inference"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
@@ -107,6 +113,12 @@ struct cpu_lrn
{
op::lrn op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "cpu::lrn"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
argument compute(context&, shape output_shape, std::vector<argument> args) const
@@ -144,6 +156,12 @@ struct cpu_convolution
{
op::convolution op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "cpu::convolution"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
argument compute(context&, shape output_shape, std::vector<argument> args) const
@@ -190,6 +208,12 @@ struct cpu_im2col
{
op::im2col op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
static std::string name() { return "cpu::im2col"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
@@ -271,6 +295,12 @@ struct cpu_pooling
{
op::pooling op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "cpu::pooling_" + Op::name(); }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
argument compute(context&, const shape& output_shape, std::vector<argument> args) const
@@ -315,20 +345,53 @@ struct cpu_pooling
}
};
struct cpu_contiguous
struct cpu_op
{
op::contiguous op;
std::string name() const { return "cpu::contiguous"; }
operation op;
std::string name() const { return "cpu::" + op.name(); }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
argument compute(context&, const shape& output_shape, std::vector<argument> args) const
{
return op.compute(output_shape, std::move(args));
}
friend bool operator==(const cpu_op& x, const cpu_op& y)
{
return x.op == y.op;
}
friend bool operator==(const cpu_op& x, const operation& y)
{
if(x.name() != y.name())
return false;
return x == any_cast<cpu_op>(y);
}
friend bool operator==(const operation& x, const cpu_op& y)
{
return y == x;
}
};
// struct cpu_contiguous
// {
// op::contiguous op;
// std::string name() const { return "cpu::contiguous"; }
// shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
// argument compute(context&, const shape& output_shape, std::vector<argument> args) const
// {
// return op.compute(output_shape, std::move(args));
// }
// };
struct cpu_pad
{
op::pad op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "cpu::contiguous"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
argument compute(context&, const shape& output_shape, std::vector<argument> args) const
@@ -352,20 +415,26 @@ struct cpu_pad
}
};
struct cpu_concat
{
op::concat op;
std::string name() const { return "cpu::concat"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
argument compute(context&, const shape& output_shape, std::vector<argument> args) const
{
return op.compute(output_shape, std::move(args));
}
};
// struct cpu_concat
// {
// op::concat op;
// std::string name() const { return "cpu::concat"; }
// shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
// argument compute(context&, const shape& output_shape, std::vector<argument> args) const
// {
// return op.compute(output_shape, std::move(args));
// }
// };
struct cpu_gemm
{
op::dot op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "cpu::dot"; }
shape compute_shape(const std::vector<shape>& inputs) const
{
@@ -408,161 +477,161 @@ struct cpu_gemm
}
};
struct cpu_gather
{
op::gather op;
std::string name() const { return "cpu::gather"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
argument compute(context&, const shape& output_shape, std::vector<argument> args) const
{
return op.compute(output_shape, std::move(args));
}
};
struct identity_op
{
std::string name() const { return "cpu::identity"; }
auto fcn() const
{
return [](auto x) { return x; };
}
};
struct abs_op
{
std::string name() const { return "cpu::abs"; }
auto fcn() const
{
return [](auto x) { return std::abs(make_signed(x)); };
}
};
struct exp_op
{
std::string name() const { return "cpu::exp"; }
auto fcn() const
{
return [](auto x) { return std::exp(x); };
}
};
struct log_op
{
std::string name() const { return "cpu::log"; }
auto fcn() const
{
return [](auto x) { return std::log(x); };
}
};
struct sin_op
{
std::string name() const { return "cpu::sin"; }
auto fcn() const
{
return [](auto x) { return std::sin(x); };
}
};
struct cos_op
{
std::string name() const { return "cpu::cos"; }
auto fcn() const
{
return [](auto x) { return std::cos(x); };
}
};
struct tan_op
{
std::string name() const { return "cpu::tan"; }
auto fcn() const
{
return [](auto x) { return std::tan(x); };
}
};
struct asin_op
{
std::string name() const { return "cpu::asin"; }
auto fcn() const
{
return [](auto x) { return std::asin(x); };
}
};
struct acos_op
{
std::string name() const { return "cpu::acos"; }
auto fcn() const
{
return [](auto x) { return std::acos(x); };
}
};
struct atan_op
{
std::string name() const { return "cpu::atan"; }
auto fcn() const
{
return [](auto x) { return std::atan(x); };
}
};
struct sinh_op
{
std::string name() const { return "cpu::sinh"; }
auto fcn() const
{
return [](auto x) { return std::sinh(x); };
}
};
struct cosh_op
{
std::string name() const { return "cpu::cosh"; }
auto fcn() const
{
return [](auto x) { return std::cosh(x); };
}
};
struct tanh_op
{
std::string name() const { return "cpu::tanh"; }
auto fcn() const
{
return [](auto x) { return std::tanh(x); };
}
};
struct sigmoid_op
{
std::string name() const { return "cpu::sigmoid"; }
auto fcn() const
{
return [](auto x) { return 1.f / (1.f + std::exp(-x)); };
}
};
struct neg_op
{
std::string name() const { return "cpu::neg"; }
auto fcn() const
{
return [](auto x) { return -x; };
}
};
struct relu_op
{
std::string name() const { return "cpu::relu"; }
auto fcn() const
{
return [](auto x) { return std::max(decltype(x){0}, x); };
}
};
// struct cpu_gather
// {
// op::gather op;
// std::string name() const { return "cpu::gather"; }
// shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
// argument compute(context&, const shape& output_shape, std::vector<argument> args) const
// {
// return op.compute(output_shape, std::move(args));
// }
// };
// struct identity_op
// {
// std::string name() const { return "cpu::identity"; }
// auto fcn() const
// {
// return [](auto x) { return x; };
// }
// };
// struct abs_op
// {
// std::string name() const { return "cpu::abs"; }
// auto fcn() const
// {
// return [](auto x) { return std::abs(make_signed(x)); };
// }
// };
// struct exp_op
// {
// std::string name() const { return "cpu::exp"; }
// auto fcn() const
// {
// return [](auto x) { return std::exp(x); };
// }
// };
// struct log_op
// {
// std::string name() const { return "cpu::log"; }
// auto fcn() const
// {
// return [](auto x) { return std::log(x); };
// }
// };
// struct sin_op
// {
// std::string name() const { return "cpu::sin"; }
// auto fcn() const
// {
// return [](auto x) { return std::sin(x); };
// }
// };
// struct cos_op
// {
// std::string name() const { return "cpu::cos"; }
// auto fcn() const
// {
// return [](auto x) { return std::cos(x); };
// }
// };
// struct tan_op
// {
// std::string name() const { return "cpu::tan"; }
// auto fcn() const
// {
// return [](auto x) { return std::tan(x); };
// }
// };
// struct asin_op
// {
// std::string name() const { return "cpu::asin"; }
// auto fcn() const
// {
// return [](auto x) { return std::asin(x); };
// }
// };
// struct acos_op
// {
// std::string name() const { return "cpu::acos"; }
// auto fcn() const
// {
// return [](auto x) { return std::acos(x); };
// }
// };
// struct atan_op
// {
// std::string name() const { return "cpu::atan"; }
// auto fcn() const
// {
// return [](auto x) { return std::atan(x); };
// }
// };
// struct sinh_op
// {
// std::string name() const { return "cpu::sinh"; }
// auto fcn() const
// {
// return [](auto x) { return std::sinh(x); };
// }
// };
// struct cosh_op
// {
// std::string name() const { return "cpu::cosh"; }
// auto fcn() const
// {
// return [](auto x) { return std::cosh(x); };
// }
// };
// struct tanh_op
// {
// std::string name() const { return "cpu::tanh"; }
// auto fcn() const
// {
// return [](auto x) { return std::tanh(x); };
// }
// };
// struct sigmoid_op
// {
// std::string name() const { return "cpu::sigmoid"; }
// auto fcn() const
// {
// return [](auto x) { return 1.f / (1.f + std::exp(-x)); };
// }
// };
// struct neg_op
// {
// std::string name() const { return "cpu::neg"; }
// auto fcn() const
// {
// return [](auto x) { return -x; };
// }
// };
// struct relu_op
// {
// std::string name() const { return "cpu::relu"; }
// auto fcn() const
// {
// return [](auto x) { return std::max(decltype(x){0}, x); };
// }
// };
struct leaky_relu_op
{
@@ -590,6 +659,12 @@ template <typename Op>
struct cpu_unary
{
Op op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op.op, f);
}
std::string name() const { return op.name(); }
shape compute_shape(const std::vector<shape>& inputs) const { return inputs.front(); }
argument compute(context&, const shape& output_shape, std::vector<argument> args) const
@@ -646,6 +721,13 @@ struct softmax2d
struct cpu_logsoftmax
{
op::logsoftmax op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "cpu::logsoftmax"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
@@ -712,86 +794,86 @@ struct cpu_logsoftmax
}
};
struct add_op
{
std::string name() const { return "add"; }
auto fcn() const
{
return [](auto x, auto y) { return x + y; };
}
};
struct sub_op
{
std::string name() const { return "sub"; }
auto fcn() const
{
return [](auto x, auto y) { return x - y; };
}
};
struct mul_op
{
std::string name() const { return "mul"; }
auto fcn() const
{
return [](auto x, auto y) { return x * y; };
}
};
struct div_op
{
std::string name() const { return "div"; }
auto fcn() const
{
return [](auto x, auto y) { return x / y; };
}
};
struct max_op
{
std::string name() const { return "max"; }
auto fcn() const
{
return [](auto x, auto y) { return std::max(x, y); };
}
};
struct min_op
{
std::string name() const { return "min"; }
auto fcn() const
{
return [](auto x, auto y) { return std::min(x, y); };
}
};
template <typename Op>
struct cpu_binary
{
Op op;
std::string name() const { return "cpu::" + op.name(); }
shape compute_shape(const std::vector<shape>& inputs) const { return inputs.front(); }
argument compute(context&, const shape& output_shape, std::vector<argument> args) const
{
argument result{output_shape};
visit_all(result, args[0], args[1])([&](auto output, auto input1, auto input2) {
if(input1.get_shape().packed() and input2.get_shape().packed())
{
std::transform(
input1.begin(), input1.end(), input2.begin(), output.begin(), op.fcn());
}
else
{
shape_for_each(output.get_shape(), [&](const auto& idx) {
output(idx.begin(), idx.end()) =
op.fcn()(input1(idx.begin(), idx.end()), input2(idx.begin(), idx.end()));
});
}
});
return result;
}
};
// struct add_op
// {
// std::string name() const { return "add"; }
// auto fcn() const
// {
// return [](auto x, auto y) { return x + y; };
// }
// };
// struct sub_op
// {
// std::string name() const { return "sub"; }
// auto fcn() const
// {
// return [](auto x, auto y) { return x - y; };
// }
// };
// struct mul_op
// {
// std::string name() const { return "mul"; }
// auto fcn() const
// {
// return [](auto x, auto y) { return x * y; };
// }
// };
// struct div_op
// {
// std::string name() const { return "div"; }
// auto fcn() const
// {
// return [](auto x, auto y) { return x / y; };
// }
// };
// struct max_op
// {
// std::string name() const { return "max"; }
// auto fcn() const
// {
// return [](auto x, auto y) { return std::max(x, y); };
// }
// };
// struct min_op
// {
// std::string name() const { return "min"; }
// auto fcn() const
// {
// return [](auto x, auto y) { return std::min(x, y); };
// }
// };
// template <typename Op>
// struct cpu_binary
// {
// Op op;
// std::string name() const { return "cpu::" + op.name(); }
// shape compute_shape(const std::vector<shape>& inputs) const { return inputs.front(); }
// argument compute(context&, const shape& output_shape, std::vector<argument> args) const
// {
// argument result{output_shape};
// visit_all(result, args[0], args[1])([&](auto output, auto input1, auto input2) {
// if(input1.get_shape().packed() and input2.get_shape().packed())
// {
// std::transform(
// input1.begin(), input1.end(), input2.begin(), output.begin(), op.fcn());
// }
// else
// {
// shape_for_each(output.get_shape(), [&](const auto& idx) {
// output(idx.begin(), idx.end()) =
// op.fcn()(input1(idx.begin(), idx.end()), input2(idx.begin(), idx.end()));
// });
// }
// });
// return result;
// }
// };
struct cpu_apply
{
@@ -818,37 +900,37 @@ struct cpu_apply
apply_map["batch_norm_inference"] =
extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
apply_map["lrn"] = extend_op<cpu_lrn, op::lrn>();
apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
apply_map["pad"] = extend_op<cpu_pad, op::pad>();
apply_map["concat"] = extend_op<cpu_concat, op::concat>();
apply_map["gather"] = extend_op<cpu_gather, op::gather>();
apply_map["logsoftmax"] = extend_op<cpu_logsoftmax, op::logsoftmax>();
apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>();
apply_map["logsoftmax"] = extend_op<cpu_logsoftmax, op::logsoftmax>();
apply_map["elu"] = extend_op<cpu_unary<elu_op>, op::elu>();
apply_map["identity"] = simple_op<cpu_unary<identity_op>>();
apply_map["abs"] = simple_op<cpu_unary<abs_op>>();
apply_map["sinh"] = simple_op<cpu_unary<sinh_op>>();
apply_map["cosh"] = simple_op<cpu_unary<cosh_op>>();
apply_map["tanh"] = simple_op<cpu_unary<tanh_op>>();
apply_map["sigmoid"] = simple_op<cpu_unary<sigmoid_op>>();
apply_map["exp"] = simple_op<cpu_unary<exp_op>>();
apply_map["log"] = simple_op<cpu_unary<log_op>>();
apply_map["neg"] = simple_op<cpu_unary<neg_op>>();
apply_map["sin"] = simple_op<cpu_unary<sin_op>>();
apply_map["cos"] = simple_op<cpu_unary<cos_op>>();
apply_map["tan"] = simple_op<cpu_unary<tan_op>>();
apply_map["asin"] = simple_op<cpu_unary<asin_op>>();
apply_map["acos"] = simple_op<cpu_unary<acos_op>>();
apply_map["atan"] = simple_op<cpu_unary<atan_op>>();
apply_map["relu"] = simple_op<cpu_unary<relu_op>>();
apply_map["add"] = simple_op<cpu_binary<add_op>>();
apply_map["sub"] = simple_op<cpu_binary<sub_op>>();
apply_map["mul"] = simple_op<cpu_binary<mul_op>>();
apply_map["div"] = simple_op<cpu_binary<div_op>>();
apply_map["max"] = simple_op<cpu_binary<max_op>>();
apply_map["min"] = simple_op<cpu_binary<min_op>>();
apply_map["softmax"] = simple_op<softmax2d>();
apply_map["pad"] = extend_op<cpu_pad, op::pad>();
// apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
// apply_map["concat"] = extend_op<cpu_concat, op::concat>();
// apply_map["gather"] = extend_op<cpu_gather, op::gather>();
// apply_map["identity"] = simple_op<cpu_unary<identity_op>>();
// apply_map["abs"] = simple_op<cpu_unary<abs_op>>();
// apply_map["sinh"] = simple_op<cpu_unary<sinh_op>>();
// apply_map["cosh"] = simple_op<cpu_unary<cosh_op>>();
// apply_map["tanh"] = simple_op<cpu_unary<tanh_op>>();
// apply_map["sigmoid"] = simple_op<cpu_unary<sigmoid_op>>();
// apply_map["exp"] = simple_op<cpu_unary<exp_op>>();
// apply_map["log"] = simple_op<cpu_unary<log_op>>();
// apply_map["neg"] = simple_op<cpu_unary<neg_op>>();
// apply_map["sin"] = simple_op<cpu_unary<sin_op>>();
// apply_map["cos"] = simple_op<cpu_unary<cos_op>>();
// apply_map["tan"] = simple_op<cpu_unary<tan_op>>();
// apply_map["asin"] = simple_op<cpu_unary<asin_op>>();
// apply_map["acos"] = simple_op<cpu_unary<acos_op>>();
// apply_map["atan"] = simple_op<cpu_unary<atan_op>>();
// apply_map["relu"] = simple_op<cpu_unary<relu_op>>();
// apply_map["add"] = simple_op<cpu_binary<add_op>>();
// apply_map["sub"] = simple_op<cpu_binary<sub_op>>();
// apply_map["mul"] = simple_op<cpu_binary<mul_op>>();
// apply_map["div"] = simple_op<cpu_binary<div_op>>();
// apply_map["max"] = simple_op<cpu_binary<max_op>>();
// apply_map["min"] = simple_op<cpu_binary<min_op>>();
}
void apply()
@@ -864,9 +946,18 @@ struct cpu_apply
{
apply_map.at(it->name())(it);
}
else if(is_context_free(it->get_operator()))
{
apply_cpu_op(it);
}
}
}
void apply_cpu_op(instruction_ref ins)
{
prog->replace_instruction(ins, cpu_op{ins->get_operator()}, ins->inputs());
}
template <class T>
void apply_simple_op(instruction_ref ins)
{
......
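
Design note on the hunk above: operators with a dedicated entry in apply_map keep their handwritten CPU lowering, while any other operator whose compute needs no context (is_context_free) is now wrapped in the generic cpu_op from earlier in this file. That fallback is what lets the per-operator wrappers (cpu_contiguous, cpu_concat, cpu_gather, and the unary and binary functor structs) and their apply_map entries be commented out.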
@@ -13,6 +13,13 @@ struct context;
struct miopen_abs
{
shared<activation_descriptor> ad;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return gpu::reflect(self.ad.get(), f);
}
std::string name() const { return "gpu::abs"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
......
@@ -13,6 +13,13 @@ struct context;
struct miopen_batch_norm_inference
{
op::batch_norm_inference op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::batch_norm_inference"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
......
@@ -14,6 +14,12 @@ struct hip_concat
{
op::concat op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::concat"; }
shape compute_shape(std::vector<shape> inputs) const;
argument
......
@@ -13,6 +13,13 @@ struct context;
struct miopen_contiguous
{
op::contiguous op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::contiguous"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument compute(context&, shape output_shape, const std::vector<argument>& args) const;
......
@@ -13,6 +13,13 @@ struct context;
struct miopen_elu
{
shared<activation_descriptor> ad;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return gpu::reflect(self.ad.get(), f);
}
std::string name() const { return "gpu::elu"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
......
@@ -14,6 +14,13 @@ struct context;
struct hip_gather
{
op::gather op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::gather"; }
shape compute_shape(std::vector<shape> inputs) const;
argument
......
@@ -13,6 +13,13 @@ struct context;
struct miopen_gemm
{
op::dot op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::gemm"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
......
@@ -28,6 +28,13 @@ struct hip_allocate
{
shape s;
std::string tag{};
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.s, "shape"), f(self.tag, "tag"));
}
std::string name() const { return "hip::allocate"; }
shape compute_shape(const std::vector<shape>& inputs) const
{
@@ -43,6 +50,13 @@ struct hip_allocate
struct hip_sync
{
std::string tag{};
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.tag, "tag"));
}
std::string name() const { return "hip::sync"; }
shape compute_shape(const std::vector<shape>& inputs) const
{
......
@@ -13,6 +13,13 @@ struct context;
struct miopen_leaky_relu
{
shared<activation_descriptor> ad;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return gpu::reflect(self.ad.get(), f);
}
std::string name() const { return "gpu::leaky_relu"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
......
@@ -25,6 +25,13 @@ namespace gpu {
struct hip_logsoftmax
{
op::logsoftmax op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::logsoftmax"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
......
@@ -13,6 +13,13 @@ struct context;
struct miopen_lrn
{
shared<lrn_descriptor> ldesc;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return gpu::reflect(self.ldesc.get(), f);
}
std::string name() const { return "gpu::lrn"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
......
@@ -162,6 +162,29 @@ inline fused_operator_args make_fused_args()
return make_obj<fused_operator_args>(&miopenCreateOperatorArgs);
}
template <class F>
auto reflect(miopenActivationDescriptor_t ad, F f)
{
miopenActivationMode_t mode;
double alpha;
double beta;
double gamma;
miopenGetActivationDescriptor(ad, &mode, &alpha, &beta, &gamma);
return pack(f(mode, "mode"), f(alpha, "alpha"), f(beta, "beta"), f(gamma, "gamma"));
}
template <class F>
auto reflect(miopenLRNDescriptor_t lrnd, F f)
{
miopenLRNMode_t mode;
unsigned int n;
double alpha;
double beta;
double k;
miopenGetLRNDescriptor(lrnd, &mode, &n, &alpha, &beta, &k);
return pack(f(mode, "mode"), f(n, "n"), f(alpha, "alpha"), f(beta, "beta"), f(k, "k"));
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......
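
Design note: unlike the CPU operators, the GPU activation and LRN operators hold opaque MIOpen descriptor handles rather than plain fields, so the gpu::reflect overloads above read the parameters back through the miopenGet* query functions and pack those values instead of reflecting stored members.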
@@ -14,6 +14,12 @@ struct hip_pad
{
op::pad op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::pad"; }
shape compute_shape(std::vector<shape> inputs) const;
argument
......
@@ -16,6 +16,12 @@ struct miopen_pooling
op::pooling op;
shared<pooling_descriptor> pd;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::pooling"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
......