Commit 3ed217c9 authored by Paul
Browse files

Ensure reflect methods for all operators

parent b2051bbc
......@@ -39,6 +39,11 @@ struct undefined
struct unknown
{
std::string op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.op, "op"));
}
std::string name() const { return "unknown:" + op; }
shape compute_shape(std::vector<shape> input) const
{
......
......@@ -19,6 +19,13 @@ namespace op {
struct concat
{
std::size_t axis = 0;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.axis, "axis"));
}
std::string name() const { return "concat"; }
std::vector<std::size_t> compute_offsets(const shape& output_shape,
const std::vector<argument>& args) const
......
......@@ -18,19 +18,21 @@ namespace op {
struct leaky_relu
{
    // Negative-slope coefficient applied to inputs below zero.
    // Zero-initialized so a default-constructed op has a defined value.
    float alpha = 0.0f;
    // Exposes the op's settable state (just alpha) to generic
    // reflection code (printing, serialization, equality).
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.alpha, "alpha"));
    }
    std::string name() const { return "leaky_relu"; }
    // Elementwise op: requires exactly one input and propagates its shape.
    shape compute_shape(std::vector<shape> inputs) const
    {
        check_shapes{inputs, *this}.has(1);
        return inputs.front();
    }
};
} // namespace op
......
......@@ -87,6 +87,8 @@ namespace operation_equal {
template <class T, class U>
auto operator==(const T& x, const U& y) -> decltype(x.name() == y.name())
{
static_assert(is_reflectable<T>{} or sizeof(T) <= 1,
"Missing equality operator or reflect method.");
if(x.name() != y.name())
return false;
const auto& yy = any_cast<T>(y);
......@@ -175,7 +177,7 @@ auto is_context_free_op(const T& x) -> decltype(is_context_free_op(
}
template <class T>
std::ptrdiff_t output_alias_op(rank<0>, const T&, const std::vector<shape>&)
int output_alias_op(rank<0>, const T&, const std::vector<shape>&)
{
return -1;
}
......@@ -188,7 +190,7 @@ auto output_alias_op(rank<1>, const T& x, const std::vector<shape>& shapes)
}
template <class T>
std::ptrdiff_t output_alias_op(const T& x, const std::vector<shape>& shapes)
int output_alias_op(const T& x, const std::vector<shape>& shapes)
{
return output_alias_op(rank<1>{}, x, shapes);
}
......@@ -239,7 +241,7 @@ auto has_finalize_op(const T&) -> decltype(has_finalize_op(rank<1>{},
* std::string name() const;
* bool is_context_free() const;
* bool has_finalize() const;
* int output_alias(const std::vector<shape>& input) const;
* void finalize(context& ctx,const shape& output,const std::vector<shape>& input) ;
* shape compute_shape(const std::vector<shape>& input) const;
* argument compute(context& ctx,const shape& output,const std::vector<argument>& input) const;
......@@ -325,7 +327,7 @@ struct operation
return (*this).private_detail_te_get_handle().has_finalize();
}
std::ptrdiff_t output_alias(const std::vector<shape>& input) const
int output_alias(const std::vector<shape>& input) const
{
assert((*this).private_detail_te_handle_mem_var);
return (*this).private_detail_te_get_handle().output_alias(input);
......@@ -380,10 +382,10 @@ struct operation
virtual std::shared_ptr<private_detail_te_handle_base_type> clone() const = 0;
virtual const std::type_info& type() const = 0;
virtual std::string name() const = 0;
virtual bool is_context_free() const = 0;
virtual bool has_finalize() const = 0;
virtual std::ptrdiff_t output_alias(const std::vector<shape>& input) const = 0;
virtual std::string name() const = 0;
virtual bool is_context_free() const = 0;
virtual bool has_finalize() const = 0;
virtual int output_alias(const std::vector<shape>& input) const = 0;
virtual void
finalize(context& ctx, const shape& output, const std::vector<shape>& input) = 0;
virtual shape compute_shape(const std::vector<shape>& input) const = 0;
......@@ -432,7 +434,7 @@ struct operation
bool has_finalize() const override { return has_finalize_op(private_detail_te_value); }
std::ptrdiff_t output_alias(const std::vector<shape>& input) const override
int output_alias(const std::vector<shape>& input) const override
{
return output_alias_op(private_detail_te_value, input);
......
......@@ -11,6 +11,15 @@ inline namespace MIGRAPHX_INLINE_NS {
namespace detail {
// Dummy selector functor: accepts any arguments and ignores them.
// It exists only so a decltype can probe whether T::reflect(x, f)
// is well-formed without caring what the selector returns.
struct reflect_placeholder
{
    template <class... Us>
    int operator()(Us&&...) const
    {
        return 0;
    }
};
template <class T, class Selector>
auto reflect_impl(rank<1>, T& x, Selector f) -> decltype(T::reflect(x, f))
{
......@@ -23,8 +32,17 @@ auto reflect_impl(rank<0>, T&, Selector)
return pack();
}
// Detection idiom: this overload is well-formed only when T provides a
// static reflect(x, f); the comma operator then yields std::true_type.
template <class T>
auto reflectable_impl(rank<1>, T&& x) -> decltype(T::reflect(x, reflect_placeholder{}), std::true_type{});
// Fallback selected when the rank<1> overload is removed by SFINAE.
template <class T>
auto reflectable_impl(rank<0>, T&&) -> decltype(std::false_type{});
} // namespace detail
// Trait: std::true_type when T has a usable static reflect method,
// std::false_type otherwise.
template<class T>
using is_reflectable = decltype(detail::reflectable_impl(rank<1>{}, std::declval<T>()));
template <class T, class Selector>
auto reflect(T& x, Selector f)
{
......
This diff is collapsed.
......@@ -13,6 +13,13 @@ struct context;
struct miopen_abs
{
shared<activation_descriptor> ad;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return gpu::reflect(self.ad.get(), f);
}
std::string name() const { return "gpu::abs"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
......
......@@ -13,6 +13,13 @@ struct context;
struct miopen_batch_norm_inference
{
op::batch_norm_inference op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::batch_norm_inference"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
......
......@@ -14,6 +14,12 @@ struct hip_concat
{
op::concat op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::concat"; }
shape compute_shape(std::vector<shape> inputs) const;
argument
......
......@@ -13,6 +13,13 @@ struct context;
struct miopen_contiguous
{
op::contiguous op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::contiguous"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument compute(context&, shape output_shape, const std::vector<argument>& args) const;
......
......@@ -13,6 +13,13 @@ struct context;
struct miopen_elu
{
shared<activation_descriptor> ad;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return gpu::reflect(self.ad.get(), f);
}
std::string name() const { return "gpu::elu"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
......
......@@ -14,6 +14,13 @@ struct context;
struct hip_gather
{
op::gather op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::gather"; }
shape compute_shape(std::vector<shape> inputs) const;
argument
......
......@@ -13,6 +13,13 @@ struct context;
struct miopen_gemm
{
op::dot op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::gemm"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
......
......@@ -28,6 +28,13 @@ struct hip_allocate
{
shape s;
std::string tag{};
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.s, "shape"), f(self.tag, "tag"));
}
std::string name() const { return "hip::allocate"; }
shape compute_shape(const std::vector<shape>& inputs) const
{
......@@ -43,6 +50,13 @@ struct hip_allocate
struct hip_sync
{
std::string tag{};
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.tag, "tag"));
}
std::string name() const { return "hip::sync"; }
shape compute_shape(const std::vector<shape>& inputs) const
{
......
......@@ -13,6 +13,13 @@ struct context;
struct miopen_leaky_relu
{
shared<activation_descriptor> ad;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return gpu::reflect(self.ad.get(), f);
}
std::string name() const { return "gpu::leaky_relu"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
......
......@@ -25,6 +25,13 @@ namespace gpu {
struct hip_logsoftmax
{
op::logsoftmax op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::logsoftmax"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
......
......@@ -13,6 +13,13 @@ struct context;
struct miopen_lrn
{
shared<lrn_descriptor> ldesc;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return gpu::reflect(self.ldesc.get(), f);
}
std::string name() const { return "gpu::lrn"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
......
......@@ -162,6 +162,29 @@ inline fused_operator_args make_fused_args()
return make_obj<fused_operator_args>(&miopenCreateOperatorArgs);
}
// Reflects a MIOpen activation descriptor: reads its configuration back
// through the API and packs each field with its name so generic
// reflection code (printing, comparison) can consume it.
template <class F>
auto reflect(miopenActivationDescriptor_t ad, F f)
{
    miopenActivationMode_t mode{};
    double alpha{};
    double beta{};
    double gamma{};
    miopenGetActivationDescriptor(ad, &mode, &alpha, &beta, &gamma);
    return pack(f(mode, "mode"), f(alpha, "alpha"), f(beta, "beta"), f(gamma, "gamma"));
}
// Reflects a MIOpen LRN descriptor: reads its configuration back
// through the API and packs each field with its name.
template <class F>
auto reflect(miopenLRNDescriptor_t lrnd, F f)
{
    // Stray ';;' removed; fields are filled by the getter below.
    miopenLRNMode_t mode;
    unsigned int n;
    double alpha;
    double beta;
    double k;
    miopenGetLRNDescriptor(lrnd, &mode, &n, &alpha, &beta, &k);
    return pack(f(mode, "mode"), f(n, "n"), f(alpha, "alpha"), f(beta, "beta"), f(k, "k"));
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......
......@@ -14,6 +14,12 @@ struct hip_pad
{
op::pad op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::pad"; }
shape compute_shape(std::vector<shape> inputs) const;
argument
......
......@@ -16,6 +16,12 @@ struct miopen_pooling
op::pooling op;
shared<pooling_descriptor> pd;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::pooling"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment