Commit b3e9d8f8 authored by Paul

Formatting

parent 3ed217c9
@@ -25,7 +25,7 @@ struct concat
{
return pack(f(self.axis, "axis"));
}
std::string name() const { return "concat"; }
std::vector<std::size_t> compute_offsets(const shape& output_shape,
const std::vector<argument>& args) const
@@ -19,20 +19,19 @@ namespace op {
struct leaky_relu
{
float alpha;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.alpha, "alpha"));
}
std::string name() const { return "leaky_relu"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this}.has(1);
return inputs.front();
}
};
} // namespace op
@@ -13,7 +13,7 @@ namespace detail {
struct reflect_placeholder
{
-template<class... Ts>
+template <class... Ts>
int operator()(Ts&&...) const
{
return 0;
@@ -33,14 +33,15 @@ auto reflect_impl(rank<0>, T&, Selector)
}
template <class T>
-auto reflectable_impl(rank<1>, T&& x) -> decltype(T::reflect(x, reflect_placeholder{}), std::true_type{});
+auto reflectable_impl(rank<1>, T&& x)
+    -> decltype(T::reflect(x, reflect_placeholder{}), std::true_type{});
template <class T>
-auto reflectable_impl(rank<0>, T&&) -> decltype(std::false_type{});
+auto reflectable_impl(rank<0>, T &&) -> decltype(std::false_type{});
} // namespace detail
-template<class T>
+template <class T>
using is_reflectable = decltype(detail::reflectable_impl(rank<1>{}, std::declval<T>()));
template <class T, class Selector>
@@ -354,29 +354,23 @@ struct cpu_op
{
return op.compute(output_shape, std::move(args));
}
-friend bool operator==(const cpu_op& x, const cpu_op& y)
-{
-return x.op == y.op;
-}
+friend bool operator==(const cpu_op& x, const cpu_op& y) { return x.op == y.op; }
friend bool operator==(const cpu_op& x, const operation& y)
{
if(x.name() != y.name())
return false;
return x == any_cast<cpu_op>(y);
}
-friend bool operator==(const operation& x, const cpu_op& y)
-{
-return y == x;
-}
+friend bool operator==(const operation& x, const cpu_op& y) { return y == x; }
};
// struct cpu_contiguous
// {
// op::contiguous op;
// std::string name() const { return "cpu::contiguous"; }
-// shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
-// argument compute(context&, const shape& output_shape, std::vector<argument> args) const
+// shape compute_shape(const std::vector<shape>& inputs) const { return
+// op.compute_shape(inputs); } argument compute(context&, const shape& output_shape,
+// std::vector<argument> args) const
// {
// return op.compute(output_shape, std::move(args));
// }
@@ -419,8 +413,9 @@ struct cpu_pad
// {
// op::concat op;
// std::string name() const { return "cpu::concat"; }
-// shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
-// argument compute(context&, const shape& output_shape, std::vector<argument> args) const
+// shape compute_shape(const std::vector<shape>& inputs) const { return
+// op.compute_shape(inputs); } argument compute(context&, const shape& output_shape,
+// std::vector<argument> args) const
// {
// return op.compute(output_shape, std::move(args));
// }
@@ -481,7 +476,8 @@ struct cpu_gemm
// {
// op::gather op;
// std::string name() const { return "cpu::gather"; }
-// shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
+// shape compute_shape(const std::vector<shape>& inputs) const { return
+// op.compute_shape(inputs); }
// argument compute(context&, const shape& output_shape, std::vector<argument> args) const
// {
@@ -903,7 +899,7 @@ struct cpu_apply
apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>();
apply_map["logsoftmax"] = extend_op<cpu_logsoftmax, op::logsoftmax>();
apply_map["elu"] = extend_op<cpu_unary<elu_op>, op::elu>();
apply_map["softmax"] = simple_op<softmax2d>();
apply_map["softmax"] = simple_op<softmax2d>();
apply_map["pad"] = extend_op<cpu_pad, op::pad>();
// apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
// apply_map["concat"] = extend_op<cpu_concat, op::concat>();
@@ -930,7 +926,6 @@ struct cpu_apply
// apply_map["div"] = simple_op<cpu_binary<div_op>>();
// apply_map["max"] = simple_op<cpu_binary<max_op>>();
// apply_map["min"] = simple_op<cpu_binary<min_op>>();
}
void apply()
@@ -946,7 +941,7 @@ struct cpu_apply
{
apply_map.at(it->name())(it);
}
-else if (is_context_free(it->get_operator()))
+else if(is_context_free(it->get_operator()))
{
apply_cpu_op(it);
}
@@ -13,7 +13,7 @@ struct context;
struct miopen_abs
{
shared<activation_descriptor> ad;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
@@ -19,7 +19,7 @@ struct miopen_batch_norm_inference
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::batch_norm_inference"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
@@ -19,7 +19,7 @@ struct hip_concat
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::concat"; }
shape compute_shape(std::vector<shape> inputs) const;
argument
@@ -19,7 +19,7 @@ struct miopen_contiguous
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::contiguous"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument compute(context&, shape output_shape, const std::vector<argument>& args) const;
@@ -19,7 +19,7 @@ struct miopen_elu
{
return gpu::reflect(self.ad.get(), f);
}
std::string name() const { return "gpu::elu"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
@@ -20,7 +20,7 @@ struct hip_gather
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::gather"; }
shape compute_shape(std::vector<shape> inputs) const;
argument
@@ -19,7 +19,7 @@ struct miopen_gemm
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::gemm"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
@@ -19,7 +19,7 @@ struct miopen_leaky_relu
{
return gpu::reflect(self.ad.get(), f);
}
std::string name() const { return "gpu::leaky_relu"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
@@ -31,7 +31,7 @@ struct hip_logsoftmax
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::logsoftmax"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
@@ -19,7 +19,7 @@ struct miopen_lrn
{
return gpu::reflect(self.ldesc.get(), f);
}
std::string name() const { return "gpu::lrn"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
@@ -162,7 +162,7 @@ inline fused_operator_args make_fused_args()
return make_obj<fused_operator_args>(&miopenCreateOperatorArgs);
}
-template<class F>
+template <class F>
auto reflect(miopenActivationDescriptor_t ad, F f)
{
miopenActivationMode_t mode;
@@ -173,15 +173,16 @@ auto reflect(miopenActivationDescriptor_t ad, F f)
return pack(f(mode, "mode"), f(alpha, "alpha"), f(beta, "beta"), f(gamma, "gamma"));
}
-template<class F>
+template <class F>
auto reflect(miopenLRNDescriptor_t lrnd, F f)
{
-miopenLRNMode_t mode;;
+miopenLRNMode_t mode;
+;
unsigned int n;
double alpha;
double beta;
double k;
-miopenGetLRNDescriptor(lrnd,&mode, &n, &alpha, &beta, &k);
+miopenGetLRNDescriptor(lrnd, &mode, &n, &alpha, &beta, &k);
return pack(f(mode, "mode"), f(n, "n"), f(alpha, "alpha"), f(beta, "beta"), f(k, "k"));
}
@@ -19,7 +19,7 @@ struct miopen_relu
{
return gpu::reflect(self.ad.get(), f);
}
std::string name() const { return "gpu::relu"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
@@ -19,7 +19,7 @@ struct miopen_sigmoid
{
return gpu::reflect(self.ad.get(), f);
}
std::string name() const { return "gpu::sigmoid"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
@@ -19,7 +19,7 @@ struct miopen_softmax
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "gpu::softmax"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
@@ -19,7 +19,7 @@ struct miopen_tanh
{
return gpu::reflect(self.ad.get(), f);
}
std::string name() const { return "gpu::tanh"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
@@ -26,7 +26,7 @@ struct allocate
{
return migraphx::pack(f(self.s, "shape"));
}
std::string name() const { return "allocate"; }
migraphx::shape compute_shape(const std::vector<migraphx::shape>& inputs) const
{