Commit ad0ab357 authored by wsttiger's avatar wsttiger
Browse files

Formatting

parent 6239e84a
......@@ -26,8 +26,9 @@ shape miopen_add::compute_shape(const std::vector<shape>& inputs) const
return inputs.at(0);
}
argument
miopen_add::compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const
argument miopen_add::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
float alpha = 1, beta = 0;
auto a_desc = make_tensor(args[0].get_shape());
......
......@@ -10,12 +10,12 @@ namespace gpu {
shape miopen_batch_norm_inference::compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(6);
return op.compute_shape(
{inputs.at(0), inputs.at(1), inputs.at(2), inputs.at(3), inputs.at(4)});
return op.compute_shape({inputs.at(0), inputs.at(1), inputs.at(2), inputs.at(3), inputs.at(4)});
}
argument
miopen_batch_norm_inference::compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const
argument miopen_batch_norm_inference::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
......
......@@ -12,7 +12,8 @@ shape miopen_contiguous::compute_shape(const std::vector<shape>& inputs) const
check_shapes{inputs, *this}.has(2);
return op.compute_shape({inputs.at(0)});
}
argument miopen_contiguous::compute(context&, shape output_shape, const std::vector<argument>& args) const
argument
miopen_contiguous::compute(context&, shape output_shape, const std::vector<argument>& args) const
{
assert(output_shape == args[1].get_shape());
assert(output_shape.standard());
......
......@@ -20,7 +20,6 @@
namespace migraph {
namespace gpu {
struct miopen_contiguous
{
contiguous op;
......
......@@ -13,8 +13,9 @@ shape miopen_relu::compute_shape(const std::vector<shape>& inputs) const
return inputs.at(1);
}
argument
miopen_relu::compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const
argument miopen_relu::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
float alpha = 1, beta = 0;
auto x_desc = make_tensor(args[0].get_shape());
......
......@@ -13,8 +13,9 @@ shape miopen_softmax::compute_shape(const std::vector<shape>& inputs) const
return op.compute_shape({inputs.at(0)});
}
argument
miopen_softmax::compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const
argument miopen_softmax::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
float alpha = 1, beta = 0;
auto x_desc = make_tensor(args[0].get_shape());
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment