Commit ad0ab357 authored by wsttiger

Formatting

parent 6239e84a
@@ -26,8 +26,9 @@ shape miopen_add::compute_shape(const std::vector<shape>& inputs) const
     return inputs.at(0);
 }
-argument
-miopen_add::compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const
+argument miopen_add::compute(context& ctx,
+                             const shape& output_shape,
+                             const std::vector<argument>& args) const
 {
     float alpha = 1, beta = 0;
     auto a_desc = make_tensor(args[0].get_shape());
...
@@ -10,12 +10,12 @@ namespace gpu {
 shape miopen_batch_norm_inference::compute_shape(const std::vector<shape>& inputs) const
 {
     check_shapes{inputs, *this}.has(6);
-    return op.compute_shape(
-        {inputs.at(0), inputs.at(1), inputs.at(2), inputs.at(3), inputs.at(4)});
+    return op.compute_shape({inputs.at(0), inputs.at(1), inputs.at(2), inputs.at(3), inputs.at(4)});
 }
-argument
-miopen_batch_norm_inference::compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const
+argument miopen_batch_norm_inference::compute(context& ctx,
+                                              const shape& output_shape,
+                                              const std::vector<argument>& args) const
 {
     auto x_desc = make_tensor(args[0].get_shape());
     auto y_desc = make_tensor(output_shape);
...
@@ -12,7 +12,8 @@ shape miopen_contiguous::compute_shape(const std::vector<shape>& inputs) const
     check_shapes{inputs, *this}.has(2);
     return op.compute_shape({inputs.at(0)});
 }
-argument miopen_contiguous::compute(context&, shape output_shape, const std::vector<argument>& args) const
+argument
+miopen_contiguous::compute(context&, shape output_shape, const std::vector<argument>& args) const
 {
     assert(output_shape == args[1].get_shape());
     assert(output_shape.standard());
...
@@ -20,7 +20,6 @@
 namespace migraph {
 namespace gpu {
-
 struct miopen_contiguous
 {
     contiguous op;
...
@@ -13,8 +13,9 @@ shape miopen_relu::compute_shape(const std::vector<shape>& inputs) const
     return inputs.at(1);
 }
-argument
-miopen_relu::compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const
+argument miopen_relu::compute(context& ctx,
+                              const shape& output_shape,
+                              const std::vector<argument>& args) const
 {
     float alpha = 1, beta = 0;
     auto x_desc = make_tensor(args[0].get_shape());
...
@@ -13,8 +13,9 @@ shape miopen_softmax::compute_shape(const std::vector<shape>& inputs) const
     return op.compute_shape({inputs.at(0)});
 }
-argument
-miopen_softmax::compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const
+argument miopen_softmax::compute(context& ctx,
+                                 const shape& output_shape,
+                                 const std::vector<argument>& args) const
 {
     float alpha = 1, beta = 0;
     auto x_desc = make_tensor(args[0].get_shape());
...
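
All six hunks touch the same two-method operator interface, so a compact illustration may help: below is a minimal, self-contained sketch of the compute_shape/compute pattern these files follow, written in the one-parameter-per-line signature style this commit applies. The shape, argument, and context types here are simplified stand-ins invented for the example, not migraph's real classes, and identity_op is a hypothetical operator used only for demonstration.

// Minimal sketch of the operator pattern seen in the diff above.
// The types below are simplified stand-ins, not migraph's real classes.
#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

struct shape
{
    std::vector<std::size_t> lens;
    bool operator==(const shape& other) const { return lens == other.lens; }
};

struct argument
{
    shape s;
    const shape& get_shape() const { return s; }
};

struct context
{
};

// Mirrors the interface in the diff: compute_shape validates the input
// count and derives the output shape; compute receives the expected
// output shape plus the input/output buffers and returns the result.
struct identity_op
{
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        assert(inputs.size() == 2); // stands in for check_shapes{inputs, *this}.has(2)
        return inputs.at(0);
    }
    argument compute(context&,
                     const shape& output_shape,
                     const std::vector<argument>& args) const
    {
        // As in miopen_contiguous: the last argument is the preallocated output.
        assert(output_shape == args[1].get_shape());
        return args[1];
    }
};

int main()
{
    context ctx;
    identity_op op;
    argument in{{{2, 3}}};
    argument out{{{2, 3}}};
    shape s    = op.compute_shape({in.get_shape(), out.get_shape()});
    argument r = op.compute(ctx, s, {in, out});
    std::cout << "output rank: " << r.get_shape().lens.size() << "\n";
    return 0;
}

The wrapped signatures in the hunks follow this shape: when "argument" plus the qualified name and parameters fit within the column limit, the return type stays on the signature line and the remaining parameters align under the opening parenthesis; otherwise (as in miopen_contiguous) the return type moves to its own line.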