Commit 3003844f authored by Shucai Xiao's avatar Shucai Xiao
Browse files

clang format

parent 0905762b
...@@ -27,35 +27,35 @@ struct reduce_mean ...@@ -27,35 +27,35 @@ struct reduce_mean
std::vector<int64_t> tune_axes(std::size_t n_dim) const std::vector<int64_t> tune_axes(std::size_t n_dim) const
{ {
auto tuned_axes = axes; auto tuned_axes = axes;
if (tuned_axes.empty()) if(tuned_axes.empty())
{ {
tuned_axes.resize(n_dim); tuned_axes.resize(n_dim);
std::iota(tuned_axes.begin(), tuned_axes.end(), 0); std::iota(tuned_axes.begin(), tuned_axes.end(), 0);
} }
else else
{ {
for (std::size_t i = 0; i < tuned_axes.size(); ++i) for(std::size_t i = 0; i < tuned_axes.size(); ++i)
{ {
int64_t s_dim = static_cast<int64_t>(n_dim); int64_t s_dim = static_cast<int64_t>(n_dim);
if (tuned_axes[i] >= s_dim or tuned_axes[i] < -s_dim) if(tuned_axes[i] >= s_dim or tuned_axes[i] < -s_dim)
{ {
MIGRAPHX_THROW("REDUCE_MEAN: axis out of range"); MIGRAPHX_THROW("REDUCE_MEAN: axis out of range");
} }
if (tuned_axes[i] < 0) if(tuned_axes[i] < 0)
{ {
tuned_axes[i] += n_dim; tuned_axes[i] += n_dim;
} }
} }
} }
return tuned_axes; return tuned_axes;
} }
shape compute_shape(std::vector<shape> inputs) const shape compute_shape(std::vector<shape> inputs) const
{ {
check_shapes{inputs, *this}.has(1); check_shapes{inputs, *this}.has(1);
auto s = inputs.at(0); auto s = inputs.at(0);
auto lens = s.lens(); auto lens = s.lens();
auto tuned_axes = tune_axes(lens.size()); auto tuned_axes = tune_axes(lens.size());
for(auto axis : tuned_axes) for(auto axis : tuned_axes)
{ {
...@@ -88,7 +88,7 @@ struct reduce_mean ...@@ -88,7 +88,7 @@ struct reduce_mean
argument compute(const shape& output_shape, std::vector<argument> args) const argument compute(const shape& output_shape, std::vector<argument> args) const
{ {
argument result{output_shape}; argument result{output_shape};
auto arg_lens = args.front().get_shape().lens(); auto arg_lens = args.front().get_shape().lens();
auto tuned_axes = tune_axes(arg_lens.size()); auto tuned_axes = tune_axes(arg_lens.size());
std::vector<std::size_t> batch_lens(output_shape.lens().size(), 1); std::vector<std::size_t> batch_lens(output_shape.lens().size(), 1);
for(auto axis : tuned_axes) for(auto axis : tuned_axes)
......
...@@ -27,35 +27,35 @@ struct reduce_sum ...@@ -27,35 +27,35 @@ struct reduce_sum
std::vector<int64_t> tune_axes(std::size_t n_dim) const std::vector<int64_t> tune_axes(std::size_t n_dim) const
{ {
auto tuned_axes = axes; auto tuned_axes = axes;
if (tuned_axes.empty()) if(tuned_axes.empty())
{ {
tuned_axes.resize(n_dim); tuned_axes.resize(n_dim);
std::iota(tuned_axes.begin(), tuned_axes.end(), 0); std::iota(tuned_axes.begin(), tuned_axes.end(), 0);
} }
else else
{ {
for (std::size_t i = 0; i < tuned_axes.size(); ++i) for(std::size_t i = 0; i < tuned_axes.size(); ++i)
{ {
int64_t s_dim = static_cast<int64_t>(n_dim); int64_t s_dim = static_cast<int64_t>(n_dim);
if (tuned_axes[i] >= s_dim or tuned_axes[i] < -s_dim) if(tuned_axes[i] >= s_dim or tuned_axes[i] < -s_dim)
{ {
MIGRAPHX_THROW("REDUCE_SUM: axis out of range"); MIGRAPHX_THROW("REDUCE_SUM: axis out of range");
} }
if (tuned_axes[i] < 0) if(tuned_axes[i] < 0)
{ {
tuned_axes[i] += n_dim; tuned_axes[i] += n_dim;
} }
} }
} }
return tuned_axes; return tuned_axes;
} }
shape compute_shape(std::vector<shape> inputs) const shape compute_shape(std::vector<shape> inputs) const
{ {
check_shapes{inputs, *this}.has(1); check_shapes{inputs, *this}.has(1);
auto s = inputs.at(0); auto s = inputs.at(0);
auto lens = s.lens(); auto lens = s.lens();
auto tuned_axes = tune_axes(lens.size()); auto tuned_axes = tune_axes(lens.size());
for(auto axis : tuned_axes) for(auto axis : tuned_axes)
{ {
...@@ -88,7 +88,7 @@ struct reduce_sum ...@@ -88,7 +88,7 @@ struct reduce_sum
argument compute(const shape& output_shape, std::vector<argument> args) const argument compute(const shape& output_shape, std::vector<argument> args) const
{ {
argument result{output_shape}; argument result{output_shape};
auto arg_lens = args.front().get_shape().lens(); auto arg_lens = args.front().get_shape().lens();
std::vector<int64_t> tuned_axes = tune_axes(arg_lens.size()); std::vector<int64_t> tuned_axes = tune_axes(arg_lens.size());
std::vector<std::size_t> batch_lens(output_shape.lens().size(), 1); std::vector<std::size_t> batch_lens(output_shape.lens().size(), 1);
for(auto axis : tuned_axes) for(auto axis : tuned_axes)
......
...@@ -464,7 +464,7 @@ TEST_CASE(test_argmin) ...@@ -464,7 +464,7 @@ TEST_CASE(test_argmin)
template <class T> template <class T>
void test_reduce_ops() void test_reduce_ops()
{ {
{ {
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}}; migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 1, 1, 1}}, T{}, input); expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 1, 1, 1}}, T{}, input);
...@@ -472,7 +472,8 @@ void test_reduce_ops() ...@@ -472,7 +472,8 @@ void test_reduce_ops()
{ {
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}}; migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 1, 1, 1}}, T{{0, 1, 2, 3}}, input); expect_shape(
migraphx::shape{migraphx::shape::float_type, {1, 1, 1, 1}}, T{{0, 1, 2, 3}}, input);
} }
{ {
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}}; migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment