Commit 274c772b authored by Shucai Xiao

clang format

parent 99d1fed4
@@ -18,7 +18,7 @@ namespace op {
 struct argmax
 {
     int axis = 0;
     int keep_dims = 1;
     template <class Self, class F>
@@ -40,7 +40,7 @@ struct argmax
         }
         lens[axis] = 1;
-        if (!keep_dims)
+        if(!keep_dims)
         {
             lens.erase(lens.begin() + axis);
         }
...
@@ -18,7 +18,7 @@ namespace op {
 struct argmin
 {
     int axis = 0;
     int keep_dims = 1;
     template <class Self, class F>
@@ -40,7 +40,7 @@ struct argmin
         }
         lens[axis] = 1;
-        if (!keep_dims)
+        if(!keep_dims)
        {
             lens.erase(lens.begin() + axis);
         }
...
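
Both hunks above touch the same compute_shape logic: the reduced axis is set to 1, and erased entirely when keep_dims is 0. A minimal standalone sketch of that behavior (plain C++, not the MIGraphX shape API; reduced_output_lens is a hypothetical helper name):

    #include <cstddef>
    #include <vector>

    // Hypothetical helper mirroring the keep_dims handling in op::argmax / op::argmin.
    std::vector<std::size_t>
    reduced_output_lens(std::vector<std::size_t> lens, int axis, int keep_dims)
    {
        lens[axis] = 1; // the reduced axis collapses to size 1
        if(!keep_dims)
            lens.erase(lens.begin() + axis); // drop the reduced axis entirely
        return lens;
    }

    // Example: lens {2, 3, 4} with axis = 1 gives {2, 1, 4} when keep_dims = 1,
    // and {2, 4} when keep_dims = 0.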
@@ -655,17 +655,17 @@ struct cpu_argmax
         argument result{output_shape};
         result.visit([&](auto output) {
             args[0].visit([&](auto input) {
-                using value_type = batch_max(output_shape.elements(),
-                    std::numeric_limits<value_type>::lowest());
+                using value_type =
+                    batch_max(output_shape.elements(), std::numeric_limits<value_type>::lowest());
                 auto data_shape = args[0].get_shape();
                 shape_for_each(data_shape, [&](auto idx) {
                     auto data_index = data_shape.index(idx);
                     idx[axis] = 0;
                     auto out_index = data_shape.index(idx);
-                    if (batch_max[index] < input[data_index])
+                    if(batch_max[index] < input[data_index])
                     {
                         batch_max[index] = input[data_index];
                         output[index] = static_cast<int64_t>(data_index);
                     }
                 });
             });
@@ -693,17 +693,17 @@ struct cpu_argmin
         argument result{output_shape};
         result.visit([&](auto output) {
             args[0].visit([&](auto input) {
-                using value_type = batch_min(output_shape.elements(),
-                    std::numeric_limits<value_type>::max());
+                using value_type =
+                    batch_min(output_shape.elements(), std::numeric_limits<value_type>::max());
                 auto data_shape = args[0].get_shape();
                 shape_for_each(data_shape, [&](auto idx) {
                     auto data_index = data_shape.index(idx);
                     idx[axis] = 0;
                     auto out_index = data_shape.index(idx);
-                    if (batch_min[index] > input[data_index])
+                    if(batch_min[index] > input[data_index])
                     {
                         batch_min[index] = input[data_index];
                         output[index] = static_cast<int64_t>(data_index);
                     }
                 });
             });
@@ -732,8 +732,8 @@ struct cpu_apply
     void init()
     {
         apply_map["argmax"] = extend_op<cpu_argmax, op::argmax>();
         apply_map["argmin"] = extend_op<cpu_argmin, op::argmin>();
         apply_map["batch_norm_inference"] =
             extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
         apply_map["convolution"] = extend_op<cpu_convolution, op::convolution>();
...
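
The cpu_argmax and cpu_argmin hunks above are pure re-wrapping, but the reduction pattern they format is worth spelling out: keep a running best value per output slot and record the flat input index of the element that produced it. A self-contained sketch of that pattern for a 2-D argmax over axis 0, using plain std::vector in place of the MIGraphX argument/shape types (argmax_axis0 is a hypothetical name):

    #include <cstddef>
    #include <cstdint>
    #include <limits>
    #include <vector>

    // Sketch of the running-max reduction used by the reference cpu_argmax above:
    // for every element, find its output slot along the reduced axis and keep the
    // flat index of the largest value seen so far.
    std::vector<std::int64_t>
    argmax_axis0(const std::vector<float>& data, std::size_t rows, std::size_t cols)
    {
        std::vector<float> batch_max(cols, std::numeric_limits<float>::lowest());
        std::vector<std::int64_t> result(cols, 0);
        for(std::size_t r = 0; r < rows; ++r)
        {
            for(std::size_t c = 0; c < cols; ++c)
            {
                std::size_t data_index = r * cols + c;
                if(batch_max[c] < data[data_index])
                {
                    batch_max[c] = data[data_index];
                    result[c]    = static_cast<std::int64_t>(data_index);
                }
            }
        }
        return result;
    }

    // Flipping the comparison (and initializing with std::numeric_limits<float>::max())
    // gives the matching argmin, as in cpu_argmin above.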