Commit 5ec978eb authored by Shucai Xiao

clang format

parent edc23800
@@ -20,8 +20,7 @@ inline namespace MIGRAPHX_INLINE_NS {
 // In this case we need to broadcast the (:,:,1:,:) axis
 // of s0 plus the 1st dimension of s1 giving
 // output_lens = (3,2,7,5)
-std::vector<int> compute_broadcasted_lens(std::vector<int> s0,
-                                          std::vector<int> s1)
+std::vector<int> compute_broadcasted_lens(std::vector<int> s0, std::vector<int> s1)
 {
     if(s0 == s1)
         return s0;
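The comment in this hunk refers to NumPy-style broadcasting of the two length vectors. As a reference, here is a minimal self-contained sketch of that rule, assuming right-aligned dimensions where a length of 1 stretches to match; the example shapes below are illustrative assumptions, since the real compute_broadcasted_lens body is elided above:

    #include <algorithm>
    #include <cstddef>
    #include <stdexcept>
    #include <vector>

    // Sketch of NumPy-style broadcasting: align dims from the right; a dim
    // of 1 stretches to match the other shape, anything else must agree.
    std::vector<int> broadcast_lens_sketch(std::vector<int> s0, std::vector<int> s1)
    {
        if(s0.size() < s1.size())
            std::swap(s0, s1);
        std::vector<int> out = s0;
        std::size_t offset   = s0.size() - s1.size();
        for(std::size_t i = 0; i < s1.size(); i++)
        {
            int a = s0[offset + i];
            int b = s1[i];
            if(a != b and a != 1 and b != 1)
                throw std::runtime_error("broadcast: incompatible dims");
            out[offset + i] = std::max(a, b);
        }
        return out;
    }
    // broadcast_lens_sketch({3, 2, 1, 5}, {7, 5}) == {3, 2, 7, 5}: the third
    // axis of s0 broadcasts against the 1st dimension of s1, matching the
    // (3,2,7,5) output_lens mentioned in the comment above.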
@@ -34,8 +34,8 @@ void eliminate_concat::apply(module& p) const
 // axis OR the sizes to the left of this axis are all equal to 1
 // Since we've already checked that the non-axis dimensions are identical
 // we only need to check the first input
-auto lens = ins->inputs().front()->get_shape().lens();
-auto concat_op = concat_opt.get_concat(ins->get_operator());
+auto lens      = ins->inputs().front()->get_shape().lens();
+auto concat_op = concat_opt.get_concat(ins->get_operator());
 int axis_index = tune_axis(lens.size(), concat_op.axis, concat_op.name());
 if(axis_index == 0 ||
    std::all_of(lens.begin(), lens.begin() + axis_index, [](auto x) { return x == 1; }))
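tune_axis in this hunk normalizes the concat axis before the leading-axis check. A hedged sketch of what such a helper typically does; the real MIGraphX tune_axis takes the rank, the axis, and an op name for diagnostics, but its exact body is not shown in this diff:

    #include <stdexcept>
    #include <string>

    // Sketch of an axis-normalization helper: map a possibly negative,
    // Python-style axis into [0, n_dim) and reject out-of-range values.
    int tune_axis_sketch(int n_dim, int axis, const std::string& op_name)
    {
        int tuned = axis < 0 ? axis + n_dim : axis;
        if(tuned < 0 or tuned >= n_dim)
            throw std::runtime_error(op_name + ": axis out of range");
        return tuned;
    }
    // For a 4-D tensor, axis -4 normalizes to 0, so the axis_index == 0
    // fast path above also covers negative spellings of the leading axis.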
@@ -11,8 +11,7 @@ inline namespace MIGRAPHX_INLINE_NS {
 struct module;
 struct operation;
-std::vector<int> compute_broadcasted_lens(std::vector<int> s0,
-                                          std::vector<int> s1);
+std::vector<int> compute_broadcasted_lens(std::vector<int> s0, std::vector<int> s1);
 shape common_shape(const std::vector<shape>& shapes);
 instruction_ref insert_common_op(module& m,
@@ -38,7 +38,7 @@ struct concat
 std::string name() const { return "concat"; }
 std::vector<int> compute_offsets(const shape& output_shape,
-                                         const std::vector<argument>& args) const
+                                 const std::vector<argument>& args) const
 {
     auto n_dims = args[0].get_shape().lens().size();
     std::vector<int> offsets;
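compute_offsets itself is elided above; as a rough sketch of the idea (names and the element-wise convention here are assumptions, not the operator's actual code), each input to concat starts where the previous inputs end along the concat axis:

    #include <vector>

    // Sketch: accumulate, per input, the running extent of all earlier
    // inputs along the concat axis. The real operator then converts such
    // positions through the output strides.
    std::vector<int> concat_axis_offsets_sketch(const std::vector<std::vector<int>>& input_lens,
                                                int axis)
    {
        std::vector<int> offsets;
        int running = 0;
        for(const auto& lens : input_lens)
        {
            offsets.push_back(running);
            running += lens[axis];
        }
        return offsets;
    }
    // Concatenating shapes (2,3) and (2,4) on axis 1 gives offsets {0, 3}.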
@@ -64,7 +64,7 @@ struct convolution
 const shape& input   = inputs.at(0);
 const shape& weights = inputs.at(1);
-int kdims = input_size - 2;
+int kdims            = input_size - 2;
 if(kdims != this->kdims())
 {
     MIGRAPHX_THROW("convolution: input k-dims does not match attribute size");
@@ -54,7 +54,7 @@ struct deconvolution
 const shape& input   = inputs.at(0);
 const shape& weights = inputs.at(1);
-int kdims = input.lens().size() - 2;
+int kdims            = input.lens().size() - 2;
 if(kdims != this->kdims())
 {
     MIGRAPHX_THROW("deconvolution: input k-dims does not match attribute size");
@@ -41,10 +41,8 @@ struct flatten
 {
     check_shapes{inputs, *this}.has(1).standard();
     auto&& lens = inputs.front().lens();
-    auto x =
-        std::accumulate(lens.begin(), lens.begin() + axis, int{1}, std::multiplies<>{});
-    auto y =
-        std::accumulate(lens.begin() + axis, lens.end(), int{1}, std::multiplies<>{});
+    auto x = std::accumulate(lens.begin(), lens.begin() + axis, int{1}, std::multiplies<>{});
+    auto y = std::accumulate(lens.begin() + axis, lens.end(), int{1}, std::multiplies<>{});
     return {inputs.at(0).type(), {x, y}};
 }
 argument compute(shape output_shape, std::vector<argument> args) const
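The two accumulate calls joined by this hunk are the whole of flatten's shape computation: everything before the axis is multiplied into one dimension and everything from the axis onward into the other. A runnable check of that arithmetic:

    #include <cassert>
    #include <functional>
    #include <numeric>
    #include <vector>

    int main()
    {
        // Same computation as in the hunk above.
        std::vector<int> lens = {2, 3, 4, 5};
        int axis = 2;
        auto x = std::accumulate(lens.begin(), lens.begin() + axis, int{1}, std::multiplies<>{});
        auto y = std::accumulate(lens.begin() + axis, lens.end(), int{1}, std::multiplies<>{});
        assert(x == 6 and y == 20); // (2,3,4,5) flattened at axis 2 -> (6,20)
    }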
@@ -20,8 +20,8 @@ struct nonzero
 shape compute_shape(std::vector<shape> inputs) const
 {
     check_shapes{inputs, *this}.has(1).standard();
-    auto elem_num = inputs[0].elements();
-    int dim_num = inputs[0].lens().size();
+    auto elem_num             = inputs[0].elements();
+    int dim_num               = inputs[0].lens().size();
     std::vector<int> out_lens = {dim_num, elem_num};
     return {shape::int64_type, out_lens};
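nonzero's output shape is worst-case: one row per input dimension and one column per input element, since every element might be nonzero. A small check of the shape computed above:

    #include <cassert>
    #include <vector>

    int main()
    {
        std::vector<int> in_lens = {2, 3};
        int dim_num  = in_lens.size();          // 2 input dimensions
        int elem_num = in_lens[0] * in_lens[1]; // 6 elements
        std::vector<int> out_lens = {dim_num, elem_num};
        assert(out_lens[0] == 2 and out_lens[1] == 6); // int64 output of shape (2,6)
    }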
@@ -21,11 +21,11 @@ namespace op {
 struct pooling
 {
-    std::string mode = "average";
-    std::vector<int> padding = {0, 0};
-    std::vector<int> stride = {1, 1};
-    std::vector<int> lengths = {1, 1};
-    bool ceil_mode = false;
+    std::string mode         = "average";
+    std::vector<int> padding = {0, 0};
+    std::vector<int> stride  = {1, 1};
+    std::vector<int> lengths = {1, 1};
+    bool ceil_mode           = false;
     template <class Self, class F>
     static auto reflect(Self& self, F f)
@@ -76,7 +76,7 @@ struct pooling
 dim_size = input_lens[i + 2] + padding_factor - lengths[i];
 assert(dim_size >= 0);
 int len = (ceil_mode) ? ceil_divide<std::ptrdiff_t>(dim_size, stride[i])
-                                : floor_divide<std::ptrdiff_t>(dim_size, stride[i]);
+                      : floor_divide<std::ptrdiff_t>(dim_size, stride[i]);
 output_lens.push_back(int(std::max<std::ptrdiff_t>(1, len + 1)));
 }
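The second hunk is pooling's per-dimension output-length formula. A self-contained sketch of the same arithmetic, assuming ceil_divide/floor_divide are the usual rounding divisions and padding_factor is the total (both-sides) padding for the dimension:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    int pool_out_len(int in, int padding_factor, int kernel, int stride, bool ceil_mode)
    {
        std::ptrdiff_t dim_size = in + padding_factor - kernel;
        std::ptrdiff_t len = ceil_mode ? (dim_size + stride - 1) / stride // assumed ceil_divide
                                       : dim_size / stride;               // assumed floor_divide
        return int(std::max<std::ptrdiff_t>(1, len + 1));
    }

    int main()
    {
        // 7-wide input, no padding, 3-wide window, stride 2:
        // floor((7 + 0 - 3) / 2) + 1 = 3 windows; ceil mode agrees here.
        assert(pool_out_len(7, 0, 3, 2, false) == 3);
        assert(pool_out_len(7, 0, 3, 2, true) == 3);
    }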
@@ -60,7 +60,7 @@ struct quant_convolution
 const shape& input   = inputs.at(0);
 const shape& weights = inputs.at(1);
 auto t               = input.type();
-int kdims = input.lens().size() - 2;
+int kdims            = input.lens().size() - 2;
 if(kdims != this->kdims())
 {
     MIGRAPHX_THROW("quant_convolution: input k-dims does not match attribute size");
@@ -66,9 +66,9 @@ struct roialign
 }
 std::vector<int> out_lens = x_lens;
-out_lens[0] = roi_lens[0];
-out_lens[2] = output_height;
-out_lens[3] = output_width;
+out_lens[0]               = roi_lens[0];
+out_lens[2]               = output_height;
+out_lens[3]               = output_width;
 return {type, out_lens};
 }
@@ -92,7 +92,7 @@ struct roialign
 shape_for_each(comp_s, [&](auto idx) {
     std::array<int, 2> p = {idx[0], idx[1]};
     std::array<int, 2> i = {idx[2], idx[3]};
-    auto index = comp_s.index(idx);
+    auto index           = comp_s.index(idx);
     std::array<float, 2> xy{};
     std::array<int, 2> low{};
@@ -182,14 +182,14 @@ struct roialign
 argument result{output_shape};
 const auto& out_lens = output_shape.lens();
-int64_t n_rois = out_lens[0];
-int channels = out_lens[1];
+int64_t n_rois       = out_lens[0];
+int channels         = out_lens[1];
 // output dims of height and width, in all 2-dim arrays, the first dim
 // is for height and second dim is for width
 std::array<int, 2> out_dims = {out_lens[2], out_lens[3]};
-const auto& x_lens = args.at(0).get_shape().lens();
+const auto& x_lens          = args.at(0).get_shape().lens();
 // input dims of height and width
 std::array<int, 2> in_dims = {x_lens[2], x_lens[3]};
-auto roi_s = args.at(1).get_shape();
+auto roi_s                 = args.at(1).get_shape();
 visit_all(result, args.at(0), args.at(1))([&](auto output, auto x, auto roi) {
     const auto* batch_indices = args.at(2).cast<int64_t>();
@@ -58,7 +58,7 @@ struct slice
 {
     const std::vector<int>& lens    = s.lens();
     const std::vector<int>& strides = s.strides();
-    auto offset = 0;
+    auto offset                     = 0;
     if(!axes.empty())
     {
         for(int i = 0; i < axes.size(); i++)
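The loop body that fills in offset is elided above. As a hedged sketch of what a slice offset amounts to (variable names here are assumptions), the first sliced element lives at the sum of each start index times the stride of its axis:

    #include <cstddef>
    #include <vector>

    int slice_offset_sketch(const std::vector<int>& starts,
                            const std::vector<int>& axes,
                            const std::vector<int>& strides)
    {
        int offset = 0;
        for(std::size_t i = 0; i < axes.size(); i++)
            offset += starts[i] * strides[axes[i]];
        return offset;
    }
    // Slicing axis 1 from index 2 of a standard (3,4) shape, whose strides
    // are {4, 1}, gives offset 2 * 1 = 2 elements.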
@@ -78,8 +78,8 @@ void par_for_impl(int n, int threadsize, F f)
 template <class F>
 void par_for(int n, int min_grain, F f)
 {
-    const auto threadsize = std::min<int>(std::thread::hardware_concurrency(),
-                                          n / std::max<int>(1, min_grain));
+    const auto threadsize =
+        std::min<int>(std::thread::hardware_concurrency(), n / std::max<int>(1, min_grain));
     par_for_impl(n, threadsize, f);
 }
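The expression reflowed here is par_for's thread-count heuristic: never more threads than the hardware reports, and never so many that a thread would get fewer than min_grain items. A runnable illustration:

    #include <algorithm>
    #include <iostream>
    #include <thread>

    int main()
    {
        int n = 1000, min_grain = 100;
        const auto threadsize =
            std::min<int>(std::thread::hardware_concurrency(), n / std::max<int>(1, min_grain));
        // On a 16-core machine this yields 10 threads of ~100 items each;
        // on a 4-core machine it yields 4 threads of ~250 items each.
        std::cout << threadsize << "\n";
    }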
@@ -69,8 +69,7 @@ struct rewrite_rnn
                  instruction_ref last_cell_output,
                  op::rnn_direction dirct) const;
-int
-get_seq_len(const module& prog, instruction_ref input, instruction_ref seq_lens) const;
+int get_seq_len(const module& prog, instruction_ref input, instruction_ref seq_lens) const;
 instruction_ref pad_hidden_states(module& prog,
                                   instruction_ref seq,
@@ -76,9 +76,7 @@ struct shape
 template <class Range1, class Range2>
 shape(type_t t, const Range1& l, const Range2& s)
-    : shape(t,
-            std::vector<int>(l.begin(), l.end()),
-            std::vector<int>(s.begin(), s.end()))
+    : shape(t, std::vector<int>(l.begin(), l.end()), std::vector<int>(s.begin(), s.end()))
 {
 }
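This delegating constructor just copies two arbitrary ranges into the std::vector<int> lens and strides that shape stores. A small standalone illustration of the conversion it performs, without the MIGraphX headers:

    #include <array>
    #include <vector>

    int main()
    {
        std::array<int, 2> l = {2, 3};
        std::array<int, 2> s = {3, 1}; // row-major strides for a (2,3) shape
        std::vector<int> lens(l.begin(), l.end());
        std::vector<int> strides(s.begin(), s.end());
    }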
@@ -25,9 +25,9 @@ struct onnx_parser
 struct node_info
 {
     attribute_map attributes{};
-    int num_outputs = 1;
+    int num_outputs  = 1;
     std::string name = "";
-    module* mod = nullptr;
+    module* mod      = nullptr;
     instruction_ref make_contiguous(instruction_ref ins) const;
     instruction_ref add_bias(const std::vector<instruction_ref>& args,
                              instruction_ref curr_ins,
@@ -59,7 +59,7 @@ struct onnx_parser
     onnx_parser&, const node_info&, std::vector<instruction_ref>)>;
 node_map nodes;
 std::unordered_map<std::string, instruction_ref> instructions;
-program prog = program();
+program prog          = program();
 int default_dim_value = 1;
 std::unordered_map<std::string, std::vector<int>> map_input_dims;
 bool skip_unknown_operators = false;
@@ -31,8 +31,7 @@ static literal
 create_literal(shape::type_t shape_type, const std::vector<int>& dims, const char* data)
 {
     // empty input
-    auto elem_num =
-        std::accumulate(dims.begin(), dims.end(), int(1), std::multiplies<int>());
+    auto elem_num = std::accumulate(dims.begin(), dims.end(), int(1), std::multiplies<int>());
     if(elem_num == 0)
     {
         return {};
@@ -48,8 +47,7 @@ template <class T, MIGRAPHX_REQUIRES(not std::is_pointer<T>{})>
 static literal create_literal(shape::type_t shape_type, const std::vector<int>& dims, T data)
 {
     // empty input
-    auto elem_num =
-        std::accumulate(dims.begin(), dims.end(), int(1), std::multiplies<int>());
+    auto elem_num = std::accumulate(dims.begin(), dims.end(), int(1), std::multiplies<int>());
     if(elem_num == 0)
     {
         return {};
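Both create_literal overloads guard against empty tensors the same way: the element count is the product of the dims, and a zero anywhere makes it zero. A runnable check:

    #include <cassert>
    #include <functional>
    #include <numeric>
    #include <vector>

    int main()
    {
        std::vector<int> dims = {2, 0, 3};
        auto elem_num = std::accumulate(dims.begin(), dims.end(), int(1), std::multiplies<int>());
        assert(elem_num == 0); // any zero dim -> the empty literal {} is returned
    }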
@@ -400,8 +398,7 @@ literal onnx_parser::parse_tensor(const onnx::TensorProto& t) const
 }
 MIGRAPHX_THROW("PARSE_TENSOR: Invalid tensor type");
 }
-shape onnx_parser::parse_type(const onnx::TypeProto& t,
-                              const std::vector<int>& input_dims) const
+shape onnx_parser::parse_type(const onnx::TypeProto& t, const std::vector<int>& input_dims) const
 {
     shape::type_t shape_type = get_type(t.tensor_type().elem_type());
     if(!input_dims.empty())
@@ -411,23 +408,21 @@ shape onnx_parser::parse_type(const onnx::TypeProto& t,
 std::vector<int> dims;
 auto&& tensor_dims = t.tensor_type().shape().dim();
-std::transform(tensor_dims.begin(),
-               tensor_dims.end(),
-               std::back_inserter(dims),
-               [&](auto&& d) -> int {
-                   if(d.has_dim_value())
-                   {
-                       if(static_cast<int>(d.dim_value()) <= 0)
-                       {
-                           return default_dim_value;
-                       }
-                       return d.dim_value();
-                   }
-                   else
-                   {
-                       return default_dim_value;
-                   }
-               });
+std::transform(
+    tensor_dims.begin(), tensor_dims.end(), std::back_inserter(dims), [&](auto&& d) -> int {
+        if(d.has_dim_value())
+        {
+            if(static_cast<int>(d.dim_value()) <= 0)
+            {
+                return default_dim_value;
+            }
+            return d.dim_value();
+        }
+        else
+        {
+            return default_dim_value;
+        }
+    });
 if(dims.empty())
     return {shape_type};
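The reflowed lambda maps each ONNX dimension to an int, substituting default_dim_value for anything unset or non-positive (dynamic dims such as a symbolic batch size). A self-contained analog, where DimSketch is a stand-in for onnx::TensorShapeProto::Dimension:

    #include <vector>

    struct DimSketch
    {
        bool has_value;
        int value;
    };

    std::vector<int> map_dims_sketch(const std::vector<DimSketch>& tensor_dims,
                                     int default_dim_value)
    {
        std::vector<int> dims;
        for(const auto& d : tensor_dims)
        {
            // Same branch structure as the lambda above.
            if(d.has_value and d.value > 0)
                dims.push_back(d.value);
            else
                dims.push_back(default_dim_value);
        }
        return dims;
    }
    // {unset, 3, 224, 224} with default_dim_value = 1 maps to {1, 3, 224, 224}.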
@@ -122,7 +122,7 @@ void check_asym_padding(const onnx_parser::node_info& info,
                         int count_include_pad,
                         float pad_val)
 {
-    int pad_ndims = padding.size() / 2;
-    auto left_pad_it = padding.begin();
-    auto right_pad_it = left_pad_it + pad_ndims;
+    int pad_ndims     = padding.size() / 2;
+    auto left_pad_it  = padding.begin();
+    auto right_pad_it = left_pad_it + pad_ndims;
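The split realigned here relies on the ONNX padding layout: the vector holds all leading pads followed by all trailing pads, so half its length is the number of padded dimensions. A runnable check:

    #include <cassert>
    #include <vector>

    int main()
    {
        std::vector<int> padding = {0, 1, 0, 2}; // 2-D: leading {0,1}, trailing {0,2}
        int pad_ndims     = padding.size() / 2;
        auto left_pad_it  = padding.begin();
        auto right_pad_it = left_pad_it + pad_ndims;
        assert(pad_ndims == 2);
        assert(*(left_pad_it + 1) == 1 and *(right_pad_it + 1) == 2);
    }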
@@ -61,12 +61,8 @@ struct parse_convolution : op_parser<parse_convolution>
 {
     auto weight_lens = weights->get_shape().lens();
     std::vector<int> k_lens(weight_lens.begin() + 2, weight_lens.end());
-    cal_auto_padding_size(info,
-                          values,
-                          k_lens,
-                          values["dilation"].to_vector<int>(),
-                          in_lens,
-                          padding);
+    cal_auto_padding_size(
+        info, values, k_lens, values["dilation"].to_vector<int>(), in_lens, padding);
     auto auto_pad = info.attributes["auto_pad"].s();
     if(auto_pad.find("SAME") != std::string::npos)
     {
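For the SAME branch in the hunk above, auto-padding conventionally chooses pads so that the output length equals ceil(input / stride). A hedged sketch of that per-dimension computation; this is the standard SAME formula, not necessarily the exact body of cal_auto_padding_size:

    #include <algorithm>
    #include <cassert>

    int same_total_pad(int in, int kernel, int stride, int dilation)
    {
        int effective_kernel = dilation * (kernel - 1) + 1;
        int out = (in + stride - 1) / stride; // ceil(in / stride)
        return std::max(0, (out - 1) * stride + effective_kernel - in);
    }

    int main()
    {
        // 224-wide input, 3x3 kernel, stride 1, dilation 1: 2 total pad,
        // split 1 leading / 1 trailing.
        assert(same_total_pad(224, 3, 1, 1) == 2);
    }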
@@ -33,7 +33,8 @@ struct parse_imagescalar : op_parser<parse_imagescalar>
 auto input_type = input_shape.type();
 auto scale_val  = info.add_literal(literal{shape{input_type}, {scale}});
-auto bias_vals  = info.add_literal(literal{shape{input_type, {static_cast<int>(bias.size())}}, bias});
+auto bias_vals =
+    info.add_literal(literal{shape{input_type, {static_cast<int>(bias.size())}}, bias});
 auto scale_tensor = info.add_instruction(
     migraphx::make_op("scalar", {{"scalar_bcst_dims", input_lens}}), scale_val);