Commit 5ec978eb authored by Shucai Xiao

clang format

parent edc23800
@@ -20,8 +20,7 @@ inline namespace MIGRAPHX_INLINE_NS {
 // In this case we need to broadcast the (:,:,1:,:) axis
 // of s0 plus the 1st dimension of s1 giving
 // output_lens = (3,2,7,5)
-std::vector<int> compute_broadcasted_lens(std::vector<int> s0,
-                                          std::vector<int> s1)
+std::vector<int> compute_broadcasted_lens(std::vector<int> s0, std::vector<int> s1)
 {
     if(s0 == s1)
         return s0;
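As context for the hunk above, here is a minimal standalone sketch of the broadcasting rule compute_broadcasted_lens implements (NumPy-style: align the shapes at their trailing axes, left-pad the shorter one with 1s, and take the larger extent where the other is 1). The helper name and error handling are illustrative, not MIGraphX's exact code.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

std::vector<int> broadcast_lens(std::vector<int> s0, std::vector<int> s1)
{
    if(s0 == s1)
        return s0;
    // Left-pad the shorter shape with 1s so both have the same rank.
    if(s0.size() < s1.size())
        s0.insert(s0.begin(), s1.size() - s0.size(), 1);
    else if(s1.size() < s0.size())
        s1.insert(s1.begin(), s0.size() - s1.size(), 1);
    std::vector<int> out(s0.size());
    for(std::size_t i = 0; i < s0.size(); ++i)
    {
        if(s0[i] != s1[i] && s0[i] != 1 && s1[i] != 1)
            throw std::runtime_error("shapes are not broadcastable");
        // The broadcast extent is the larger of the two (the other is 1).
        out[i] = std::max(s0[i], s1[i]);
    }
    return out;
}

int main()
{
    // Matches the comment in the hunk: (3,2,1,5) with (7,1) -> (3,2,7,5).
    for(int d : broadcast_lens({3, 2, 1, 5}, {7, 1}))
        std::cout << d << ' ';
    std::cout << '\n';
}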
@@ -11,8 +11,7 @@ inline namespace MIGRAPHX_INLINE_NS {
 struct module;
 struct operation;
-std::vector<int> compute_broadcasted_lens(std::vector<int> s0,
-                                          std::vector<int> s1);
+std::vector<int> compute_broadcasted_lens(std::vector<int> s0, std::vector<int> s1);
 shape common_shape(const std::vector<shape>& shapes);
 instruction_ref insert_common_op(module& m,
@@ -41,10 +41,8 @@ struct flatten
     {
         check_shapes{inputs, *this}.has(1).standard();
         auto&& lens = inputs.front().lens();
-        auto x =
-            std::accumulate(lens.begin(), lens.begin() + axis, int{1}, std::multiplies<>{});
-        auto y =
-            std::accumulate(lens.begin() + axis, lens.end(), int{1}, std::multiplies<>{});
+        auto x = std::accumulate(lens.begin(), lens.begin() + axis, int{1}, std::multiplies<>{});
+        auto y = std::accumulate(lens.begin() + axis, lens.end(), int{1}, std::multiplies<>{});
         return {inputs.at(0).type(), {x, y}};
     }
     argument compute(shape output_shape, std::vector<argument> args) const
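The flatten shape computation reflowed above is easy to check in isolation: every dimension before axis folds into one extent, and every dimension from axis on folds into another, giving a 2-D shape. A standalone sketch:

#include <cstddef>
#include <functional>
#include <iostream>
#include <numeric>
#include <utility>
#include <vector>

std::pair<int, int> flatten_dims(const std::vector<int>& lens, std::size_t axis)
{
    // Product of dims [0, axis) and product of dims [axis, end).
    auto x = std::accumulate(lens.begin(), lens.begin() + axis, int{1}, std::multiplies<>{});
    auto y = std::accumulate(lens.begin() + axis, lens.end(), int{1}, std::multiplies<>{});
    return {x, y};
}

int main()
{
    // Flattening (2,3,4,5) at axis 2 gives the 2-D shape (6, 20).
    auto [x, y] = flatten_dims({2, 3, 4, 5}, 2);
    std::cout << x << " " << y << '\n';
}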
@@ -78,8 +78,8 @@ void par_for_impl(int n, int threadsize, F f)
 template <class F>
 void par_for(int n, int min_grain, F f)
 {
-    const auto threadsize = std::min<int>(std::thread::hardware_concurrency(),
-                                          n / std::max<int>(1, min_grain));
+    const auto threadsize =
+        std::min<int>(std::thread::hardware_concurrency(), n / std::max<int>(1, min_grain));
     par_for_impl(n, threadsize, f);
 }
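The par_for heuristic above caps the thread count at both the hardware concurrency and n / min_grain, so every thread receives at least min_grain items; the inner std::max guards against dividing by zero when min_grain is 0. A standalone sketch of just that computation:

#include <algorithm>
#include <iostream>
#include <thread>

int choose_threadsize(int n, int min_grain)
{
    // At most one thread per min_grain elements, and never more threads
    // than the hardware reports; std::max avoids division by zero.
    return std::min<int>(std::thread::hardware_concurrency(), n / std::max<int>(1, min_grain));
}

int main()
{
    // With n = 1024 and min_grain = 256, at most 4 threads are used,
    // regardless of how many cores the machine has.
    std::cout << choose_threadsize(1024, 256) << '\n';
}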
@@ -69,8 +69,7 @@ struct rewrite_rnn
                               instruction_ref last_cell_output,
                               op::rnn_direction dirct) const;
-    int
-    get_seq_len(const module& prog, instruction_ref input, instruction_ref seq_lens) const;
+    int get_seq_len(const module& prog, instruction_ref input, instruction_ref seq_lens) const;
     instruction_ref pad_hidden_states(module& prog,
                                       instruction_ref seq,
@@ -76,9 +76,7 @@ struct shape
     template <class Range1, class Range2>
     shape(type_t t, const Range1& l, const Range2& s)
-        : shape(t,
-                std::vector<int>(l.begin(), l.end()),
-                std::vector<int>(s.begin(), s.end()))
+        : shape(t, std::vector<int>(l.begin(), l.end()), std::vector<int>(s.begin(), s.end()))
     {
     }
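The reflowed constructor above uses a common delegating-constructor pattern: a templated overload accepts any pair of ranges and forwards std::vector copies to the canonical constructor. A self-contained sketch with a hypothetical stand-in type, not MIGraphX's shape:

#include <array>
#include <iostream>
#include <utility>
#include <vector>

struct dims_pair // hypothetical stand-in for shape
{
    std::vector<int> lens;
    std::vector<int> strides;

    // Canonical constructor.
    dims_pair(std::vector<int> l, std::vector<int> s) : lens(std::move(l)), strides(std::move(s))
    {
    }

    // Any two ranges with begin()/end() are copied into vectors and
    // delegated to the canonical constructor, as in the diff above.
    template <class Range1, class Range2>
    dims_pair(const Range1& l, const Range2& s)
        : dims_pair(std::vector<int>(l.begin(), l.end()), std::vector<int>(s.begin(), s.end()))
    {
    }
};

int main()
{
    std::array<int, 2> l = {3, 4};
    std::array<int, 2> s = {4, 1};
    dims_pair d(l, s); // goes through the range constructor
    std::cout << d.lens[0] << " " << d.strides[1] << '\n'; // prints: 3 1
}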
@@ -31,8 +31,7 @@ static literal
 create_literal(shape::type_t shape_type, const std::vector<int>& dims, const char* data)
 {
     // empty input
-    auto elem_num =
-        std::accumulate(dims.begin(), dims.end(), int(1), std::multiplies<int>());
+    auto elem_num = std::accumulate(dims.begin(), dims.end(), int(1), std::multiplies<int>());
     if(elem_num == 0)
     {
         return {};
@@ -48,8 +47,7 @@ template <class T, MIGRAPHX_REQUIRES(not std::is_pointer<T>{})>
 static literal create_literal(shape::type_t shape_type, const std::vector<int>& dims, T data)
 {
     // empty input
-    auto elem_num =
-        std::accumulate(dims.begin(), dims.end(), int(1), std::multiplies<int>());
+    auto elem_num = std::accumulate(dims.begin(), dims.end(), int(1), std::multiplies<int>());
     if(elem_num == 0)
     {
         return {};
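Both create_literal overloads above use the same element-count idiom: folding dims with std::multiplies. Two edge cases are worth noting: an empty dims vector yields the initial value 1 (a scalar), while any zero-length dimension yields 0, which the parser treats as empty input. A standalone sketch:

#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

int element_count(const std::vector<int>& dims)
{
    return std::accumulate(dims.begin(), dims.end(), int(1), std::multiplies<int>());
}

int main()
{
    std::cout << element_count({2, 3, 4}) << '\n'; // 24
    std::cout << element_count({}) << '\n';        // 1: no dims means scalar
    std::cout << element_count({2, 0, 4}) << '\n'; // 0: treated as empty input
}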
@@ -400,8 +398,7 @@ literal onnx_parser::parse_tensor(const onnx::TensorProto& t) const
     }
     MIGRAPHX_THROW("PARSE_TENSOR: Invalid tensor type");
 }
-shape onnx_parser::parse_type(const onnx::TypeProto& t,
-                              const std::vector<int>& input_dims) const
+shape onnx_parser::parse_type(const onnx::TypeProto& t, const std::vector<int>& input_dims) const
 {
     shape::type_t shape_type = get_type(t.tensor_type().elem_type());
     if(!input_dims.empty())
@@ -411,10 +408,8 @@ shape onnx_parser::parse_type(const onnx::TypeProto& t,
     std::vector<int> dims;
     auto&& tensor_dims = t.tensor_type().shape().dim();
-    std::transform(tensor_dims.begin(),
-                   tensor_dims.end(),
-                   std::back_inserter(dims),
-                   [&](auto&& d) -> int {
+    std::transform(
+        tensor_dims.begin(), tensor_dims.end(), std::back_inserter(dims), [&](auto&& d) -> int {
             if(d.has_dim_value())
             {
                 if(static_cast<int>(d.dim_value()) <= 0)
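The reflowed std::transform above maps ONNX dimension records to ints through a back_inserter. A self-contained sketch of the same pattern, using a stand-in Dim struct rather than the real onnx protobuf type, and a simplified fallback policy for dynamic dimensions:

#include <algorithm>
#include <iostream>
#include <iterator>
#include <vector>

struct Dim // stand-in for the ONNX protobuf dimension record
{
    bool has_value;
    long value;
};

int main()
{
    std::vector<Dim> tensor_dims = {{true, 3}, {false, 0}, {true, 5}};
    std::vector<int> dims;
    std::transform(
        tensor_dims.begin(), tensor_dims.end(), std::back_inserter(dims), [](const Dim& d) -> int {
            // Simplified policy: dynamic or non-positive dims become 1; the
            // real parser applies its own handling for these cases.
            if(d.has_value && d.value > 0)
                return static_cast<int>(d.value);
            return 1;
        });
    for(int d : dims)
        std::cout << d << ' '; // prints: 3 1 5
    std::cout << '\n';
}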
@@ -61,12 +61,8 @@ struct parse_convolution : op_parser<parse_convolution>
         {
             auto weight_lens = weights->get_shape().lens();
             std::vector<int> k_lens(weight_lens.begin() + 2, weight_lens.end());
-            cal_auto_padding_size(info,
-                                  values,
-                                  k_lens,
-                                  values["dilation"].to_vector<int>(),
-                                  in_lens,
-                                  padding);
+            cal_auto_padding_size(
+                info, values, k_lens, values["dilation"].to_vector<int>(), in_lens, padding);
             auto auto_pad = info.attributes["auto_pad"].s();
             if(auto_pad.find("SAME") != std::string::npos)
             {
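cal_auto_padding_size is only called, not defined, in the hunk above. For context, a hedged sketch of the standard ONNX/TF SAME rule such a helper computes: pad each spatial axis just enough that the output length equals ceil(input / stride). The function name and the begin/end split are illustrative assumptions, not MIGraphX's implementation.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

std::vector<int> same_total_padding(const std::vector<int>& in_lens,
                                    const std::vector<int>& k_lens,
                                    const std::vector<int>& strides,
                                    const std::vector<int>& dilations)
{
    // Per-axis total padding; a real parser splits it into begin/end halves
    // (SAME_UPPER puts the extra pixel at the end, SAME_LOWER at the start).
    std::vector<int> pads(in_lens.size());
    for(std::size_t i = 0; i < in_lens.size(); ++i)
    {
        int effective_k = (k_lens[i] - 1) * dilations[i] + 1;
        int out         = (in_lens[i] + strides[i] - 1) / strides[i]; // ceil(in / stride)
        pads[i]         = std::max(0, (out - 1) * strides[i] + effective_k - in_lens[i]);
    }
    return pads;
}

int main()
{
    // A 3x3 kernel, stride 1, dilation 1 on a 5x5 input needs 2 pixels of
    // total padding per axis to keep the output 5x5.
    auto pads = same_total_padding({5, 5}, {3, 3}, {1, 1}, {1, 1});
    std::cout << pads[0] << " " << pads[1] << '\n'; // prints: 2 2
}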
@@ -33,7 +33,8 @@ struct parse_imagescalar : op_parser<parse_imagescalar>
         auto input_type = input_shape.type();
         auto scale_val = info.add_literal(literal{shape{input_type}, {scale}});
-        auto bias_vals = info.add_literal(literal{shape{input_type, {static_cast<int>(bias.size())}}, bias});
+        auto bias_vals =
+            info.add_literal(literal{shape{input_type, {static_cast<int>(bias.size())}}, bias});
         auto scale_tensor = info.add_instruction(
             migraphx::make_op("scalar", {{"scalar_bcst_dims", input_lens}}), scale_val);
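For context on the hunk above: the ONNX ImageScaler operator this parser handles computes output[n][c][h][w] = input[n][c][h][w] * scale + bias[c], which is why scale is broadcast as a scalar and bias per channel. A plain-loop sketch of those semantics, not the MIGraphX implementation:

#include <iostream>
#include <vector>

int main()
{
    const int channels = 2, pixels = 3; // one image, CHW layout flattened
    const float scale = 2.0f;
    const std::vector<float> bias = {0.5f, -1.0f}; // one bias per channel
    std::vector<float> img(channels * pixels, 1.0f);
    for(int c = 0; c < channels; ++c)
        for(int i = 0; i < pixels; ++i)
            img[c * pixels + i] = img[c * pixels + i] * scale + bias[c];
    std::cout << img[0] << " " << img[pixels] << '\n'; // prints: 2.5 1
}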