Commit 5ec978eb authored by Shucai Xiao

clang format

parent edc23800
@@ -46,7 +46,8 @@ struct parse_nonzero : op_parser<parse_nonzero>
});
shape in_s = args[0]->get_shape();
-        shape out_s{shape::int64_type, {static_cast<int>(in_s.lens().size()), static_cast<int>(indices.size())}};
+        shape out_s{shape::int64_type,
+                    {static_cast<int>(in_s.lens().size()), static_cast<int>(indices.size())}};
std::vector<int64_t> out_data(out_s.elements());
for(int i = 0; i < indices.size(); ++i)
......
@@ -84,12 +84,8 @@ struct parse_pooling : op_parser<parse_pooling>
{
values["padding"].clear();
// the returned paddings can be empty; default them to 0 for no padding
-            cal_auto_padding_size(info,
-                                  values,
-                                  values["lengths"].to_vector<int>(),
-                                  {1, 1},
-                                  in_lens,
-                                  paddings);
+            cal_auto_padding_size(
+                info, values, values["lengths"].to_vector<int>(), {1, 1}, in_lens, paddings);
}
if(paddings.size() != 2 * kdims)
......
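A note on what cal_auto_padding_size is doing here: SAME-style auto padding chooses pads so each spatial output length equals ceil(input / stride). A minimal standalone sketch of that rule (the function name and the {begin..., end...} layout are illustrative, not MIGraphX's actual API):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// For each spatial dim: total = max(0, (ceil(in / stride) - 1) * stride + kernel - in),
// split as evenly as possible between the begin and end sides.
std::vector<int64_t> same_padding(const std::vector<int64_t>& in_lens,
                                  const std::vector<int64_t>& kernel,
                                  const std::vector<int64_t>& stride)
{
    std::vector<int64_t> pads(2 * in_lens.size(), 0); // {begin..., end...}
    for(std::size_t i = 0; i < in_lens.size(); ++i)
    {
        int64_t out   = (in_lens[i] + stride[i] - 1) / stride[i]; // ceil(in / stride)
        int64_t total = std::max<int64_t>(0, (out - 1) * stride[i] + kernel[i] - in_lens[i]);
        pads[i]                  = total / 2;         // begin side
        pads[i + in_lens.size()] = total - total / 2; // end side takes the remainder
    }
    return pads;
}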
@@ -59,10 +59,10 @@ void rewrite_rnn::apply_vanilla_rnn(module& prog, instruction_ref ins) const
// when writing their module.
auto args = ins->inputs();
-    shape seq_shape = args[0]->get_shape();
-    int hidden_size = args[1]->get_shape().lens()[1];
-    int batch_size = seq_shape.lens()[1];
-    shape::type_t type = seq_shape.type();
+    shape seq_shape    = args[0]->get_shape();
+    int hidden_size    = args[1]->get_shape().lens()[1];
+    int batch_size     = seq_shape.lens()[1];
+    shape::type_t type = seq_shape.type();
migraphx::shape ih_shape{type, {1, batch_size, hidden_size}};
std::vector<float> data(ih_shape.elements(), 0);
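The hunk above (and the GRU/LSTM ones below) reads the operand shapes to size a zero-filled initial hidden state: the sequence input is laid out {seq_len, batch_size, input_size}, hence batch_size = lens()[1]. A toy version of the same sizing, assuming the MIGraphX shape API:

#include <cstddef>
#include <migraphx/shape.hpp>
#include <vector>

int main()
{
    // sequence input layout: {seq_len, batch_size, input_size}
    migraphx::shape seq{migraphx::shape::float_type, {10, 4, 8}};
    std::size_t batch_size  = seq.lens()[1]; // 4
    std::size_t hidden_size = 16;            // taken from a weight tensor in the real code
    migraphx::shape ih{migraphx::shape::float_type, {1, batch_size, hidden_size}};
    std::vector<float> data(ih.elements(), 0.0f); // 1 * 4 * 16 = 64 zeros
}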
@@ -368,10 +368,10 @@ void rewrite_rnn::apply_gru(module& prog, instruction_ref ins) const
// when writing their program.
auto args = ins->inputs();
-    shape seq_shape = args[0]->get_shape();
-    int hidden_size = args[2]->get_shape().lens()[2];
-    int batch_size = seq_shape.lens()[1];
-    shape::type_t type = seq_shape.type();
+    shape seq_shape    = args[0]->get_shape();
+    int hidden_size    = args[2]->get_shape().lens()[2];
+    int batch_size     = seq_shape.lens()[1];
+    shape::type_t type = seq_shape.type();
migraphx::shape ih_shape{type, {1, batch_size, hidden_size}};
std::vector<float> data(ih_shape.elements(), 0.0);
@@ -753,10 +753,10 @@ void rewrite_rnn::apply_lstm(module& prog, instruction_ref ins) const
assert(ins->name() == "lstm");
auto args = ins->inputs();
-    shape seq_shape = args[0]->get_shape();
-    int hidden_size = args[2]->get_shape().lens()[2];
-    int batch_size = seq_shape.lens()[1];
-    shape::type_t type = seq_shape.type();
+    shape seq_shape    = args[0]->get_shape();
+    int hidden_size    = args[2]->get_shape().lens()[2];
+    int batch_size     = seq_shape.lens()[1];
+    shape::type_t type = seq_shape.type();
migraphx::shape ihc_shape{type, {1, batch_size, hidden_size}};
std::vector<float> ihc_data(ihc_shape.elements(), 0.0);
@@ -1194,8 +1194,8 @@ std::vector<operation> rewrite_rnn::lstm_actv_funcs(instruction_ref ins) const
    // we need 6 actv funcs, even if the user does not
    // specify any. If fewer than 6 are given, use the
    // algorithm in parse_lstm to make 6 actv functions
-    const auto& actv_funcs = lstm_op.actv_funcs;
-    int num_actv_funcs = actv_funcs.size();
+    const auto& actv_funcs = lstm_op.actv_funcs;
+    int num_actv_funcs     = actv_funcs.size();
if(lstm_op.direction == op::rnn_direction::bidirectional)
{
switch(num_actv_funcs)
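Background for this switch: ONNX defines up to three activations per LSTM direction (defaults sigmoid, tanh, tanh), so a bidirectional node needs six in total and the rewriter tops up whatever was supplied. One plausible padding scheme, sketched standalone (pad_lstm_actvs is hypothetical; the real logic lives in parse_lstm and the switch below):

#include <string>
#include <vector>

std::vector<std::string> pad_lstm_actvs(std::vector<std::string> actvs)
{
    const std::vector<std::string> defaults = {"sigmoid", "tanh", "tanh"};
    if(actvs.size() == 3)
    {
        // one full direction given: reuse it for the reverse pass
        std::vector<std::string> fwd(actvs.begin(), actvs.begin() + 3);
        actvs.insert(actvs.end(), fwd.begin(), fwd.end());
    }
    while(actvs.size() < 6)
        actvs.push_back(defaults[actvs.size() % 3]); // top up with the ONNX defaults
    return actvs;
}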
@@ -1295,8 +1295,9 @@ bool rewrite_rnn::is_variable_seq_lens(const module& prog, instruction_ref seq_l
return is_var_lens;
}
-int
-rewrite_rnn::get_seq_len(const module& prog, instruction_ref input, instruction_ref seq_lens) const
+int rewrite_rnn::get_seq_len(const module& prog,
+                             instruction_ref input,
+                             instruction_ref seq_lens) const
{
bool is_var_lens = is_variable_seq_lens(prog, seq_lens);
auto input_shape = input->get_shape();
......
@@ -46,10 +46,10 @@ struct shape_impl
shape_impl(const std::vector<shape>& subs) : m_type(shape::tuple_type), m_shapes(subs) {}
shape::type_t m_type;
-    std::vector<int> m_lens = {};
-    std::vector<int> m_strides = {};
-    std::vector<shape> m_shapes = {};
-    bool m_standard = false;
+    std::vector<int> m_lens     = {};
+    std::vector<int> m_strides  = {};
+    std::vector<shape> m_shapes = {};
+    bool m_standard             = false;
void calculate_strides()
{
@@ -58,10 +58,8 @@ struct shape_impl
if(m_strides.empty())
return;
m_strides.back() = 1;
-        std::partial_sum(m_lens.rbegin(),
-                         m_lens.rend() - 1,
-                         m_strides.rbegin() + 1,
-                         std::multiplies<int>());
+        std::partial_sum(
+            m_lens.rbegin(), m_lens.rend() - 1, m_strides.rbegin() + 1, std::multiplies<int>());
}
int element_space() const
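The partial_sum call above is the standard row-major stride computation: the innermost stride is 1 and each outer stride is the product of all inner lengths, which is also why elements() below is just the product of the lens. A self-contained check:

#include <functional>
#include <numeric>
#include <vector>

int main()
{
    std::vector<int> lens = {2, 3, 4};
    std::vector<int> strides(lens.size());
    strides.back() = 1;
    // running product over the reversed lens fills strides inside-out:
    // lens {2, 3, 4} -> strides {12, 4, 1}
    std::partial_sum(
        lens.rbegin(), lens.rend() - 1, strides.rbegin() + 1, std::multiplies<int>());
    int elements = std::accumulate(lens.begin(), lens.end(), int{1}, std::multiplies<int>()); // 24
    (void)elements;
}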
@@ -83,8 +81,7 @@ struct shape_impl
assert(m_lens.size() == m_strides.size());
if(m_lens.empty())
return 0;
-        return std::accumulate(
-            m_lens.begin(), m_lens.end(), int{1}, std::multiplies<int>());
+        return std::accumulate(m_lens.begin(), m_lens.end(), int{1}, std::multiplies<int>());
}
};
@@ -124,10 +121,7 @@ std::string shape::cpp_type(shape::type_t t)
shape::shape() : impl(shape_impl::default_shape()) {}
shape::shape(type_t t) : impl(std::make_shared<shape_impl>(t)) {}
-shape::shape(type_t t, std::vector<int> l)
-    : impl(std::make_shared<shape_impl>(t, std::move(l)))
-{
-}
+shape::shape(type_t t, std::vector<int> l) : impl(std::make_shared<shape_impl>(t, std::move(l))) {}
shape::shape(type_t t, std::vector<int> l, std::vector<int> s)
: impl(std::make_shared<shape_impl>(t, std::move(l), std::move(s)))
{
@@ -135,9 +129,7 @@ shape::shape(type_t t, std::vector<int> l, std::vector<int> s)
shape::shape(const std::vector<shape>& subs) : impl(std::make_shared<shape_impl>(subs)) {}
-shape shape::from_permutation(type_t t,
-                              const std::vector<int>& l,
-                              const std::vector<int64_t>& perm)
+shape shape::from_permutation(type_t t, const std::vector<int>& l, const std::vector<int64_t>& perm)
{
auto new_lens = reorder_dims(l, perm);
shape result = reorder_shape({t, new_lens}, invert_permutation(perm));
@@ -221,14 +213,11 @@ void shape::multi_copy(int i, int* start, const int* end) const
assert(this->standard());
(void)end;
assert(lens().size() <= (end - start));
-    std::transform(strides().begin(),
-                   strides().end(),
-                   lens().begin(),
-                   start,
-                   [&](int stride, int len) {
-                       assert(len > 0 and stride > 0);
-                       return (i / stride) % len;
-                   });
+    std::transform(
+        strides().begin(), strides().end(), lens().begin(), start, [&](int stride, int len) {
+            assert(len > 0 and stride > 0);
+            return (i / stride) % len;
+        });
}
bool shape::packed() const
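The transform above decodes a linear element index into per-dimension coordinates: for a standard (row-major) shape, coordinate d of index i is (i / stride[d]) % len[d]. A quick standalone check:

#include <cstddef>
#include <vector>

int main()
{
    std::vector<int> lens    = {2, 3, 4};
    std::vector<int> strides = {12, 4, 1}; // row-major strides for {2, 3, 4}
    int i = 17;                            // linear index
    std::vector<int> coord(lens.size());
    for(std::size_t d = 0; d < lens.size(); ++d)
        coord[d] = (i / strides[d]) % lens[d]; // coord becomes {1, 1, 1}
    // sanity: 1*12 + 1*4 + 1*1 == 17
}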
@@ -258,10 +247,8 @@ bool shape::transposed() const
bool shape::broadcasted() const
{
assert(this->lens().size() == this->strides().size());
-    return std::accumulate(this->strides().begin(),
-                           this->strides().end(),
-                           int{1},
-                           std::multiplies<int>()) == 0;
+    return std::accumulate(
+               this->strides().begin(), this->strides().end(), int{1}, std::multiplies<int>()) == 0;
}
bool shape::scalar() const
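The accumulate above works because a broadcast dimension is encoded with stride 0, so the product of all strides is zero exactly when at least one axis is broadcast. For instance:

#include <functional>
#include <numeric>
#include <vector>

int main()
{
    // a {3, 4} view broadcast along its first axis has strides {0, 1}
    std::vector<int> strides = {0, 1};
    bool broadcasted =
        std::accumulate(strides.begin(), strides.end(), int{1}, std::multiplies<int>()) == 0;
    (void)broadcasted; // true; all-nonzero strides would give a nonzero product
}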
@@ -289,10 +276,7 @@ shape shape::with_lens(type_t t, const std::vector<int>& l) const
return shape::from_permutation(t, l, perm);
}
-shape shape::with_lens(const std::vector<int>& l) const
-{
-    return this->with_lens(this->type(), l);
-}
+shape shape::with_lens(const std::vector<int>& l) const { return this->with_lens(this->type(), l); }
int shape::element_space() const { return impl->element_space(); }
@@ -350,9 +334,8 @@ void migraphx_from_value(const value& v, shape& s)
}
else
{
-        s = shape{shape::parse_type(t),
-                  v.at("lens").to_vector<int>(),
-                  v.at("strides").to_vector<int>()};
+        s = shape{
+            shape::parse_type(t), v.at("lens").to_vector<int>(), v.at("strides").to_vector<int>()};
}
}
......
@@ -626,9 +626,7 @@ struct find_split_concat
}
};
-bool axis_equal(const std::vector<int>& x,
-                const std::vector<int>& y,
-                int axis)
+bool axis_equal(const std::vector<int>& x, const std::vector<int>& y, int axis)
{
return x.size() == y.size() and x.size() > axis and
std::equal(x.begin(), x.begin() + axis, y.begin()) and
@@ -910,10 +908,10 @@ struct find_split_reshape
}
// ensure reshape happens after the axis dimension
-        auto axis = any_cast<op::slice>(slc->get_operator()).axes[0];
-        auto slc_lens = slc->get_shape().lens();
-        auto slc_dim_size = std::accumulate(
-            slc_lens.begin() + axis, slc_lens.end(), 1, std::multiplies<int>());
+        auto axis         = any_cast<op::slice>(slc->get_operator()).axes[0];
+        auto slc_lens     = slc->get_shape().lens();
+        auto slc_dim_size =
+            std::accumulate(slc_lens.begin() + axis, slc_lens.end(), 1, std::multiplies<int>());
// search the reshape output (standard shape) to decide which axes
// in its output correspond to slc_dim_size
......
@@ -69,7 +69,7 @@ void migemm_impl(
    int n_dims = cmat.get_shape().lens().size();
    int dim_0  = n_dims - 2;
    int dim_1  = n_dims - 1;
-    auto k = amat.get_shape().lens()[dim_1];
+    auto k     = amat.get_shape().lens()[dim_1];
assert(amat.get_shape().lens()[dim_1] == bmat.get_shape().lens()[dim_0]);
assert(cmat.get_shape().lens()[dim_0] == amat.get_shape().lens()[dim_0]);
@@ -92,8 +92,7 @@ void migemm_impl(tensor_view<T> cmat, tensor_view<T> amat, tensor_view<T> bmat,
{
auto lens = amat.get_shape().lens();
bool batch_mul =
-        std::accumulate(
-            lens.rbegin() + 2, lens.rend(), int{1}, std::multiplies<int>()) == 1;
+        std::accumulate(lens.rbegin() + 2, lens.rend(), int{1}, std::multiplies<int>()) == 1;
if(batch_mul)
{
migemm_impl(cmat, amat, bmat, alpha, beta, is_fast_gemm_type<T>{});
......
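For context on batch_mul above: the last two lens are the matrix dims and everything before them is batch dims, so when those leading lengths multiply to 1 there is really just one GEMM and the code can skip the batched path. For example:

#include <functional>
#include <numeric>
#include <vector>

int main()
{
    std::vector<int> lens = {1, 1, 64, 32}; // batch dims {1, 1}, matrix 64x32
    bool batch_mul =
        std::accumulate(lens.rbegin() + 2, lens.rend(), int{1}, std::multiplies<int>()) == 1;
    (void)batch_mul; // true: a single 64x32 matrix, no batch loop needed
}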
@@ -52,7 +52,7 @@ struct parse_conv : op_parser<parse_conv>
const std::string& pad_mode = info.attributes.at("padding").s();
if(pad_mode.find("SAME") != std::string::npos)
{
-            op.padding_mode = op::padding_mode_t::same;
+            op.padding_mode              = op::padding_mode_t::same;
            std::vector<int> weight_dims = weights->get_shape().lens();
            int weight_h                 = weight_dims[2];
            int weight_w                 = weight_dims[3];
......
@@ -21,7 +21,7 @@ struct parse_depthwiseconv : op_parser<parse_depthwiseconv>
{
op::convolution op;
        int num_channels = args[0]->get_shape().lens()[1];
-        op.group = num_channels;
+        op.group         = num_channels;
if(contains(info.attributes, "strides"))
{
@@ -57,7 +57,7 @@ struct parse_depthwiseconv : op_parser<parse_depthwiseconv>
if(pad_mode.find("SAME") != std::string::npos)
{
-        op.padding_mode = op::padding_mode_t::same;
+        op.padding_mode              = op::padding_mode_t::same;
        std::vector<int> weight_dims = weights->get_shape().lens();
        int weight_h                 = weight_dims[2];
        int weight_w                 = weight_dims[3];
......
@@ -20,7 +20,7 @@ struct parse_expanddims : op_parser<parse_expanddims>
std::vector<int> input_dims = args[0]->get_shape().lens();
std::vector<int64_t> new_dims(input_dims.begin(), input_dims.end());
int num_dims = input_dims.size();
-        int32_t dim = args[1]->eval().at<int32_t>();
+        int32_t dim  = args[1]->eval().at<int32_t>();
if(dim < 0)
{
......
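On the dim < 0 branch that follows: TF's expand_dims accepts dim in [-num_dims - 1, num_dims], with negative values counting from the end, so the parser normalizes by adding num_dims + 1 before inserting the new length-1 axis. A sketch of just that normalization (the surrounding parser plumbing is elided):

#include <cstdint>
#include <vector>

int main()
{
    std::vector<int64_t> new_dims = {2, 3, 4};
    int num_dims = 3;
    int32_t dim  = -1; // negative: count from the end
    if(dim < 0)
        dim += num_dims + 1;                    // -1 -> 3: append at the back
    new_dims.insert(new_dims.begin() + dim, 1); // new_dims becomes {2, 3, 4, 1}
}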
@@ -17,9 +17,9 @@ struct parse_strideslice : op_parser<parse_strideslice>
tf_parser::node_info info,
std::vector<instruction_ref> args) const
{
-        auto starts = args[1]->eval().get<int32_t>().to_vector();
-        auto ends = args[2]->eval().get<int32_t>().to_vector();
-        auto l0 = args[0];
+        auto starts            = args[1]->eval().get<int32_t>().to_vector();
+        auto ends              = args[2]->eval().get<int32_t>().to_vector();
+        auto l0                = args[0];
        int num_axes           = l0->get_shape().lens().size();
        std::vector<int> axes  = l0->get_shape().lens();
......