Commit 79e27dac authored by charlie
Browse files

Formatting

parent 53c4b899
......@@ -45,8 +45,8 @@ void dead_code_elimination::apply(module& m) const
if(i == last)
break;
// Skip instruction with empty shape as output unless its a builtin or undefined or identity
if((not i->get_shape().dynamic() and i->get_shape().elements() == 0) and i->name().front() != '@' and
i->name() != "undefined" and i->name() != "identity")
if((not i->get_shape().dynamic() and i->get_shape().elements() == 0) and
i->name().front() != '@' and i->name() != "undefined" and i->name() != "identity")
continue;
assert(bidistance(m, i, last) > 0);
fix([&](auto self, auto leaf) {
......
......@@ -77,8 +77,8 @@ struct convolution
if(!input.dynamic() and input.lens().at(1) != (weights.lens().at(1) * group))
MIGRAPHX_THROW("CONVOLUTION: mismatched channel numbers");
auto calc_output_lens = [this, &weights, &num_spatial_dims, &padding_size](std::vector<std::size_t> lens)
{
auto calc_output_lens =
[this, &weights, &num_spatial_dims, &padding_size](std::vector<std::size_t> lens) {
std::vector<size_t> ret = {};
// calculate the output shape of the convolution: ((W - K + 2P) / S) + 1
for(size_t i = 0; i < num_spatial_dims; i++)
......@@ -89,27 +89,27 @@ struct convolution
// when padding is {x0_begin, x1_begin, ... x0_end , x1_end, ...}
padding_factor = padding[i] + padding[i + num_spatial_dims];
}
ret.push_back(
std::size_t(
std::max<std::ptrdiff_t>(
ret.push_back(std::size_t(std::max<std::ptrdiff_t>(
1,
(lens[i + 2] - (1 + dilation[i] * (weights.lens()[i + 2] - 1)) + padding_factor) / stride[i] + 1
)
)
);
(lens[i + 2] - (1 + dilation[i] * (weights.lens()[i + 2] - 1)) +
padding_factor) /
stride[i] +
1)));
}
return ret;
};
if(input.dynamic())
{
std::vector<shape::dynamic_dimension> output_dyn_dims = {input.dyn_dims().at(0), input.dyn_dims().at(1)};
std::vector<shape::dynamic_dimension> output_dyn_dims = {input.dyn_dims().at(0),
input.dyn_dims().at(1)};
auto min_spatial_dims = calc_output_lens(input.min_lens());
auto max_spatial_dims = calc_output_lens(input.max_lens());
auto opt_spatial_dims = calc_output_lens(input.opt_lens());
for (size_t i = 0; i < num_spatial_dims; ++i)
for(size_t i = 0; i < num_spatial_dims; ++i)
{
output_dyn_dims.push_back(shape::dynamic_dimension{min_spatial_dims[i], max_spatial_dims[i], opt_spatial_dims[i]});
output_dyn_dims.push_back(shape::dynamic_dimension{
min_spatial_dims[i], max_spatial_dims[i], opt_spatial_dims[i]});
}
return shape{input.type(), output_dyn_dims};
}
......@@ -117,7 +117,9 @@ struct convolution
{
std::vector<size_t> output_lens{input.lens()[0], weights.lens()[0]};
auto spatial_lens = calc_output_lens(input.lens());
std::for_each(spatial_lens.begin(), spatial_lens.end(), [&output_lens](auto x){ output_lens.push_back(x); });
std::for_each(spatial_lens.begin(), spatial_lens.end(), [&output_lens](auto x) {
output_lens.push_back(x);
});
return inputs[0].with_lens(output_lens);
}
}
......
......@@ -172,7 +172,7 @@ bool normalize_attributes(operation& op, const shape& s)
axes = val.at("axes").without_key().to_vector<int64_t>();
}
auto vec = vv.to_vector<int64_t>();
auto result = tune_attribute(vec, axes, rv.without_key(),s.lens());
auto result = tune_attribute(vec, axes, rv.without_key(), s.lens());
val[key] = result;
op.from_value(val);
val = op.to_value();
......
......@@ -855,7 +855,8 @@ TEST_CASE(conv_dynamic_batch_test)
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape input_shape{migraphx::shape::float_type, {{1, 100, 0}, {3, 3, 0}, {4, 4, 0}, {4, 4, 0}}};
migraphx::shape input_shape{migraphx::shape::float_type,
{{1, 100, 0}, {3, 3, 0}, {4, 4, 0}, {4, 4, 0}}};
migraphx::shape weights_shape{migraphx::shape::float_type, {2, 3, 3, 3}};
auto input = mm->add_parameter("X", input_shape);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment