Commit 79e27dac authored by charlie

Formatting

parent 53c4b899
@@ -45,8 +45,8 @@ void dead_code_elimination::apply(module& m) const
         if(i == last)
             break;
         // Skip instruction with empty shape as output unless its a builtin or undefined or identity
-        if((not i->get_shape().dynamic() and i->get_shape().elements() == 0) and i->name().front() != '@' and
-           i->name() != "undefined" and i->name() != "identity")
+        if((not i->get_shape().dynamic() and i->get_shape().elements() == 0) and
+           i->name().front() != '@' and i->name() != "undefined" and i->name() != "identity")
             continue;
         assert(bidistance(m, i, last) > 0);
         fix([&](auto self, auto leaf) {
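
Note on the hunk above: the change only rewraps the skip condition; its logic is unchanged. As a reference, a minimal standalone sketch of that predicate, using a hypothetical stand-in struct rather than the real migraphx::instruction API:

    // Sketch of the dead-code-elimination skip test: an instruction with an
    // empty static output shape is skipped unless it is a builtin
    // ('@'-prefixed), "undefined", or "identity". ins_info is illustrative.
    #include <cstddef>
    #include <cstdio>
    #include <string>

    struct ins_info
    {
        bool dynamic;         // output shape has dynamic dimensions
        std::size_t elements; // element count of a static output shape
        std::string name;     // operator name; builtins start with '@'
    };

    bool skip_empty_output(const ins_info& i)
    {
        return (not i.dynamic and i.elements == 0) and i.name.front() != '@' and
               i.name != "undefined" and i.name != "identity";
    }

    int main()
    {
        // An empty static output from an ordinary op is skipped (prints 1).
        std::printf("%d\n", skip_empty_output({false, 0, "slice"}));
        return 0;
    }
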
@@ -68,7 +68,7 @@ struct convolution
         {
             MIGRAPHX_THROW("CONVOLUTION: dynamic weights not supported");
         }
-        const size_t num_spatial_dims = input_size - 2;
+        const size_t num_spatial_dims = input_size - 2;
         if(num_spatial_dims != this->kdims())
         {
             MIGRAPHX_THROW("CONVOLUTION: input k-dims does not match attribute size");
@@ -76,40 +76,40 @@ struct convolution
         if(!input.dynamic() and input.lens().at(1) != (weights.lens().at(1) * group))
             MIGRAPHX_THROW("CONVOLUTION: mismatched channel numbers");
-        auto calc_output_lens = [this, &weights, &num_spatial_dims, &padding_size](std::vector<std::size_t> lens)
-        {
-            std::vector<size_t> ret = {};
-            // calculate the output shape of the convolution: ((W - K + 2P) / S) + 1
-            for(size_t i = 0; i < num_spatial_dims; i++)
-            {
-                auto padding_factor = 2 * padding[i];
-                if(padding_size == 2 * num_spatial_dims)
-                {
-                    // when padding is {x0_begin, x1_begin, ... x0_end , x1_end, ...}
-                    padding_factor = padding[i] + padding[i + num_spatial_dims];
-                }
-                ret.push_back(
-                    std::size_t(
-                        std::max<std::ptrdiff_t>(
-                            1,
-                            (lens[i + 2] - (1 + dilation[i] * (weights.lens()[i + 2] - 1)) + padding_factor) / stride[i] + 1
-                        )
-                    )
-                );
-            }
-            return ret;
-        };
+        auto calc_output_lens =
+            [this, &weights, &num_spatial_dims, &padding_size](std::vector<std::size_t> lens) {
+                std::vector<size_t> ret = {};
+                // calculate the output shape of the convolution: ((W - K + 2P) / S) + 1
+                for(size_t i = 0; i < num_spatial_dims; i++)
+                {
+                    auto padding_factor = 2 * padding[i];
+                    if(padding_size == 2 * num_spatial_dims)
+                    {
+                        // when padding is {x0_begin, x1_begin, ... x0_end , x1_end, ...}
+                        padding_factor = padding[i] + padding[i + num_spatial_dims];
+                    }
+                    ret.push_back(std::size_t(std::max<std::ptrdiff_t>(
+                        1,
+                        (lens[i + 2] - (1 + dilation[i] * (weights.lens()[i + 2] - 1)) +
+                         padding_factor) /
+                                stride[i] +
+                            1)));
+                }
+                return ret;
+            };
         if(input.dynamic())
         {
-            std::vector<shape::dynamic_dimension> output_dyn_dims = {input.dyn_dims().at(0), input.dyn_dims().at(1)};
+            std::vector<shape::dynamic_dimension> output_dyn_dims = {input.dyn_dims().at(0),
+                                                                     input.dyn_dims().at(1)};
             auto min_spatial_dims = calc_output_lens(input.min_lens());
             auto max_spatial_dims = calc_output_lens(input.max_lens());
             auto opt_spatial_dims = calc_output_lens(input.opt_lens());
-            for (size_t i = 0; i < num_spatial_dims; ++i)
+            for(size_t i = 0; i < num_spatial_dims; ++i)
             {
-                output_dyn_dims.push_back(shape::dynamic_dimension{min_spatial_dims[i], max_spatial_dims[i], opt_spatial_dims[i]});
+                output_dyn_dims.push_back(shape::dynamic_dimension{
+                    min_spatial_dims[i], max_spatial_dims[i], opt_spatial_dims[i]});
             }
             return shape{input.type(), output_dyn_dims};
         }
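
The comment in calc_output_lens gives the classic output-length formula, ((W - K + 2P) / S) + 1, which the code generalizes with dilation via the effective kernel extent 1 + dilation * (K - 1) and clamps to at least 1. A self-contained sketch of the same arithmetic, with illustrative values not taken from the repository:

    // Output spatial length of a convolution, matching the expression inside
    // calc_output_lens. p_total is begin and end padding summed for one axis.
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    std::size_t conv_out_len(std::ptrdiff_t w, // input spatial length
                             std::ptrdiff_t k, // kernel spatial length
                             std::ptrdiff_t p_total,
                             std::ptrdiff_t s, // stride
                             std::ptrdiff_t d) // dilation
    {
        const std::ptrdiff_t k_eff = 1 + d * (k - 1); // dilated kernel extent
        return std::size_t(std::max<std::ptrdiff_t>(1, (w - k_eff + p_total) / s + 1));
    }

    int main()
    {
        // W = 4, K = 3, padding 1 per side (p_total = 2), stride 1, dilation 1 -> 4
        std::printf("%zu\n", conv_out_len(4, 3, 2, 1, 1));
        return 0;
    }
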
@@ -117,7 +117,9 @@ struct convolution
         {
             std::vector<size_t> output_lens{input.lens()[0], weights.lens()[0]};
             auto spatial_lens = calc_output_lens(input.lens());
-            std::for_each(spatial_lens.begin(), spatial_lens.end(), [&output_lens](auto x){ output_lens.push_back(x); });
+            std::for_each(spatial_lens.begin(), spatial_lens.end(), [&output_lens](auto x) {
+                output_lens.push_back(x);
+            });
             return inputs[0].with_lens(output_lens);
         }
     }
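
Aside on the hunk above: the reflowed std::for_each call just appends each spatial length to output_lens. The same append can be written with std::copy and a back-insert iterator; a minimal sketch with made-up values, shown only as a side note on the formatting change, not a suggested edit:

    // Appending one vector to another without a lambda, equivalent to the
    // std::for_each in the hunk above.
    #include <algorithm>
    #include <cstddef>
    #include <iterator>
    #include <vector>

    int main()
    {
        std::vector<std::size_t> output_lens  = {1, 2};
        std::vector<std::size_t> spatial_lens = {4, 4};
        std::copy(spatial_lens.begin(), spatial_lens.end(), std::back_inserter(output_lens));
        // output_lens is now {1, 2, 4, 4}
        return 0;
    }
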
@@ -134,7 +134,7 @@ bool normalize_attributes(operation& op, const shape& s)
     auto val = op.to_value();
     if(attrs.contains("normalize_padding"))
     {
-        auto num_dims = s.max_lens().size();
+        auto num_dims     = s.max_lens().size();
         auto padding      = val.at(attrs.at("normalize_padding").to<std::string>());
         auto padding_size = padding.size();
         // for now, assume the dimensions to pad start at dim 2
@@ -172,7 +172,7 @@ bool normalize_attributes(operation& op, const shape& s)
                 axes = val.at("axes").without_key().to_vector<int64_t>();
             }
             auto vec = vv.to_vector<int64_t>();
-            auto result = tune_attribute(vec, axes, rv.without_key(),s.lens());
+            auto result = tune_attribute(vec, axes, rv.without_key(), s.lens());
             val[key] = result;
             op.from_value(val);
             val = op.to_value();
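
Context for the hunk above (the change is only a missing space after a comma): tune_attribute canonicalizes attribute values such as negative axes against the input shape's dimensions. A generic, hypothetical sketch of that normalization idea, not the actual tune_attribute signature or implementation:

    // Hypothetical illustration: map negative axis indices to non-negative
    // ones, the kind of canonicalization normalize_attributes applies to
    // operator attributes before passes run.
    #include <cstdint>
    #include <vector>

    std::vector<int64_t> normalize_axes(std::vector<int64_t> axes, int64_t n_dims)
    {
        for(auto& a : axes)
            if(a < 0)
                a += n_dims; // e.g. axis -1 of a 4-D shape becomes 3
        return axes;
    }
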
@@ -20,7 +20,7 @@ void normalize_ops::apply(module& m) const
         if(inputs.empty())
             continue;
-        auto s = inputs[0]->get_shape();
+        auto s                       = inputs[0]->get_shape();
         migraphx::operation tuned_op = ins->get_operator();
         if(normalize_attributes(tuned_op, s))
         {
@@ -855,7 +855,8 @@ TEST_CASE(conv_dynamic_batch_test)
     migraphx::program p;
     auto* mm = p.get_main_module();
-    migraphx::shape input_shape{migraphx::shape::float_type, {{1, 100, 0}, {3, 3, 0}, {4, 4, 0}, {4, 4, 0}}};
+    migraphx::shape input_shape{migraphx::shape::float_type,
+                                {{1, 100, 0}, {3, 3, 0}, {4, 4, 0}, {4, 4, 0}}};
     migraphx::shape weights_shape{migraphx::shape::float_type, {2, 3, 3, 3}};
     auto input = mm->add_parameter("X", input_shape);
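
For readers new to the dynamic-shape work in the parent commit: each dimension of input_shape above is a {min, max, opt} triple, so {1, 100, 0} declares a batch dimension that may vary from 1 to 100 with no preferred size recorded. A sketch of the same construction, assuming the usual migraphx/shape.hpp header layout; the vector-of-dynamic_dimension constructor is the one used by the convolution hunk earlier:

    // Builds the dynamic input shape from the test above: batch is dynamic
    // (1..100), the remaining dimensions are fixed.
    #include <migraphx/shape.hpp>
    #include <vector>

    migraphx::shape make_dynamic_input()
    {
        using dd = migraphx::shape::dynamic_dimension;
        std::vector<dd> dims = {
            {1, 100, 0}, // batch: min 1, max 100, no optimum
            {3, 3, 0},   // channels: fixed at 3
            {4, 4, 0},   // height: fixed at 4
            {4, 4, 0}    // width: fixed at 4
        };
        return migraphx::shape{migraphx::shape::float_type, dims};
    }
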