Unverified commit af073671 authored by kahmed10, committed by GitHub
Browse files

add improvements to pad calculation (#461)



* fix pad calc

* add padding calc and test

* formatting

* add parsing auto_pad attribute for convolution

* clang format

* made asym generic function

* formatting

* refine code

* code cleanup

* clang format

* pull changes from other pad branch

* formatting

* revert onnx file

* formatting

* fix a bug in the code

* fix bug in function call

* formatting

* fix bug

* fix review comment

* formatting
Co-authored-by: Shucai Xiao <Shucai.Xiao@amd.com>
Co-authored-by: Shucai Xiao <shucai@gmail.com>
Co-authored-by: Paul Fultz II <pfultz2@yahoo.com>
Co-authored-by: mvermeulen <5479696+mvermeulen@users.noreply.github.com>
parent 3c457a3c
......@@ -13,14 +13,24 @@ inline void calculate_padding(int64_t idx,
int64_t input_dim,
int64_t stride,
int64_t dilation,
int64_t weight_dim)
int64_t weight_dim,
bool is_same_upper = true)
{
int64_t output_dim = (input_dim + stride - 1) / stride; // round up result
int64_t new_weight_dim = weight_dim + (weight_dim - 1) * (dilation - 1);
int64_t pad =
std::max(static_cast<int64_t>(0), (output_dim - 1) * stride + new_weight_dim - input_dim);
pads[idx] = pad / 2;
pads[idx + 2] = pad - pad / 2;
if(is_same_upper)
{
pads[idx] = pad / 2;
pads[idx + 2] = pad - pad / 2;
}
else
{
pads[idx + 2] = pad / 2;
pads[idx] = pad - pad / 2;
}
}
} // namespace MIGRAPHX_INLINE_NS
......
......@@ -311,14 +311,15 @@ struct onnx_parser
template <class Op>
void check_asym_padding(instruction_ref& ins,
std::vector<int64_t>& padding,
const std::vector<int64_t>& padding,
Op& op,
float pad_val = 0)
{
if(padding[0] != padding[2] || padding[1] != padding[3])
{
padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
ins = prog.add_instruction(op::pad{padding, pad_val}, ins);
ins = prog.add_instruction(
op::pad{{0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]}, pad_val},
ins);
}
else
{
......@@ -418,7 +419,10 @@ struct onnx_parser
instruction_ref process_auto_pad_attribute(instruction_ref ins,
node_info info,
Op& op,
const std::vector<std::size_t>& in_lens)
std::array<std::size_t, 2> k_lens,
std::array<std::size_t, 2> dilation,
const std::vector<std::size_t>& in_lens,
float value = 0.0f)
{
if(!contains(info.attributes, "auto_pad"))
{
......@@ -428,46 +432,14 @@ struct onnx_parser
auto auto_pad = info.attributes["auto_pad"].s();
if(auto_pad.find("SAME") != std::string::npos)
{
// calculate the padding
std::array<std::size_t, 2> out_lens;
out_lens[0] = (in_lens[2] + op.stride[0] - 1) / op.stride[0];
out_lens[1] = (in_lens[3] + op.stride[1] - 1) / op.stride[1];
std::array<std::size_t, 2> explicit_pads;
explicit_pads[0] = (out_lens[0] - 1) * op.stride[0] + op.lengths[0] - in_lens[2];
explicit_pads[1] = (out_lens[1] - 1) * op.stride[1] + op.lengths[1] - in_lens[3];
op.padding[0] = explicit_pads[0] / 2;
op.padding[1] = explicit_pads[1] / 2;
explicit_pads[0] -= 2 * op.padding[0];
explicit_pads[1] -= 2 * op.padding[1];
std::vector<std::int64_t> pads(8, 0);
if(explicit_pads[0] != 0 or explicit_pads[1] != 0)
{
if(auto_pad == "SAME_UPPER")
{
pads[6] = explicit_pads[0];
pads[7] = explicit_pads[1];
}
else if(auto_pad == "SAME_LOWER")
{
pads[2] = explicit_pads[0];
pads[3] = explicit_pads[1];
}
// MaxPool
if(op.mode == "max")
{
ins = prog.add_instruction(op::pad{pads, std::numeric_limits<float>::lowest()},
ins);
}
// AveragePool
else
{
ins = prog.add_instruction(op::pad{pads}, ins);
}
}
bool is_same_upper = (auto_pad.find("SAME_UPPER") != std::string::npos);
std::vector<int64_t> padding(in_lens.size());
calculate_padding(
0, padding, in_lens[2], op.stride[0], dilation[0], k_lens[0], is_same_upper);
calculate_padding(
1, padding, in_lens[3], op.stride[1], dilation[1], k_lens[1], is_same_upper);
op.padding_mode = op::padding_mode_t::same;
check_asym_padding(ins, padding, op, value);
}
return ins;
......@@ -480,6 +452,7 @@ struct onnx_parser
Op op;
auto l0 = args[0];
auto weights = args[1];
std::vector<int64_t> padding;
if(contains(info.attributes, "pads"))
{
if(contains(info.attributes, "auto_pad"))
......@@ -487,14 +460,14 @@ struct onnx_parser
auto s = info.attributes["auto_pad"].s();
if(contains(info.attributes, "pads") and to_upper(s) != "NOTSET")
{
MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
MIGRAPHX_THROW(
"PARSE_CONV: auto_pad and padding cannot be specified simultaneously");
}
}
std::vector<std::int64_t> padding;
copy(info.attributes["pads"].ints(), std::back_inserter(padding));
if(padding.size() != 4)
{
MIGRAPHX_THROW("padding should have 4 values");
MIGRAPHX_THROW("PARSE_CONV: padding should have 4 values");
}
check_asym_padding(l0, padding, op);
}
......@@ -509,11 +482,6 @@ struct onnx_parser
if(contains(info.attributes, "auto_pad"))
{
auto s = info.attributes["auto_pad"].s();
if(contains(info.attributes, "pads") and to_upper(s) != "NOTSET")
{
MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
}
if(s.find("SAME") != std::string::npos)
{
op.padding_mode = op::padding_mode_t::same;
......@@ -522,7 +490,7 @@ struct onnx_parser
size_t weight_w = weight_dims[3];
auto input_dims = l0->get_shape().lens();
std::vector<int64_t> padding(input_dims.size());
padding.resize(input_dims.size());
calculate_padding(
0, padding, input_dims[2], op.stride[0], op.dilation[0], weight_h);
calculate_padding(
......@@ -530,6 +498,11 @@ struct onnx_parser
check_asym_padding(l0, padding, op);
}
auto in_lens = args[0]->get_shape().lens();
auto weight_lens = args[1]->get_shape().lens();
std::array<std::size_t, 2> k_lens = {weight_lens[2], weight_lens[3]};
l0 = process_auto_pad_attribute(l0, info, op, k_lens, op.dilation, in_lens);
}
if(contains(info.attributes, "group"))
{
......@@ -690,8 +663,21 @@ struct onnx_parser
if(contains(info.attributes, "auto_pad"))
{
auto s = info.attributes["auto_pad"].s();
if(s.find("SAME") != std::string::npos)
{
op.padding_mode = op::padding_mode_t::same;
}
auto in_lens = args[0]->get_shape().lens();
l0 = process_auto_pad_attribute(l0, info, op, in_lens);
float val = 0.0f;
// MaxPool
if(op.mode == "max")
{
val = std::numeric_limits<float>::lowest();
}
l0 = process_auto_pad_attribute(l0, info, op, op.lengths, {1, 1}, in_lens, val);
}
return prog.add_instruction(op, l0);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment