Unverified commit 4edf1195 authored by Charlie Lin, committed by GitHub

Rename deconvolution -> convolution_backwards and add dynamic shape support (#1801)

Renames deconvolution -> convolution_backwards to be more consistent with the literature
Note: this is not the cross-correlation operator (which is the adjoint of convolution). This is technically a standard convolution operator combined with an upsampling operator rather than a downsampling operator.
Adds unit tests for the padding, strides, dilations, and other op attributes.
Throws on the auto_pad attribute (other than NOTSET) since auto padding has not been implemented; previously the parser read the attribute and set a padding mode but then did nothing with it.
Extends the operator to support dynamic shapes.
Does not support asymmetric padding (padding_L != padding_R) or the output_shape attribute when the input shapes are dynamic.
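For reference, the output spatial extent used by calc_spatial_lens in the diff below follows the standard transposed-convolution relation (output_padding is applied separately by the ONNX parser):

L_{\text{out}} = \text{stride} \cdot (L_{\text{in}} - 1) + (\text{kernel} - 1) \cdot \text{dilation} + 1 - \text{padding}_L - \text{padding}_R

As a worked example with one spatial dimension: L_in = 4, kernel = 3, stride = 2, dilation = 1, and padding_L = padding_R = 1 give L_out = 2*(4-1) + (3-1)*1 + 1 - 2 = 7.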
parent e1039a1c
...@@ -129,9 +129,9 @@ register_migraphx_ops( ...@@ -129,9 +129,9 @@ register_migraphx_ops(
contiguous contiguous
convert convert
convolution convolution
convolution_backwards
cosh cosh
cos cos
deconvolution
dequantizelinear dequantizelinear
div div
dot dot
......
...@@ -79,17 +79,17 @@ struct convolution ...@@ -79,17 +79,17 @@ struct convolution
check_shapes{inputs, *this, true}.has(2).same_type().same_ndims().min_ndims(3); check_shapes{inputs, *this, true}.has(2).same_type().same_ndims().min_ndims(3);
check_attribute_size(); check_attribute_size();
// num of dims of input and attribute should match // num of dims of input and attribute should match
const auto input_size = inputs[0].max_lens().size(); const auto input_ndim = inputs[0].ndim();
const auto padding_size = padding.size(); const auto padding_size = padding.size();
if(input_size != padding_size / 2 + 2 && input_size != padding_size + 2) if(input_ndim != padding_size / 2 + 2 && input_ndim != padding_size + 2)
{ {
MIGRAPHX_THROW("CONVOLUTION: input and attribute size mismatch!"); MIGRAPHX_THROW("CONVOLUTION: input and attribute size mismatch!");
} }
const shape& x_shape = inputs.at(0); const shape& x_shape = inputs.at(0);
const shape& w_shape = inputs.at(1); const shape& w_shape = inputs.at(1);
const size_t num_spatial_dims = input_size - 2; const size_t num_spatial_dims = input_ndim - 2;
if(num_spatial_dims != this->kdims()) if(num_spatial_dims != this->kdims())
{ {
MIGRAPHX_THROW("CONVOLUTION: input k-dims does not match attribute size"); MIGRAPHX_THROW("CONVOLUTION: input k-dims does not match attribute size");
...@@ -105,7 +105,7 @@ struct convolution ...@@ -105,7 +105,7 @@ struct convolution
} }
else else
{ {
return fixed_compute_shape(x_shape, w_shape); return static_compute_shape(x_shape, w_shape);
} }
} }
...@@ -143,23 +143,10 @@ struct convolution ...@@ -143,23 +143,10 @@ struct convolution
shape dynamic_compute_shape(shape x_shape, shape w_shape) const shape dynamic_compute_shape(shape x_shape, shape w_shape) const
{ {
std::vector<shape::dynamic_dimension> output_dyn_dims = {}; std::vector<shape::dynamic_dimension> output_dyn_dims = {};
output_dyn_dims.push_back(x_shape.to_dynamic().dyn_dims().at(0));
output_dyn_dims.push_back(w_shape.to_dynamic().dyn_dims().at(0));
auto dynamic_shape_push_back = [&](const shape& input_shape) { const size_t num_spatial_dims = x_shape.ndim() - 2;
if(input_shape.dynamic())
{
output_dyn_dims.push_back(input_shape.dyn_dims().at(0));
}
else
{
auto l = input_shape.lens().at(0);
output_dyn_dims.push_back({l, l});
}
};
dynamic_shape_push_back(x_shape);
dynamic_shape_push_back(w_shape);
const size_t num_spatial_dims = x_shape.max_lens().size() - 2;
if(padding_mode != default_) if(padding_mode != default_)
{ {
for(std::size_t i = 0; i < num_spatial_dims; ++i) for(std::size_t i = 0; i < num_spatial_dims; ++i)
...@@ -198,7 +185,7 @@ struct convolution ...@@ -198,7 +185,7 @@ struct convolution
return shape{x_shape.type(), output_dyn_dims}; return shape{x_shape.type(), output_dyn_dims};
} }
shape fixed_compute_shape(shape x_shape, shape w_shape) const shape static_compute_shape(shape x_shape, shape w_shape) const
{ {
std::vector<size_t> output_lens{x_shape.lens()[0], w_shape.lens()[0]}; std::vector<size_t> output_lens{x_shape.lens()[0], w_shape.lens()[0]};
auto spatial_lens = calc_conv_lens(x_shape.lens(), w_shape.lens()); auto spatial_lens = calc_conv_lens(x_shape.lens(), w_shape.lens());
......
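The simplification above relies on shape::to_dynamic() representing a fixed length l as the degenerate range {l, l}, which is what the removed dynamic_shape_push_back lambda did by hand. A minimal standalone sketch of that idea, using hypothetical stand-in types rather than the real MIGraphX shape classes:

#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical stand-in for a dynamic dimension: a {min, max} range.
struct dyn_dim
{
    std::size_t min;
    std::size_t max;
};

// A fixed (static) length l converts to the degenerate range {l, l},
// so downstream shape code can treat static and dynamic inputs uniformly.
std::vector<dyn_dim> to_dynamic(const std::vector<std::size_t>& lens)
{
    std::vector<dyn_dim> dims;
    for(auto l : lens)
        dims.push_back({l, l});
    return dims;
}

int main()
{
    // Static NCHW lens {1, 3, 32, 32} become {1,1} {3,3} {32,32} {32,32}.
    for(auto d : to_dynamic({1, 3, 32, 32}))
        std::cout << "{" << d.min << ", " << d.max << "} ";
    std::cout << "\n";
}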
...@@ -21,9 +21,11 @@ ...@@ -21,9 +21,11 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE. * THE SOFTWARE.
*/ */
#ifndef MIGRAPHX_GUARD_OPERATORS_DECONVOLUTION_HPP #ifndef MIGRAPHX_GUARD_OPERATORS_CONVOLUTION_BACKWARDS_HPP
#define MIGRAPHX_GUARD_OPERATORS_DECONVOLUTION_HPP #define MIGRAPHX_GUARD_OPERATORS_CONVOLUTION_BACKWARDS_HPP
#include <cmath>
#include <utility>
#include <migraphx/op/common.hpp> #include <migraphx/op/common.hpp>
#include <migraphx/check_shapes.hpp> #include <migraphx/check_shapes.hpp>
#include <migraphx/config.hpp> #include <migraphx/config.hpp>
...@@ -31,14 +33,13 @@ ...@@ -31,14 +33,13 @@
#include <migraphx/argument.hpp> #include <migraphx/argument.hpp>
#include <migraphx/par_dfor.hpp> #include <migraphx/par_dfor.hpp>
#include <migraphx/shape_for_each.hpp> #include <migraphx/shape_for_each.hpp>
#include <cmath> #include <migraphx/dyn_output.hpp>
#include <utility>
namespace migraphx { namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS { inline namespace MIGRAPHX_INLINE_NS {
namespace op { namespace op {
struct deconvolution struct convolution_backwards
{ {
std::vector<std::size_t> padding = {0, 0}; std::vector<std::size_t> padding = {0, 0};
std::vector<std::size_t> stride = {1, 1}; std::vector<std::size_t> stride = {1, 1};
...@@ -57,45 +58,91 @@ struct deconvolution ...@@ -57,45 +58,91 @@ struct deconvolution
f(self.group, "group")); f(self.group, "group"));
} }
std::string name() const { return "deconvolution"; } std::string name() const { return "convolution_backwards"; }
void check_attribute_size() const void check_attribute_size() const
{ {
if((padding.size() != stride.size() and (padding.size() / 2) != stride.size()) or if(padding.size() != stride.size() or stride.size() != dilation.size())
stride.size() != dilation.size())
{ {
MIGRAPHX_THROW("deconvolution: inconsistent attribute sizes"); MIGRAPHX_THROW("CONVOLUTION_BACKWARDS: inconsistent attribute sizes");
} }
} }
shape compute_shape(std::vector<shape> inputs) const shape compute_shape(std::vector<shape> inputs) const
{ {
check_shapes{inputs, *this}.has(2).same_type().same_ndims().min_ndims(3); check_shapes{inputs, *this, true}.has(2).same_type().same_ndims().min_ndims(3);
const shape& x_shape = inputs.at(0);
const shape& w_shape = inputs.at(1);
if(x_shape.ndim() - 2 != this->kdims())
{
MIGRAPHX_THROW("CONVOLUTION_BACKWARDS: input k-dims does not match attribute size");
}
const shape& input = inputs.at(0); if(not x_shape.dynamic() and not w_shape.dynamic() and
const shape& weights = inputs.at(1); x_shape.lens().at(1) != (w_shape.lens().at(0) * group))
size_t kdims = input.lens().size() - 2;
if(kdims != this->kdims())
{ {
MIGRAPHX_THROW("deconvolution: input k-dims does not match attribute size"); MIGRAPHX_THROW("CONVOLUTION_BACKWARDS: mismatched channel numbers");
} }
std::vector<size_t> output_lens{input.lens()[0], weights.lens()[1]}; if(x_shape.dynamic() or w_shape.dynamic())
{
return dynamic_compute_shape(x_shape, w_shape);
}
else
{
return static_compute_shape(x_shape, w_shape);
}
}
for(size_t i = 0; i < kdims; i++) std::vector<std::size_t> calc_spatial_lens(std::vector<std::size_t> x_lens,
std::vector<std::size_t> w_lens) const
{ {
output_lens.push_back(std::size_t(std::max<std::ptrdiff_t>( std::vector<size_t> spatial_lens(x_lens.size() - 2);
// stride * (input - 1) + output_padding + ((kernel - 1) * dilation + 1) - padding_L -
// padding_R. This assumes padding_L = padding_R and output_padding handled in parser.
for(size_t i = 0; i < spatial_lens.size(); i++)
{
spatial_lens.at(i) = (std::size_t(std::max<std::ptrdiff_t>(
1, 1,
stride[i] * (input.lens()[i + 2] - 1) + stride[i] * (x_lens[i + 2] - 1) + ((w_lens[i + 2] - 1) * dilation[i] + 1) -
((weights.lens()[i + 2] - 1) * dilation[i] + 1) - 2 * padding[i]))); 2 * padding[i])));
}
return spatial_lens;
} }
return inputs[0].with_lens(output_lens);
shape dynamic_compute_shape(shape x_shape, shape w_shape) const
{
std::vector<shape::dynamic_dimension> output_dyn_dims = {};
output_dyn_dims.push_back(x_shape.to_dynamic().dyn_dims().at(0));
output_dyn_dims.push_back(w_shape.to_dynamic().dyn_dims().at(1));
const std::size_t num_spatial_dims = x_shape.ndim() - 2;
// Does not compute for optimals
auto min_spatial_dims = calc_spatial_lens(x_shape.min_lens(), w_shape.min_lens());
auto max_spatial_dims = calc_spatial_lens(x_shape.max_lens(), w_shape.max_lens());
for(size_t i = 0; i < num_spatial_dims; ++i)
{
output_dyn_dims.push_back(
shape::dynamic_dimension{min_spatial_dims[i], max_spatial_dims[i], {}});
}
return shape{x_shape.type(), output_dyn_dims};
}
shape static_compute_shape(shape x_shape, shape w_shape) const
{
std::vector<size_t> output_lens{x_shape.lens()[0], w_shape.lens()[1]};
auto spatial_lens = calc_spatial_lens(x_shape.lens(), w_shape.lens());
std::for_each(spatial_lens.begin(), spatial_lens.end(), [&output_lens](auto x) {
output_lens.push_back(x);
});
return x_shape.with_lens(output_lens);
} }
argument compute(shape output_shape, std::vector<argument> args) const argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
{ {
argument result{output_shape}; argument result{dyn_out.computed_shape};
auto kdims = this->kdims(); auto num_spatial_dims = this->kdims();
visit_all(result, args[0], args[1])([&](auto output, auto input, auto weights) { visit_all(result, args[0], args[1])([&](auto output, auto input, auto weights) {
using type = typename decltype(output)::value_type; using type = typename decltype(output)::value_type;
...@@ -109,22 +156,22 @@ struct deconvolution ...@@ -109,22 +156,22 @@ struct deconvolution
auto wei_n = wei[0]; auto wei_n = wei[0];
auto wei_c = wei[1]; auto wei_c = wei[1];
auto out_lens = output_shape.lens(); auto out_lens = dyn_out.computed_shape.lens();
std::vector<std::size_t> win_size{in_c}; std::vector<std::size_t> win_size{in_c};
std::copy(in_lens.begin() + 2, in_lens.end(), std::back_inserter(win_size)); std::copy(in_lens.begin() + 2, in_lens.end(), std::back_inserter(win_size));
std::copy(wei.begin() + 2, wei.end(), std::back_inserter(win_size)); std::copy(wei.begin() + 2, wei.end(), std::back_inserter(win_size));
shape win_shape{output_shape.type(), win_size}; shape win_shape{dyn_out.computed_shape.type(), win_size};
par_dfor(in_n, wei_c)([&](int o, int k) { par_dfor(in_n, wei_c)([&](int o, int k) {
shape_for_each(win_shape, [&](auto idx_win) { shape_for_each(win_shape, [&](auto idx_win) {
const int w = idx_win[0]; const int w = idx_win[0];
auto input_dims_start = idx_win.begin() + 1; auto input_dims_start = idx_win.begin() + 1;
auto wei_dims_start = idx_win.begin() + kdims + 1; auto wei_dims_start = idx_win.begin() + num_spatial_dims + 1;
std::vector<std::ptrdiff_t> win_start; std::vector<std::ptrdiff_t> win_start;
for(std::size_t n = 0; n < kdims; ++n) for(std::size_t n = 0; n < num_spatial_dims; ++n)
{ {
win_start.push_back(std::ptrdiff_t(*(input_dims_start + n) * stride[n]) - win_start.push_back(std::ptrdiff_t(*(input_dims_start + n) * stride[n]) -
std::ptrdiff_t(padding[n])); std::ptrdiff_t(padding[n]));
...@@ -135,7 +182,7 @@ struct deconvolution ...@@ -135,7 +182,7 @@ struct deconvolution
std::vector<std::ptrdiff_t> idx_out{o, in_ch}; std::vector<std::ptrdiff_t> idx_out{o, in_ch};
for(size_t n = 0; n < kdims; n++) for(size_t n = 0; n < num_spatial_dims; n++)
{ {
idx_out.push_back(win_start[n] + *(wei_dims_start + n) * dilation[n]); idx_out.push_back(win_start[n] + *(wei_dims_start + n) * dilation[n]);
} }
......
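A minimal sketch of how the renamed op might be exercised in a shape test. It assumes the internal MIGraphX C++ API (make_op, program, add_parameter, add_instruction) and the header paths named below; the expected output lens follow the formula given with the commit message above.

#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/shape.hpp>

int main()
{
    migraphx::program p;
    auto* mm = p.get_main_module();

    // x: NCHW input {1, 3, 4, 4}; w: {in_channels, out_channels/group, kH, kW} = {3, 2, 3, 3}
    auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 3, 4, 4}});
    auto w = mm->add_parameter("w", migraphx::shape{migraphx::shape::float_type, {3, 2, 3, 3}});

    auto conv = mm->add_instruction(
        migraphx::make_op("convolution_backwards",
                          {{"padding", {1, 1}}, {"stride", {2, 2}}, {"dilation", {1, 1}}}),
        x,
        w);

    // Spatial extent: 2*(4-1) + (3-1)*1 + 1 - 2*1 = 7, so the computed shape
    // should come out as {float_type, {1, 2, 7, 7}}.
    auto out = conv->get_shape();
    (void)out;
}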
...@@ -45,9 +45,9 @@ ...@@ -45,9 +45,9 @@
#include <migraphx/op/contiguous.hpp> #include <migraphx/op/contiguous.hpp>
#include <migraphx/op/convert.hpp> #include <migraphx/op/convert.hpp>
#include <migraphx/op/convolution.hpp> #include <migraphx/op/convolution.hpp>
#include <migraphx/op/convolution_backwards.hpp>
#include <migraphx/op/cosh.hpp> #include <migraphx/op/cosh.hpp>
#include <migraphx/op/cos.hpp> #include <migraphx/op/cos.hpp>
#include <migraphx/op/deconvolution.hpp>
#include <migraphx/op/div.hpp> #include <migraphx/op/div.hpp>
#include <migraphx/op/dot.hpp> #include <migraphx/op/dot.hpp>
#include <migraphx/op/elu.hpp> #include <migraphx/op/elu.hpp>
......
...@@ -42,7 +42,7 @@ std::vector<int64_t> to_int64_vector(const std::vector<T>& input_vector) ...@@ -42,7 +42,7 @@ std::vector<int64_t> to_int64_vector(const std::vector<T>& input_vector)
return output_vector; return output_vector;
} }
struct parse_deconvolution : op_parser<parse_deconvolution> struct parse_conv_transpose : op_parser<parse_conv_transpose>
{ {
std::vector<op_desc> operators() const { return {{"ConvTranspose"}}; } std::vector<op_desc> operators() const { return {{"ConvTranspose"}}; }
...@@ -51,17 +51,15 @@ struct parse_deconvolution : op_parser<parse_deconvolution> ...@@ -51,17 +51,15 @@ struct parse_deconvolution : op_parser<parse_deconvolution>
onnx_parser::node_info info, onnx_parser::node_info info,
std::vector<instruction_ref> args) const std::vector<instruction_ref> args) const
{ {
operation op = make_op("deconvolution"); operation op = make_op("convolution_backwards");
value values = op.to_value(); value values = op.to_value();
// op::deconvolution op;
auto l0 = args[0]; auto l0 = args[0];
std::vector<std::int64_t> padding; std::vector<std::int64_t> padding;
bool asym_padding = false; bool asym_padding = false;
auto in_lens = l0->get_shape().lens(); assert(l0->get_shape().ndim() > 2);
assert(in_lens.size() > 2); auto kdims = l0->get_shape().ndim() - 2;
auto kdims = in_lens.size() - 2;
// ensure pads availabe only when auto_pad is "NOT_SET" // ensure pads available only when auto_pad is "NOT_SET"
check_padding_mode(info, "CONV_TRANSPOSE"); check_padding_mode(info, "CONV_TRANSPOSE");
if(contains(info.attributes, "pads")) if(contains(info.attributes, "pads"))
...@@ -70,9 +68,9 @@ struct parse_deconvolution : op_parser<parse_deconvolution> ...@@ -70,9 +68,9 @@ struct parse_deconvolution : op_parser<parse_deconvolution>
asym_padding = is_asym_padding(padding); asym_padding = is_asym_padding(padding);
size_t pad_ndims = padding.size() / 2;
if(not asym_padding) if(not asym_padding)
{ {
size_t pad_ndims = padding.size() / 2;
check_attr_sizes(kdims, pad_ndims, "PARSE_CONV_TRANSPOSE: inconsistent paddings"); check_attr_sizes(kdims, pad_ndims, "PARSE_CONV_TRANSPOSE: inconsistent paddings");
values["padding"].clear(); values["padding"].clear();
std::transform(padding.begin(), std::transform(padding.begin(),
...@@ -80,7 +78,19 @@ struct parse_deconvolution : op_parser<parse_deconvolution> ...@@ -80,7 +78,19 @@ struct parse_deconvolution : op_parser<parse_deconvolution>
std::back_inserter(values["padding"]), std::back_inserter(values["padding"]),
[](auto pad_val) { return pad_val; }); [](auto pad_val) { return pad_val; });
} }
else if(l0->get_shape().dynamic())
{
MIGRAPHX_THROW("PARSE_CONV_TRANSPOSE: asymmetric padding (padding_L != padding_R) "
"not supported with dynamic shapes");
}
else
{
// set padding to 0s, asym_padding handled by parser with slice
// TODO changing parser and op to do asym padding in op
values["padding"] = std::vector<std::size_t>(pad_ndims, 0);
}
} }
if(contains(info.attributes, "strides")) if(contains(info.attributes, "strides"))
{ {
values["stride"].clear(); values["stride"].clear();
...@@ -88,6 +98,7 @@ struct parse_deconvolution : op_parser<parse_deconvolution> ...@@ -88,6 +98,7 @@ struct parse_deconvolution : op_parser<parse_deconvolution>
check_attr_sizes( check_attr_sizes(
kdims, values["stride"].size(), "PARSE_CONV_TRANSPOSE: inconsistent strides"); kdims, values["stride"].size(), "PARSE_CONV_TRANSPOSE: inconsistent strides");
} }
if(contains(info.attributes, "dilations")) if(contains(info.attributes, "dilations"))
{ {
values["dilation"].clear(); values["dilation"].clear();
...@@ -97,21 +108,10 @@ struct parse_deconvolution : op_parser<parse_deconvolution> ...@@ -97,21 +108,10 @@ struct parse_deconvolution : op_parser<parse_deconvolution>
} }
// TODO: auto padding needs to be implemented for this parser and operator // TODO: auto padding needs to be implemented for this parser and operator
if(contains(info.attributes, "auto_pad")) if(contains(info.attributes, "auto_pad") and
{ to_upper(info.attributes.at("auto_pad").s()) != "NOTSET")
auto s = info.attributes["auto_pad"].s();
if(contains(info.attributes, "pads") and to_upper(s) != "NOTSET")
{ {
MIGRAPHX_THROW("PARSE_CONV_TRANSPOSE: auto_pad and padding cannot be specified " MIGRAPHX_THROW("PARSE_CONV_TRANSPOSE: auto padding not supported");
"simultaneously");
}
if(s.find("SAME") != std::string::npos)
{
bool is_same_upper = (s.find("SAME_UPPER") != std::string::npos);
values["padding_mode"] = is_same_upper ? to_value(op::padding_mode_t::same_upper)
: to_value(op::padding_mode_t::same_lower);
}
} }
if(contains(info.attributes, "group")) if(contains(info.attributes, "group"))
...@@ -123,10 +123,10 @@ struct parse_deconvolution : op_parser<parse_deconvolution> ...@@ -123,10 +123,10 @@ struct parse_deconvolution : op_parser<parse_deconvolution>
op.from_value(values); op.from_value(values);
auto l1 = info.add_instruction(op, l0, args[1]); auto l1 = info.add_instruction(op, l0, args[1]);
std::vector<int64_t> dims = to_int64_vector(l1->get_shape().lens());
std::vector<int64_t> curr_shape(dims.begin() + 2, dims.end());
if(asym_padding) if(asym_padding)
{ {
std::vector<int64_t> dims = to_int64_vector(l1->get_shape().lens());
std::vector<int64_t> curr_shape(dims.begin() + 2, dims.end());
std::vector<int64_t> axes(kdims); std::vector<int64_t> axes(kdims);
std::iota(axes.begin(), axes.end(), 2); // ignore first 2 dims std::iota(axes.begin(), axes.end(), 2); // ignore first 2 dims
...@@ -144,9 +144,11 @@ struct parse_deconvolution : op_parser<parse_deconvolution> ...@@ -144,9 +144,11 @@ struct parse_deconvolution : op_parser<parse_deconvolution>
make_op("slice", {{"axes", axes}, {"starts", starts}, {"ends", ends}}), l1); make_op("slice", {{"axes", axes}, {"starts", starts}, {"ends", ends}}), l1);
} }
if(contains(info.attributes, "output_padding")) // TODO, should check output_padding < (strides or dilations)
if(contains(info.attributes, "output_padding") and
not contains(info.attributes, "output_shape"))
{ {
size_t non_kdims = dims.size() * 2 - kdims; size_t non_kdims = l1->get_shape().ndim() * 2 - kdims;
std::vector<int64_t> output_padding(non_kdims, 0); std::vector<int64_t> output_padding(non_kdims, 0);
copy(info.attributes["output_padding"].ints(), std::back_inserter(output_padding)); copy(info.attributes["output_padding"].ints(), std::back_inserter(output_padding));
check_attr_sizes(kdims, check_attr_sizes(kdims,
...@@ -155,14 +157,21 @@ struct parse_deconvolution : op_parser<parse_deconvolution> ...@@ -155,14 +157,21 @@ struct parse_deconvolution : op_parser<parse_deconvolution>
l1 = info.add_instruction(make_op("pad", {{"pads", output_padding}}), l1); l1 = info.add_instruction(make_op("pad", {{"pads", output_padding}}), l1);
} }
// TODO, doing unnecessary calculations with this. Could instead // TODO, doing unnecessary calculations with this. Could instead
// calculate the padding to conv_transpose that would give the output_shape.
if(contains(info.attributes, "output_shape")) if(contains(info.attributes, "output_shape"))
{ {
if(l1->get_shape().dynamic())
{
MIGRAPHX_THROW("PARSE_CONV_TRANSPOSE: output_shape attribute and dynamic shapes "
"not supported");
}
std::vector<int64_t> dims = to_int64_vector(l1->get_shape().lens());
std::vector<int64_t> curr_shape(dims.begin() + 2, dims.end());
std::vector<int64_t> output_shape; std::vector<int64_t> output_shape;
copy(info.attributes["output_shape"].ints(), std::back_inserter(output_shape)); copy(info.attributes["output_shape"].ints(), std::back_inserter(output_shape));
check_attr_sizes( check_attr_sizes(
kdims, output_shape.size(), "PARSE_CONV_TRANSPOSE: inconsistent output shape"); kdims, output_shape.size(), "PARSE_CONV_TRANSPOSE: inconsistent output shape");
dims = to_int64_vector(l1->get_shape().lens());
copy(dims.begin() + 2, dims.end(), curr_shape.begin());
if(curr_shape != output_shape) if(curr_shape != output_shape)
{ {
std::vector<int64_t> target_padding(dims.size() * 2 - kdims, 0); std::vector<int64_t> target_padding(dims.size() * 2 - kdims, 0);
......
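As in the hunk above, the parser realizes output_padding with a separate pad op: the "pads" vector gets 2*ndim - kdims leading zeros and the output_padding values appended at the end, so only the high side of each spatial dimension is padded. A small standalone sketch of that mapping, using a hypothetical build_pads helper and assuming the ONNX-style pads layout (all begin values, then all end values):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical helper mirroring the parser logic above: for an ndim-rank output with
// kdims spatial dims, the "pads" attribute has 2*ndim entries; the first
// 2*ndim - kdims stay zero and the trailing kdims entries take output_padding.
std::vector<std::int64_t> build_pads(std::size_t ndim,
                                     std::size_t kdims,
                                     const std::vector<std::int64_t>& output_padding)
{
    std::vector<std::int64_t> pads(2 * ndim - kdims, 0);
    pads.insert(pads.end(), output_padding.begin(), output_padding.end());
    return pads;
}

int main()
{
    // 4-D NCHW output with output_padding = {1, 1} on H and W:
    // pads = {0, 0, 0, 0, 0, 0, 1, 1}, i.e. one extra element after H and after W.
    for(auto p : build_pads(4, 2, {1, 1}))
        std::cout << p << " ";
    std::cout << "\n";
}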
...@@ -23,14 +23,14 @@ ...@@ -23,14 +23,14 @@
*/ */
#include <migraphx/config.hpp> #include <migraphx/config.hpp>
#include <migraphx/cpu/dnnl.hpp> #include <migraphx/cpu/dnnl.hpp>
#include <migraphx/op/deconvolution.hpp> #include <migraphx/op/convolution_backwards.hpp>
namespace migraphx { namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS { inline namespace MIGRAPHX_INLINE_NS {
namespace cpu { namespace cpu {
struct dnnl_deconvolution struct dnnl_deconvolution
: dnnl_extend_op<dnnl_deconvolution, dnnl::deconvolution_forward, op::deconvolution> : dnnl_extend_op<dnnl_deconvolution, dnnl::deconvolution_forward, op::convolution_backwards>
{ {
std::vector<int> arg_map(int) const std::vector<int> arg_map(int) const
{ {
......
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
#include <migraphx/dfor.hpp> #include <migraphx/dfor.hpp>
#include <migraphx/op/identity.hpp> #include <migraphx/op/identity.hpp>
#include <migraphx/op/convolution.hpp> #include <migraphx/op/convolution.hpp>
#include <migraphx/op/deconvolution.hpp> #include <migraphx/op/convolution_backwards.hpp>
#include <migraphx/op/quant_convolution.hpp> #include <migraphx/op/quant_convolution.hpp>
#include <migraphx/op/dot.hpp> #include <migraphx/op/dot.hpp>
#include <migraphx/op/quant_dot.hpp> #include <migraphx/op/quant_dot.hpp>
...@@ -345,7 +345,7 @@ struct cpu_apply ...@@ -345,7 +345,7 @@ struct cpu_apply
extend_op("contiguous", "dnnl::reorder"); extend_op("contiguous", "dnnl::reorder");
extend_op("convolution", "dnnl::convolution"); extend_op("convolution", "dnnl::convolution");
#ifndef MIGRAPHX_ENABLE_ZENDNN #ifndef MIGRAPHX_ENABLE_ZENDNN
extend_op("deconvolution", "dnnl::deconvolution"); extend_op("convolution_backwards", "dnnl::convolution_backwards");
extend_op("dot", "dnnl::dot"); extend_op("dot", "dnnl::dot");
#endif #endif
extend_op("erf", "cpu::erf"); extend_op("erf", "cpu::erf");
......
...@@ -176,7 +176,7 @@ register_op(migraphx_gpu ...@@ -176,7 +176,7 @@ register_op(migraphx_gpu
OPERATORS gpu::rocblas_gemm<op::dot> gpu::rocblas_gemm<op::quant_dot> OPERATORS gpu::rocblas_gemm<op::dot> gpu::rocblas_gemm<op::quant_dot>
INCLUDES migraphx/gpu/context.hpp) INCLUDES migraphx/gpu/context.hpp)
register_op(migraphx_gpu HEADER migraphx/gpu/convolution.hpp register_op(migraphx_gpu HEADER migraphx/gpu/convolution.hpp
OPERATORS gpu::miopen_convolution<op::convolution> gpu::miopen_convolution<op::deconvolution> gpu::miopen_convolution<op::quant_convolution> OPERATORS gpu::miopen_convolution<op::convolution> gpu::miopen_convolution<op::convolution_backwards> gpu::miopen_convolution<op::quant_convolution>
INCLUDES migraphx/gpu/context.hpp) INCLUDES migraphx/gpu/context.hpp)
rocm_set_soversion(migraphx_gpu ${MIGRAPHX_SO_VERSION}) rocm_set_soversion(migraphx_gpu ${MIGRAPHX_SO_VERSION})
rocm_clang_tidy_check(migraphx_gpu) rocm_clang_tidy_check(migraphx_gpu)
......
...@@ -79,7 +79,7 @@ void compile_miopen::apply(module& m) const ...@@ -79,7 +79,7 @@ void compile_miopen::apply(module& m) const
std::size_t ws = 0; std::size_t ws = 0;
try try
{ {
// for the regular convolution and deconvolution, this try would always succeed // for the regular convolution and convolution_backwards, this try would always succeed
ws = compile(op, ins, int8_x4_format); ws = compile(op, ins, int8_x4_format);
} }
catch(migraphx::exception&) catch(migraphx::exception&)
......
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
#include <migraphx/op/identity.hpp> #include <migraphx/op/identity.hpp>
#include <migraphx/op/convolution.hpp> #include <migraphx/op/convolution.hpp>
#include <migraphx/op/quant_convolution.hpp> #include <migraphx/op/quant_convolution.hpp>
#include <migraphx/op/deconvolution.hpp> #include <migraphx/op/convolution_backwards.hpp>
#include <unordered_map> #include <unordered_map>
#include <migraphx/reflect.hpp> #include <migraphx/reflect.hpp>
#include <migraphx/gpu/context.hpp> #include <migraphx/gpu/context.hpp>
...@@ -146,7 +146,8 @@ struct miopen_convolution ...@@ -146,7 +146,8 @@ struct miopen_convolution
void set_conv_descriptor() void set_conv_descriptor()
{ {
cd = (op.name() == "deconvolution") ? make_deconv(op) : make_conv(op); cd =
(op.name() == "convolution_backwards") ? make_convolution_backwards(op) : make_conv(op);
} }
value compile(migraphx::context& ctx, const shape& output, const std::vector<shape>& input) value compile(migraphx::context& ctx, const shape& output, const std::vector<shape>& input)
......
...@@ -170,7 +170,7 @@ inline convolution_descriptor make_conv(const T& op) ...@@ -170,7 +170,7 @@ inline convolution_descriptor make_conv(const T& op)
} }
template <class T> template <class T>
inline convolution_descriptor make_deconv(const T& op) inline convolution_descriptor make_convolution_backwards(const T& op)
{ {
auto c = make_obj<convolution_descriptor>(&miopenCreateConvolutionDescriptor); auto c = make_obj<convolution_descriptor>(&miopenCreateConvolutionDescriptor);
miopenConvolutionMode_t c_mode = miopenTranspose; miopenConvolutionMode_t c_mode = miopenTranspose;
......
...@@ -106,7 +106,7 @@ struct miopen_apply ...@@ -106,7 +106,7 @@ struct miopen_apply
add_extend_op("topk"); add_extend_op("topk");
add_convolution_op("convolution"); add_convolution_op("convolution");
add_convolution_op("deconvolution"); add_convolution_op("convolution_backwards");
add_convolution_op("quant_convolution"); add_convolution_op("quant_convolution");
add_gemm_op<op::dot>("dot"); add_gemm_op<op::dot>("dot");
add_gemm_op<op::quant_dot>("quant_dot"); add_gemm_op<op::quant_dot>("quant_dot");
......
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
#include <migraphx/dfor.hpp> #include <migraphx/dfor.hpp>
#include <migraphx/op/identity.hpp> #include <migraphx/op/identity.hpp>
#include <migraphx/op/convolution.hpp> #include <migraphx/op/convolution.hpp>
#include <migraphx/op/deconvolution.hpp> #include <migraphx/op/convolution_backwards.hpp>
#include <migraphx/op/quant_convolution.hpp> #include <migraphx/op/quant_convolution.hpp>
#include <migraphx/op/dot.hpp> #include <migraphx/op/dot.hpp>
#include <migraphx/op/quant_dot.hpp> #include <migraphx/op/quant_dot.hpp>
......
[Binary ONNX test data added: conv_transpose_auto_pad_test, a ConvTranspose node with auto_pad = SAME_UPPER, inputs x and w, output y; protobuf content not representable as text.]
[Binary ONNX test data renamed: deconv_bias_test -> conv_transpose_bias_test, a ConvTranspose node with bias (inputs x, w, b, output y); protobuf content not representable as text.]