Commit 0afab294 authored by charlie

Progress on changing padding_mode

Weird bug with ref padding shape
still need to change parse_convolution
parent 376b18af
@@ -33,6 +33,8 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace op {

+// Padding mode is default_ for all constant padding.
+// same_lower and same_upper are used for dynamic padding.
 enum padding_mode_t
 {
     default_, // NOLINT
...
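The new comment encodes the key behavioral split: with default_ the operator's explicit padding vector is applied as constant padding, while the SAME modes defer the padding computation until input shapes are known at evaluation time. A minimal standalone sketch of that split (the enumerator names appear in this diff; their ordering and the printout are illustrative assumptions only):

#include <cstdio>
#include <initializer_list>

// Mirrors the padding_mode_t semantics described by the new comment above.
enum padding_mode_t
{
    default_,   // constant padding taken from the operator's explicit `padding` vector
    same_lower, // SAME padding computed at evaluation time; extra odd padding goes first
    same_upper  // SAME padding computed at evaluation time; extra odd padding goes last
};

int main()
{
    for(padding_mode_t m : {default_, same_lower, same_upper})
        std::printf("mode %d: %s padding\n",
                    static_cast<int>(m),
                    m == default_ ? "constant" : "eval-time computed");
    return 0;
}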
@@ -37,7 +37,7 @@ namespace op {
 struct convolution
 {
-    std::vector<std::size_t> padding = {};
+    std::vector<std::size_t> padding = {0, 0};
     std::vector<std::size_t> stride = {1, 1};
     std::vector<std::size_t> dilation = {1, 1};
@@ -65,8 +65,6 @@ struct convolution
         }
     }

-    value attributes() const { return {{"normalize_padding", "padding"}}; }
-
     shape normalize_compute_shape(std::vector<shape> inputs) const
     {
         check_shapes{inputs, *this, true}.has(2).same_type().same_ndims().min_ndims(3);
@@ -152,7 +150,7 @@ struct convolution
         dynamic_shape_push_back(w_shape);
         const size_t num_spatial_dims = x_shape.max_lens().size() - 2;
-        if(padding_mode != op::padding_mode_t::default_)
+        if(padding_mode != default_)
         {
             for(std::size_t i = 0; i < num_spatial_dims; ++i)
             {
...
@@ -37,7 +37,7 @@ namespace op {
 struct quant_convolution
 {
-    std::vector<std::size_t> padding = {};
+    std::vector<std::size_t> padding = {0, 0};
     std::vector<std::size_t> stride = {1, 1};
     std::vector<std::size_t> dilation = {1, 1};
@@ -54,10 +54,7 @@ struct quant_convolution
                     f(self.group, "group"));
     }

-    value attributes() const
-    {
-        return {{"general_data_type", "convolution"}, {"normalize_padding", "padding"}};
-    }
+    value attributes() const { return {{"general_data_type", "convolution"}}; }

     std::string name() const { return "quant_convolution"; }
...
@@ -24,9 +24,10 @@
 #ifndef MIGRAPHX_GUARD_OPERATORS_PAD_CALC_HPP
 #define MIGRAPHX_GUARD_OPERATORS_PAD_CALC_HPP

-#include <migraphx/config.hpp>
 #include <cstdint>
 #include <vector>
+#include <migraphx/config.hpp>
+#include <migraphx/shape.hpp>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
@@ -49,12 +50,20 @@ void calculate_padding(int64_t idx,
  * \param use_upper put odd padding on upper or lower side
  * \return padding in the form of {x0_begin, x1_begin, ... x0_end , x1_end, ...}
  */
-std::vector<std::size_t> calc_dyn_auto_pad(std::vector<std::size_t> tensor_lens,
-                                           std::vector<std::size_t> k_lens,
-                                           std::vector<std::size_t> strides,
-                                           std::vector<std::size_t> dilations,
-                                           bool use_upper = true);
+std::vector<std::size_t> calc_dyn_auto_pad(const std::vector<std::size_t>& tensor_lens,
+                                           const std::vector<std::size_t>& k_lens,
+                                           const std::vector<std::size_t>& strides,
+                                           const std::vector<std::size_t>& dilations,
+                                           bool use_upper = true);
+
+// Used for dynamic auto padding of convolution operators since padding needs to be computed at
+// evaluation time.
+shape compute_padded_shape(const shape& input,
+                           const shape& weights,
+                           const std::vector<std::size_t>& padding,
+                           const std::vector<std::size_t>& stride,
+                           const std::vector<std::size_t>& dilation);

 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
...
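Since the header documents the return layout but not the arithmetic, a standalone replay of the SAME formula helps pin the contract down (a sketch, not the library routine; same_total_pad is a name invented here):

#include <algorithm>
#include <cstdio>

// Total SAME padding for one spatial axis so that output_len == ceil(input_len / stride).
long same_total_pad(long in, long k, long stride, long dilation)
{
    long effective_k = 1 + dilation * (k - 1);     // kernel extent after dilation
    long out         = (in + stride - 1) / stride; // ceil(in / stride)
    return std::max(0L, (out - 1) * stride + effective_k - in);
}

int main()
{
    // 5-wide input, 3-wide kernel, stride 1, dilation 1 -> total padding 2, split 1/1.
    // Over two spatial axes that is {x0_begin, x1_begin, x0_end, x1_end} = {1, 1, 1, 1}.
    long t = same_total_pad(5, 3, 1, 1);
    std::printf("total=%ld begin=%ld end=%ld\n", t, t / 2, t - t / 2);
    return 0;
}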
@@ -125,7 +125,6 @@ struct parse_convolution : op_parser<parse_convolution>
             values["padding_mode"] = is_same_upper
                                          ? to_value(op::padding_mode_t::same_upper)
                                          : to_value(op::padding_mode_t::same_lower);
-            values["use_dynamic_same_auto_pad"] = true;
         }
         else
         {
...
@@ -95,6 +95,8 @@ struct parse_deconvolution : op_parser<parse_deconvolution>
             check_attr_sizes(
                 kdims, values["dilation"].size(), "PARSE_CONV_TRANSPOSE: inconsistent dilations");
         }
+
+        // TODO: nothing is done with this?
         if(contains(info.attributes, "auto_pad"))
         {
             auto s = info.attributes["auto_pad"].s();
@@ -106,7 +108,9 @@ struct parse_deconvolution : op_parser<parse_deconvolution>
             if(s.find("SAME") != std::string::npos)
             {
-                values["padding_mode"] = to_value(op::padding_mode_t::same);
+                bool is_same_upper = (s.find("SAME_UPPER") != std::string::npos);
+                values["padding_mode"] = is_same_upper ? to_value(op::padding_mode_t::same_upper)
+                                                       : to_value(op::padding_mode_t::same_lower);
             }
         }
...
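SAME_UPPER and SAME_LOWER only differ when the total padding for an axis is odd; the use_upper flag in calc_dyn_auto_pad decides which side receives the extra element. A hand-computed two-line illustration (assuming "upper" means the extra padding goes at the end, as the pad_calc doc comment above describes):

#include <cstdio>

int main()
{
    // 5-wide input, 2-wide kernel, stride 1 -> SAME needs a total padding of 1 (odd).
    int total = 1;
    std::printf("SAME_UPPER: begin=%d end=%d\n", total / 2, total - total / 2); // {0, 1}
    std::printf("SAME_LOWER: begin=%d end=%d\n", total - total / 2, total / 2); // {1, 0}
    return 0;
}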
@@ -52,10 +52,10 @@ void calculate_padding(int64_t idx,
     }
 }

-std::vector<std::size_t> calc_dyn_auto_pad(std::vector<std::size_t> tensor_lens,
-                                           std::vector<std::size_t> k_lens,
-                                           std::vector<std::size_t> strides,
-                                           std::vector<std::size_t> dilations,
+std::vector<std::size_t> calc_dyn_auto_pad(const std::vector<std::size_t>& tensor_lens,
+                                           const std::vector<std::size_t>& k_lens,
+                                           const std::vector<std::size_t>& strides,
+                                           const std::vector<std::size_t>& dilations,
                                            bool use_upper)
 {
     std::vector<std::size_t> padding;
@@ -86,5 +86,28 @@ std::vector<std::size_t> calc_dyn_auto_pad(std::vector<std::size_t> tensor_lens,
     return padding;
 }

+shape compute_padded_shape(const shape& input,
+                           const shape& weights,
+                           const std::vector<std::size_t>& padding,
+                           const std::vector<std::size_t>& stride,
+                           const std::vector<std::size_t>& dilation)
+{
+    const size_t num_spatial_dims = input.lens().size() - 2;
+    std::vector<size_t> output_lens{input.lens()[0], weights.lens()[0]};
+    // calculate the output shape of the convolution: ((W - K + 2P) / S) + 1
+    for(size_t i = 0; i < num_spatial_dims; i++)
+    {
+        auto padding_factor = padding[i] + padding[i + num_spatial_dims];
+        output_lens.push_back(std::size_t(std::max<std::ptrdiff_t>(
+            1,
+            (input.lens()[i + 2] - (1 + dilation[i] * (weights.lens()[i + 2] - 1)) +
+             padding_factor) /
+                    stride[i] +
+                1)));
+    }
+    return input.with_lens(output_lens);
+}
+
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
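To sanity-check the ((W - K + 2P) / S) + 1 comment against what compute_padded_shape produces: for a 1x3x5x5 NCHW input, a 1x3x3x3 weight tensor, SAME padding {1, 1, 1, 1}, stride 1, and dilation 1, each spatial dim stays at 5. A standalone recomputation of the loop (not a call into the library):

#include <cstdio>
#include <vector>

int main()
{
    std::vector<int> input   = {1, 3, 5, 5}; // NCHW
    std::vector<int> weights = {1, 3, 3, 3}; // OIHW
    std::vector<int> padding = {1, 1, 1, 1}; // {h_begin, w_begin, h_end, w_end}
    int stride = 1, dilation = 1;

    std::vector<int> out = {input[0], weights[0]}; // {batch, output channels}
    for(int i = 0; i < 2; i++)
    {
        int p  = padding[i] + padding[i + 2];         // 2P: begin + end padding
        int ek = 1 + dilation * (weights[i + 2] - 1); // dilated kernel extent K
        out.push_back((input[i + 2] - ek + p) / stride + 1);
    }
    for(int d : out)
        std::printf("%d ", d); // prints: 1 1 5 5
    std::printf("\n");
    return 0;
}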
@@ -240,12 +240,10 @@ struct ref_convolution : auto_register_op<ref_convolution<Op>>
         if(op.padding_mode != op::padding_mode_t::default_)
         {
             auto input_lens = args[0].get_shape().lens();
-            std::vector<std::size_t> img_lens{input_lens.begin(), input_lens.end()};
             auto weights_lens = args[1].get_shape().lens();
-            std::vector<std::size_t> k_lens{weights_lens.begin(), weights_lens.end()};
-            padding = calc_dyn_auto_pad(img_lens, k_lens, op.stride, op.dilation);
-            output_shape =
-                compute_padded_shape({args.at(0).get_shape(), args.at(1).get_shape()}, padding);
+            padding = calc_dyn_auto_pad(input_lens, weights_lens, op.stride, op.dilation);
+            output_shape = compute_padded_shape(
+                args.at(0).get_shape(), args.at(1).get_shape(), padding, op.stride, op.dilation);
         }
         else
         {
@@ -313,34 +311,6 @@ struct ref_convolution : auto_register_op<ref_convolution<Op>>
         });
         return result;
     }
-
-    private:
-    /*!
-     * Used for dynamic auto padding since padding needs to be computed at evaulation time.
-     * \param inputs two fixed shape inputs [input_tensor, weights]
-     * \param padding from auto_pad calculation
-     */
-    shape compute_padded_shape(const std::vector<shape>& inputs,
-                               const std::vector<std::size_t>& padding) const
-    {
-        const shape& input = inputs.at(0);
-        const shape& weights = inputs.at(1);
-        const size_t num_spatial_dims = input.lens().size() - 2;
-        std::vector<size_t> output_lens{input.lens()[0], weights.lens()[0]};
-        // calculate the output shape of the convolution: ((W - K + 2P) / S) + 1
-        for(size_t i = 0; i < num_spatial_dims; i++)
-        {
-            auto padding_factor = padding[i] + padding[i + num_spatial_dims];
-            output_lens.push_back(std::size_t(std::max<std::ptrdiff_t>(
-                1,
-                (input.lens()[i + 2] - (1 + op.dilation[i] * (weights.lens()[i + 2] - 1)) +
-                 padding_factor) /
-                        op.stride[i] +
-                    1)));
-        }
-        return inputs[0].with_lens(output_lens);
-    }
 };

 struct ref_im2col
...
@@ -75,7 +75,6 @@ struct parse_conv : op_parser<parse_conv>
             const std::string& pad_mode = info.attributes.at("padding").s();
             if(pad_mode.find("SAME") != std::string::npos)
             {
-                op.padding_mode = op::padding_mode_t::same;
                 std::vector<size_t> weight_dims = weights->get_shape().lens();
                 size_t weight_h = weight_dims[2];
                 size_t weight_w = weight_dims[3];
@@ -87,10 +86,6 @@ struct parse_conv : op_parser<parse_conv>
                 op.padding = std::vector<size_t>(pads.begin(), pads.end());
             }
-            else if(pad_mode.find("VALID") != std::string::npos)
-            {
-                op.padding_mode = op::padding_mode_t::valid;
-            }
             else if(pad_mode.find("EXPLICIT") != std::string::npos)
             {
                 std::vector<size_t> padding;
...
@@ -80,7 +80,6 @@ struct parse_depthwiseconv : op_parser<parse_depthwiseconv>
             if(pad_mode.find("SAME") != std::string::npos)
             {
-                op.padding_mode = op::padding_mode_t::same;
                 std::vector<size_t> weight_dims = weights->get_shape().lens();
                 size_t weight_h = weight_dims[2];
                 size_t weight_w = weight_dims[3];
@@ -101,10 +100,6 @@ struct parse_depthwiseconv : op_parser<parse_depthwiseconv>
                     op.padding[1] = pads[1];
                 }
             }
-            else if(pad_mode.find("VALID") != std::string::npos)
-            {
-                op.padding_mode = op::padding_mode_t::valid;
-            }
         }

         std::vector<int64_t> new_weights_shape;
...
@@ -760,7 +760,6 @@ TEST_CASE(conv_autopad_same_test)
     auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
     migraphx::op::convolution op;
     op.padding = {1, 1, 1, 1};
-    op.padding_mode = migraphx::op::padding_mode_t::same;
     mm->add_instruction(op, l0, l1);
     auto prog = optimize_onnx("conv_autopad_same_test.onnx");
@@ -909,13 +908,9 @@ TEST_CASE(conv_dynamic_batch_same_upper)
     auto l0 = mm->add_parameter(
         "0", {migraphx::shape::float_type, {{1, 10, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}});
     auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
-    auto c0 =
-        mm->add_instruction(migraphx::make_op("convolution",
-                                              {{"padding", {1, 1, 1, 1}},
-                                               {"stride", {1, 1}},
-                                               {"dilation", {1, 1}},
-                                               {"padding_mode", migraphx::op::padding_mode_t::same},
-                                               {"use_dynamic_same_auto_pad", false}}),
+    auto c0 = mm->add_instruction(
+        migraphx::make_op("convolution",
+                          {{"padding", {1, 1, 1, 1}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
         l0,
         l1);
     mm->add_return({c0});
@@ -939,8 +934,7 @@ TEST_CASE(conv_dynamic_img_same_upper)
                           {{"padding", {0, 0}},
                            {"stride", {1, 1}},
                            {"dilation", {1, 1}},
-                           {"padding_mode", migraphx::op::padding_mode_t::same_upper},
-                           {"use_dynamic_same_auto_pad", true}}),
+                           {"padding_mode", migraphx::op::padding_mode_t::same_upper}}),
         l0,
         l1);
     mm->add_return({c0});
@@ -964,8 +958,7 @@ TEST_CASE(conv_dynamic_kernel_same_lower)
                           {{"padding", {0, 0}},
                            {"stride", {1, 1}},
                            {"dilation", {1, 1}},
-                           {"padding_mode", migraphx::op::padding_mode_t::same_lower},
-                           {"use_dynamic_same_auto_pad", true}}),
+                           {"padding_mode", migraphx::op::padding_mode_t::same_lower}}),
         l0,
         l1);
     mm->add_return({c0});
...
@@ -261,8 +261,7 @@ TEST_CASE(convolution_shape)
         migraphx::make_op("convolution",
                           {{"stride", {1, 1}},
                            {"dilation", {1, 1}},
-                           {"padding_mode", migraphx::op::padding_mode_t::same_upper},
-                           {"use_dynamic_same_auto_pad", true}}),
+                           {"padding_mode", migraphx::op::padding_mode_t::same_upper}}),
         input_dyn_shape,
         weights_shape);
@@ -275,8 +274,7 @@ TEST_CASE(convolution_shape)
         migraphx::make_op("convolution",
                           {{"stride", {1, 1}},
                            {"dilation", {1, 1}},
-                           {"padding_mode", migraphx::op::padding_mode_t::same_upper},
-                           {"use_dynamic_same_auto_pad", true}}),
+                           {"padding_mode", migraphx::op::padding_mode_t::same_upper}}),
         input_dyn_shape,
         weights_shape);
@@ -290,8 +288,7 @@ TEST_CASE(convolution_shape)
         migraphx::make_op("convolution",
                           {{"stride", {1, 1}},
                            {"dilation", {1, 1}},
-                           {"padding_mode", migraphx::op::padding_mode_t::same_lower},
-                           {"use_dynamic_same_auto_pad", true}}),
+                           {"padding_mode", migraphx::op::padding_mode_t::same_lower}}),
         input_dyn_shape,
         weights_shape);
 }
...
@@ -1168,10 +1168,9 @@ TEST_CASE(conv_dynamic_img_same_upper_test)
     auto input = mm->add_parameter("X", input_dyn_shape);
     auto weights = mm->add_parameter("W", weights_shape);
     mm->add_instruction(
-        migraphx::make_op("convolution",
-                          {{"stride", {1, 1}},
-                           {"padding_mode", migraphx::op::padding_mode_t::same_upper},
-                           {"use_dynamic_same_auto_pad", true}}),
+        migraphx::make_op(
+            "convolution",
+            {{"stride", {1, 1}}, {"padding_mode", migraphx::op::padding_mode_t::same_upper}}),
         input,
         weights);
@@ -1240,10 +1239,9 @@ TEST_CASE(conv_dynamic_kernel_same_lower_test)
     auto input = mm->add_parameter("X", input_shape);
     auto weights = mm->add_parameter("W", weights_shape);
     mm->add_instruction(
-        migraphx::make_op("convolution",
-                          {{"stride", {1, 1}},
-                           {"padding_mode", migraphx::op::padding_mode_t::same_lower},
-                           {"use_dynamic_same_auto_pad", true}}),
+        migraphx::make_op(
+            "convolution",
+            {{"stride", {1, 1}}, {"padding_mode", migraphx::op::padding_mode_t::same_lower}}),
         input,
         weights);
...
@@ -327,7 +327,6 @@ migraphx::program create_conv()
     mm->add_literal(migraphx::shape{migraphx::shape::float_type, {3, 3, 3, 32}}, weight_data);

     migraphx::op::convolution op;
-    op.padding_mode = migraphx::op::padding_mode_t::same;
     op.padding = {1, 1, 1, 1};
     op.stride = {1, 1};
     op.dilation = {1, 1};
@@ -406,7 +405,6 @@ TEST_CASE(depthwiseconv_test)
     mm->add_literal(migraphx::shape{migraphx::shape::float_type, {3, 3, 3, 1}}, weight_data);

     migraphx::op::convolution op;
-    op.padding_mode = migraphx::op::padding_mode_t::same;
     op.padding = {1, 1};
     op.stride = {1, 1};
     op.dilation = {1, 1};
...
@@ -37,10 +37,7 @@ struct quant_conv_default_mode : verify_program<quant_conv_default_mode>
         auto pa = mm->add_parameter("a", a_shape);
         migraphx::shape c_shape{migraphx::shape::int8_type, {2, 3, 3, 3}};
         auto pc = mm->add_parameter("c", c_shape);
-        mm->add_instruction(
-            migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}, migraphx::op::same},
-            pa,
-            pc);
+        mm->add_instruction(migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}}, pa, pc);
         return p;
     }
 };
...
@@ -37,10 +37,7 @@ struct quant_conv_int8x4_default : verify_program<quant_conv_int8x4_default>
         auto pa = mm->add_parameter("a", a_shape);
         migraphx::shape c_shape{migraphx::shape::int8_type, {16, 16, 3, 3}};
         auto pc = mm->add_parameter("c", c_shape);
-        mm->add_instruction(
-            migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}, migraphx::op::same},
-            pa,
-            pc);
+        mm->add_instruction(migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}}, pa, pc);
         return p;
     }
 };
...
(Deleted file: the quant_conv_valid_mode verify test is removed outright, since it constructs quant_convolution with migraphx::op::valid, the padding mode this commit is eliminating.)

-/*
- * The MIT License (MIT)
- *
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "verify_program.hpp"
-#include <migraphx/program.hpp>
-#include <migraphx/generate.hpp>
-#include <migraphx/op/quant_convolution.hpp>
-
-struct quant_conv_valid_mode : verify_program<quant_conv_valid_mode>
-{
-    migraphx::program create_program() const
-    {
-        migraphx::program p;
-        auto* mm = p.get_main_module();
-        migraphx::shape a_shape{migraphx::shape::int8_type, {2, 3, 4, 4}};
-        auto pa = mm->add_parameter("a", a_shape);
-        migraphx::shape c_shape{migraphx::shape::int8_type, {2, 3, 3, 3}};
-        auto pc = mm->add_parameter("c", c_shape);
-        mm->add_instruction(
-            migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}, migraphx::op::valid},
-            pa,
-            pc);
-        return p;
-    }
-};