Unverified Commit 1cc724ee authored by kahmed10's avatar kahmed10 Committed by GitHub
Browse files

ND convolution GPU support (#550)

* initial progress

* formatting

* add pooling changes

* formatting

* change eliminate_pad

* formatting

* rename var

* formatting

* update op shape test and compute

* formatting

* revert conv constructor

* formatting

* change initializer

* formatting

* fix tidy

* change quant conv and shape check

* add tests and fixes

* formatting

* fix type

* fix conv test

* formatting

* add pooling and bn tests

* formatting

* add inconsistent attr tests

* fix padding issue

* formatting

* progress on 1d to 2d

* formatting

* change compute and compile functions

* formatting

* fix duplicate

* fix conflict

* fix issue with 1d conv

* formatting

* add check for 3d limit

* rename function

* formatting

* rename functions

* formatting

* add op_shape test

* change functions

* formatting

* change to copy_backward

* formatting
parent 59e36b72
...@@ -71,6 +71,19 @@ struct check_shapes ...@@ -71,6 +71,19 @@ struct check_shapes
return *this; return *this;
} }
/// Verify that every shape in the checked range has at most `n` dimensions.
/// Throws (with the operator-name prefix) on the first violating shape.
/// Returns *this so checks can be chained fluently.
const check_shapes& max_ndims(std::size_t n) const
{
    assert(begin != nullptr);
    assert(end != nullptr);
    // Check every shape, not just the first: without a preceding
    // same_ndims() the shapes may have differing ranks, and an
    // oversized later shape must not slip through.
    for(auto it = begin; it != end; ++it)
    {
        if(it->lens().size() > n)
            MIGRAPHX_THROW(prefix() + "Shape must have at most " + std::to_string(n) +
                           " dimensions");
    }
    return *this;
}
const check_shapes& min_ndims(std::size_t n) const const check_shapes& min_ndims(std::size_t n) const
{ {
assert(begin != nullptr); assert(begin != nullptr);
......
...@@ -37,13 +37,19 @@ struct convolution ...@@ -37,13 +37,19 @@ struct convolution
} }
std::string name() const { return "convolution"; } std::string name() const { return "convolution"; }
shape compute_shape(std::vector<shape> inputs) const
void check_attribute_size() const
{ {
check_shapes{inputs, *this}.has(2).same_type().same_ndims().min_ndims(3);
if(not(padding.size() == stride.size() and padding.size() == dilation.size())) if(not(padding.size() == stride.size() and padding.size() == dilation.size()))
{ {
MIGRAPHX_THROW("convolution: inconsistent attribute sizes"); MIGRAPHX_THROW("convolution: inconsistent attribute sizes");
} }
}
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this}.has(2).same_type().same_ndims().min_ndims(3);
check_attribute_size();
const shape& input = inputs.at(0); const shape& input = inputs.at(0);
const shape& weights = inputs.at(1); const shape& weights = inputs.at(1);
...@@ -67,6 +73,12 @@ struct convolution ...@@ -67,6 +73,12 @@ struct convolution
return {t, output_lens}; return {t, output_lens};
} }
// Number of spatial (kernel) dimensions of this convolution,
// derived from the padding attribute. check_attribute_size()
// first verifies padding/stride/dilation all have the same length,
// so padding.size() is representative of all three.
size_t kdims() const
{
    check_attribute_size();
    return padding.size();
}
}; };
} // namespace op } // namespace op
......
...@@ -37,13 +37,19 @@ struct quant_convolution ...@@ -37,13 +37,19 @@ struct quant_convolution
} }
std::string name() const { return "quant_convolution"; } std::string name() const { return "quant_convolution"; }
shape compute_shape(std::vector<shape> inputs) const
void check_attribute_size() const
{ {
check_shapes{inputs, *this}.has(2).same_type().same_ndims().min_ndims(3);
if(not(padding.size() == stride.size() and padding.size() == dilation.size())) if(not(padding.size() == stride.size() and padding.size() == dilation.size()))
{ {
MIGRAPHX_THROW("quant_convolution: inconsistent attribute sizes"); MIGRAPHX_THROW("quant_convolution: inconsistent attribute sizes");
} }
}
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this}.has(2).same_type().same_ndims().min_ndims(3);
check_attribute_size();
const shape& input = inputs.at(0); const shape& input = inputs.at(0);
const shape& weights = inputs.at(1); const shape& weights = inputs.at(1);
...@@ -71,6 +77,12 @@ struct quant_convolution ...@@ -71,6 +77,12 @@ struct quant_convolution
return {t, output_lens}; return {t, output_lens};
} }
// Number of spatial (kernel) dimensions of this quantized convolution,
// derived from the padding attribute. check_attribute_size() first
// verifies padding/stride/dilation all have the same length, so
// padding.size() is representative of all three.
size_t kdims() const
{
    check_attribute_size();
    return padding.size();
}
}; };
} // namespace op } // namespace op
......
...@@ -9,15 +9,32 @@ namespace gpu { ...@@ -9,15 +9,32 @@ namespace gpu {
shape miopen_convolution::compute_shape(const std::vector<shape>& inputs) const shape miopen_convolution::compute_shape(const std::vector<shape>& inputs) const
{ {
check_shapes{inputs, *this}.has(4).standard(); check_shapes{inputs, *this}.has(4).standard();
return op.compute_shape({inputs.at(0), inputs.at(1)}); std::vector<shape> conv_inputs(inputs.begin(), inputs.begin() + 2);
check_shapes{conv_inputs, *this}.max_ndims(5);
return op.compute_shape(conv_inputs);
} }
// Promote a 1-D convolution tensor layout to 2-D for MIOpen.
// A rank-3 shape (N, C, W) becomes (N, C, 1, W) by inserting a
// unit dimension at axis 2; any other rank is returned unchanged.
inline shape recompute_shape_to_2d(const shape& input)
{
    auto lens = input.lens();
    if(lens.size() != 3)
        return input;
    lens.insert(lens.begin() + 2, 1);
    return shape{input.type(), lens};
}
argument miopen_convolution::compute(context& ctx, argument miopen_convolution::compute(context& ctx,
const shape& output_shape, const shape& output_shape,
const std::vector<argument>& args) const const std::vector<argument>& args) const
{ {
auto x_desc = make_tensor(args[0].get_shape()); auto x_desc = make_tensor(recompute_shape_to_2d(args[0].get_shape()));
auto w_desc = make_tensor(args[1].get_shape()); auto w_desc = make_tensor(recompute_shape_to_2d(args[1].get_shape()));
auto y_desc = make_tensor(output_shape); auto y_desc = make_tensor(recompute_shape_to_2d(output_shape));
float alpha = 1; float alpha = 1;
float beta = 0; float beta = 0;
...@@ -44,9 +61,10 @@ shape miopen_convolution::compile(context& ctx, ...@@ -44,9 +61,10 @@ shape miopen_convolution::compile(context& ctx,
std::vector<shape> inputs) std::vector<shape> inputs)
{ {
shape workspace_shape{}; shape workspace_shape{};
auto x_desc = make_tensor(inputs[0]);
auto w_desc = make_tensor(inputs[1]); auto x_desc = make_tensor(recompute_shape_to_2d(inputs[0]));
auto y_desc = make_tensor(output_shape); auto w_desc = make_tensor(recompute_shape_to_2d(inputs[1]));
auto y_desc = make_tensor(recompute_shape_to_2d(output_shape));
std::size_t workspace_size = 0; std::size_t workspace_size = 0;
miopenConvolutionForwardGetWorkSpaceSize(ctx.get_stream().get_miopen(), miopenConvolutionForwardGetWorkSpaceSize(ctx.get_stream().get_miopen(),
......
...@@ -82,14 +82,18 @@ inline convolution_descriptor make_conv(const T& op) ...@@ -82,14 +82,18 @@ inline convolution_descriptor make_conv(const T& op)
miopenConvolutionMode_t c_mode = miopenConvolution; miopenConvolutionMode_t c_mode = miopenConvolution;
if(op.group > 1) if(op.group > 1)
c_mode = miopenGroupConv; c_mode = miopenGroupConv;
miopenInitConvolutionDescriptor(c.get(),
c_mode, int kdims = op.kdims();
op.padding[0], std::vector<int> padding(std::max(2, kdims), 0);
op.padding[1], std::vector<int> stride(std::max(2, kdims), 1);
op.stride[0], std::vector<int> dilation(std::max(2, kdims), 1);
op.stride[1],
op.dilation[0], std::copy_backward(op.padding.begin(), op.padding.end(), padding.end());
op.dilation[1]); std::copy_backward(op.stride.begin(), op.stride.end(), stride.end());
std::copy_backward(op.dilation.begin(), op.dilation.end(), dilation.end());
miopenInitConvolutionNdDescriptor(
c.get(), padding.size(), padding.data(), stride.data(), dilation.data(), c_mode);
if(op.group > 1) if(op.group > 1)
miopenSetConvolutionGroupCount(c.get(), op.group); miopenSetConvolutionGroupCount(c.get(), op.group);
return c; return c;
......
...@@ -96,6 +96,7 @@ TEST_CASE(quant_convolution_shape) ...@@ -96,6 +96,7 @@ TEST_CASE(quant_convolution_shape)
migraphx::shape weights{migraphx::shape::int8_type, {4, 3, 3, 3}}; migraphx::shape weights{migraphx::shape::int8_type, {4, 3, 3, 3}};
expect_shape(output, migraphx::op::quant_convolution{}, input, weights); expect_shape(output, migraphx::op::quant_convolution{}, input, weights);
throws_shape(migraphx::op::quant_convolution{}, input); throws_shape(migraphx::op::quant_convolution{}, input);
throws_shape(migraphx::op::quant_convolution{{0}, {1, 1}, {1, 1}}, input, weights);
migraphx::shape input2{migraphx::shape::int32_type, {3, 3}}; migraphx::shape input2{migraphx::shape::int32_type, {3, 3}};
migraphx::shape weights2{migraphx::shape::float_type, {3, 3}}; migraphx::shape weights2{migraphx::shape::float_type, {3, 3}};
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment