Commit b406a418 authored by Paul

Handle unsigned integers

parent 1851e975
@@ -20,7 +20,7 @@ namespace op {
 struct convolution
 {
-    std::vector<std::size_t> padding = {0, 0};
+    std::vector<std::size_t> padding = {0, 0, 0, 0};
     std::vector<std::size_t> stride = {1, 1};
     std::vector<std::size_t> dilation = {1, 1};
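Side note on the hunk above: the padding default grows from two entries to four so that asymmetric padding can be described per side of each spatial dimension, which is also why the expected MLIR in the tests below lists four zeros. A small illustrative helper, assuming the common begins-then-ends flat layout (the authoritative ordering is defined by the convolution operator itself, not here):

#include <cstddef>
#include <vector>

// Illustrative only: flattens per-dimension begin/end padding into an
// assumed {d0_begin, d1_begin, ..., d0_end, d1_end, ...} layout. The old
// two-entry default {0, 0} could only express symmetric padding; the new
// four-entry default {0, 0, 0, 0} covers both sides of each 2D dimension.
std::vector<std::size_t> flat_padding(const std::vector<std::size_t>& begins,
                                      const std::vector<std::size_t>& ends)
{
    std::vector<std::size_t> result = begins;
    result.insert(result.end(), ends.begin(), ends.end());
    return result; // flat_padding({0, 0}, {0, 0}) == {0, 0, 0, 0}
}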
@@ -185,8 +185,11 @@ struct mlir_program
     {
         return mlirIntegerAttrGet(mlirIntegerTypeSignedGet(ctx.get(), 64), i);
     }
-    MlirAttribute attribute(std::uint64_t i) const { return attribute(std::int64_t(i)); }
-    MlirAttribute attribute(unsigned char i) const { return attribute(std::int64_t(i)); }
+    MlirAttribute attribute(std::uint64_t i) const
+    {
+        return mlirIntegerAttrGet(mlirIntegerTypeUnsignedGet(ctx.get(), 64), i);
+    }
+    MlirAttribute attribute(unsigned char i) const { return attribute(std::uint64_t(i)); }
     MlirAttribute attribute(bool b) const { return mlirBoolAttrGet(ctx.get(), b ? 1 : 0); }
     MlirAttribute attribute(double d) const
     {
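Note on the attribute change: previously std::uint64_t and unsigned char values were funneled through the signed overload and stamped with an si64 type; now they get a genuine ui64 type, which is what makes the expected MLIR in the tests below print "0 : ui64" instead of "0 : si64". A minimal standalone sketch against the MLIR C API (the context setup, values, and dump calls are illustrative, not part of this commit):

#include "mlir-c/BuiltinAttributes.h"
#include "mlir-c/BuiltinTypes.h"
#include "mlir-c/IR.h"

#include <cstdint>

int main()
{
    MlirContext ctx = mlirContextCreate();

    // Old behaviour: unsigned values were cast to std::int64_t and
    // attached to a signed 64-bit type, printing as "... : si64".
    MlirAttribute signed_attr =
        mlirIntegerAttrGet(mlirIntegerTypeSignedGet(ctx, 64), 0);

    // New behaviour: the value is attached to an unsigned 64-bit type,
    // printing as "... : ui64". Note that mlirIntegerAttrGet takes an
    // int64_t, so a uint64_t argument is converted to the same bit
    // pattern before being attached.
    std::uint64_t value = 0;
    MlirAttribute unsigned_attr = mlirIntegerAttrGet(
        mlirIntegerTypeUnsignedGet(ctx, 64), static_cast<std::int64_t>(value));

    mlirAttributeDump(signed_attr);   // 0 : si64
    mlirAttributeDump(unsigned_attr); // 0 : ui64

    mlirContextDestroy(ctx);
}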
@@ -33,8 +33,8 @@ TEST_CASE(conv)
     const std::string mlir_output = R"__migraphx__(
 module {
 func @main(%arg0: tensor<1x8x4x4xf32>, %arg1: tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32> {
-%0 = migraphx.convolution(%arg0, %arg1) {dilation = [1 : si64, 1 : si64], group = 1 : si64, padding = [0 : si64, 0 : si64], padding_mode = 0 : si64, stride = [1 : si64, 1 :
-si64]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
+%0 = migraphx.convolution(%arg0, %arg1) {dilation = [1 : ui64, 1 : ui64], group = 1 : si64, padding = [0 : ui64, 0 : ui64, 0 : ui64, 0 : ui64], padding_mode = 0 : ui64, stride
+= [1 : ui64, 1 : ui64]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
 %1 = return %0 : tensor<1x2x2x2xf32>
 }
 }
@@ -48,6 +48,7 @@ si64]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
     // Skip test if MLIR is not enabled
     if(s.empty())
         return;
+    std::cout << s << std::endl;
     EXPECT(encode(s) == encode(mlir_output));
     auto op = migraphx::gpu::compile_mlir(m);
 }
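The comparison goes through encode, which is defined elsewhere in the test file and is not part of this diff; it presumably canonicalizes whitespace so that the wrapped raw-string literal above compares equal to MLIR's single-line printer output. A hypothetical stand-in with that assumed behaviour:

#include <cctype>
#include <string>

// Hypothetical stand-in for the test's encode helper; the real one is
// defined elsewhere and may differ. Collapses every whitespace run to a
// single space and trims the ends, so line wrapping inside the expected
// string does not affect the comparison.
std::string encode_sketch(const std::string& s)
{
    std::string result;
    bool pending_space = false;
    for(char c : s)
    {
        if(std::isspace(static_cast<unsigned char>(c)) != 0)
        {
            pending_space = not result.empty();
        }
        else
        {
            if(pending_space)
                result += ' ';
            result += c;
            pending_space = false;
        }
    }
    return result;
}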
@@ -57,7 +58,7 @@ TEST_CASE(conv_add_relu)
     const std::string mlir_output = R"__migraphx__(
 module {
 func @main(%arg0: tensor<1x8x4x4xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32> {
-%0 = migraphx.convolution(%arg0, %arg1) {dilation = [1 : si64, 1 : si64], group = 1 : si64, padding = [0 : si64, 0 : si64], padding_mode = 0 : si64, stride = [1 : si64, 1 : si64]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
+%0 = migraphx.convolution(%arg0, %arg1) {dilation = [1 : ui64, 1 : ui64], group = 1 : si64, padding = [0 : ui64, 0 : ui64, 0 : ui64, 0 : ui64], padding_mode = 0 : ui64, stride = [1 : ui64, 1 : ui64]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
 %1 = migraphx.add(%0, %arg2) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
 %2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
 %3 = return %2 : tensor<1x2x2x2xf32>
@@ -76,6 +77,7 @@ module {
     // Skip test if MLIR is not enabled
     if(s.empty())
         return;
+    std::cout << s << std::endl;
     EXPECT(encode(s) == encode(mlir_output));
 }