gaoqiong / MIGraphX · Commits
Commit b406a418
Authored Dec 01, 2021 by Paul

Handle unsigned integers

Parent: 1851e975
Showing 3 changed files with 11 additions and 6 deletions (+11 -6)
src/include/migraphx/op/convolution.hpp   +1 -1
src/targets/gpu/mlir.cpp                  +5 -2
test/gpu/mlir.cpp                         +5 -3
src/include/migraphx/op/convolution.hpp

@@ -20,7 +20,7 @@ namespace op {
 struct convolution
 {
-    std::vector<std::size_t> padding  = {0, 0};
+    std::vector<std::size_t> padding  = {0, 0, 0, 0};
     std::vector<std::size_t> stride   = {1, 1};
     std::vector<std::size_t> dilation = {1, 1};
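The convolution attributes above (padding, stride, dilation) are std::vector<std::size_t>, i.e. unsigned values, and the default padding now carries four entries instead of two. When these values are turned into MLIR attributes they presumably pass through the attribute() overloads changed in src/targets/gpu/mlir.cpp below, which is why the expected test output later in this commit switches those fields to ui64. A minimal sketch of which overload a std::size_t element selects; the print-only attribute() functions are hypothetical stand-ins, and it assumes an LP64/LLP64 target (such as the Linux platforms MIGraphX builds for) where std::size_t is the same 64-bit unsigned type as std::uint64_t:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical print-only stand-ins for the attribute() overloads in mlir.cpp;
// they only report which overload is selected for a given argument type.
void attribute(std::int64_t)  { std::cout << "signed 64-bit overload\n"; }
void attribute(std::uint64_t) { std::cout << "unsigned 64-bit overload\n"; }

int main()
{
    // Same defaults as the updated convolution op.
    std::vector<std::size_t> padding = {0, 0, 0, 0};

    // On the assumed target std::size_t matches std::uint64_t exactly,
    // so each element resolves to the unsigned overload.
    for(std::size_t p : padding)
        attribute(p);
}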
src/targets/gpu/mlir.cpp

@@ -185,8 +185,11 @@ struct mlir_program
     {
         return mlirIntegerAttrGet(mlirIntegerTypeSignedGet(ctx.get(), 64), i);
     }
-    MlirAttribute attribute(std::uint64_t i) const { return attribute(std::int64_t(i)); }
-    MlirAttribute attribute(unsigned char i) const { return attribute(std::int64_t(i)); }
+    MlirAttribute attribute(std::uint64_t i) const
+    {
+        return mlirIntegerAttrGet(mlirIntegerTypeUnsignedGet(ctx.get(), 64), i);
+    }
+    MlirAttribute attribute(unsigned char i) const { return attribute(std::uint64_t(i)); }
     MlirAttribute attribute(bool b) const { return mlirBoolAttrGet(ctx.get(), b ? 1 : 0); }
     MlirAttribute attribute(double d) const
     {
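For context on why the new overload matters: the removed attribute(std::uint64_t) routed the value through std::int64_t before building a signed 64-bit attribute, so an unsigned value above INT64_MAX would have come out as a negative signed attribute, whereas the new overload builds an unsigned 64-bit attribute directly. A minimal standalone sketch of the two conversion paths; plain C++, independent of the MIGraphX/MLIR code above, with illustrative helper names only:

#include <cstdint>
#include <iostream>

// old_path models the removed overload: the value is squeezed through int64_t.
// new_path models the added overload: the value stays unsigned.
std::int64_t  old_path(std::uint64_t v) { return static_cast<std::int64_t>(v); }
std::uint64_t new_path(std::uint64_t v) { return v; }

int main()
{
    std::uint64_t big = 0x8000000000000001ull; // greater than INT64_MAX

    std::cout << "via int64_t : " << old_path(big) << '\n'; // wraps to a negative value
    std::cout << "via uint64_t: " << new_path(big) << '\n'; // value preserved
}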
test/gpu/mlir.cpp

@@ -33,8 +33,8 @@ TEST_CASE(conv)
     const std::string mlir_output = R"__migraphx__(
 module {
   func @main(%arg0: tensor<1x8x4x4xf32>, %arg1: tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32> {
-    %0 = migraphx.convolution(%arg0, %arg1) {dilation = [1 : si64, 1 : si64], group = 1 : si64, padding = [0 : si64, 0 : si64], padding_mode = 0 : si64, stride = [1 : si64, 1 : si64]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
+    %0 = migraphx.convolution(%arg0, %arg1) {dilation = [1 : ui64, 1 : ui64], group = 1 : si64, padding = [0 : ui64, 0 : ui64, 0 : ui64, 0 : ui64], padding_mode = 0 : ui64, stride = [1 : ui64, 1 : ui64]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
     %1 = return %0 : tensor<1x2x2x2xf32>
   }
 }
@@ -48,6 +48,7 @@ si64]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
     // Skip test if MLIR is not enabled
     if(s.empty())
         return;
+    std::cout << s << std::endl;
     EXPECT(encode(s) == encode(mlir_output));
     auto op = migraphx::gpu::compile_mlir(m);
 }
@@ -57,7 +58,7 @@ TEST_CASE(conv_add_relu)
     const std::string mlir_output = R"__migraphx__(
 module {
   func @main(%arg0: tensor<1x8x4x4xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32> {
-    %0 = migraphx.convolution(%arg0, %arg1) {dilation = [1 : si64, 1 : si64], group = 1 : si64, padding = [0 : si64, 0 : si64], padding_mode = 0 : si64, stride = [1 : si64, 1 : si64]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
+    %0 = migraphx.convolution(%arg0, %arg1) {dilation = [1 : ui64, 1 : ui64], group = 1 : si64, padding = [0 : ui64, 0 : ui64, 0 : ui64, 0 : ui64], padding_mode = 0 : ui64, stride = [1 : ui64, 1 : ui64]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
     %1 = migraphx.add(%0, %arg2) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
     %2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
     %3 = return %2 : tensor<1x2x2x2xf32>
@@ -76,6 +77,7 @@ module {
     // Skip test if MLIR is not enabled
     if(s.empty())
         return;
+    std::cout << s << std::endl;
     EXPECT(encode(s) == encode(mlir_output));
 }
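In the expected MLIR strings, dilation, padding, padding_mode, and stride switch from si64 to ui64 (and padding grows to four entries), while group stays si64, matching the new unsigned attribute path in mlir.cpp. As a rough illustration of where those suffixes come from, here is a small sketch against the MLIR C API; this is not MIGraphX code, and the exact printed form can vary by MLIR version:

#include <mlir-c/BuiltinAttributes.h>
#include <mlir-c/BuiltinTypes.h>
#include <mlir-c/IR.h>

int main()
{
    MlirContext ctx = mlirContextCreate();

    // A signed 64-bit integer attribute prints with an "si64" suffix...
    MlirAttribute s = mlirIntegerAttrGet(mlirIntegerTypeSignedGet(ctx, 64), 1);
    mlirAttributeDump(s); // e.g. 1 : si64

    // ...while an unsigned 64-bit integer attribute prints with "ui64",
    // which is what the updated test expectations look for.
    MlirAttribute u = mlirIntegerAttrGet(mlirIntegerTypeUnsignedGet(ctx, 64), 1);
    mlirAttributeDump(u); // e.g. 1 : ui64

    mlirContextDestroy(ctx);
}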