Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
MIGraphX
Commits
2c952efd
Commit
2c952efd
authored
Dec 10, 2021
by
Paul
Browse files
Don't provide output for return instruction
parent
032af369
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
17 additions
and
10 deletions
+17
-10
src/targets/gpu/mlir.cpp
src/targets/gpu/mlir.cpp
+13
-5
test/gpu/mlir.cpp
test/gpu/mlir.cpp
+4
-5
No files found.
src/targets/gpu/mlir.cpp
View file @
2c952efd
...
@@ -183,11 +183,15 @@ struct mlir_program
...
@@ -183,11 +183,15 @@ struct mlir_program
/// Build an MLIR integer attribute from a signed 64-bit value.
///
/// The attribute uses a signless i64 type (mlirIntegerTypeGet), not a
/// signed one: with a signless type a negative value would be
/// indistinguishable from a large unsigned value holding the same bit
/// pattern, so negatives are rejected outright.
///
/// @param i  non-negative value to encode
/// @return   an MlirAttribute wrapping `i` as a signless i64
/// @throws   migraphx::exception (via MIGRAPHX_THROW) when `i < 0`
MlirAttribute attribute(std::int64_t i) const
{
    if(i < 0)
        MIGRAPHX_THROW("MLIR cant handle negative values since they are ambiguous");
    return mlirIntegerAttrGet(mlirIntegerTypeGet(ctx.get(), 64), i);
}
/// Build an MLIR integer attribute from an unsigned 64-bit value.
///
/// The attribute uses a signless i64 type (mlirIntegerTypeGet), so any
/// value with the top bit set would read back as negative. Values above
/// max()/2 (i.e. outside the non-negative range of int64_t) are
/// therefore rejected as ambiguous.
///
/// @param i  value to encode; must fit in the non-negative half of i64
/// @return   an MlirAttribute wrapping `i` as a signless i64
/// @throws   migraphx::exception (via MIGRAPHX_THROW) when `i` exceeds
///           std::numeric_limits<std::uint64_t>::max() / 2
MlirAttribute attribute(std::uint64_t i) const
{
    if(i > (std::numeric_limits<std::uint64_t>::max() / 2))
        MIGRAPHX_THROW("MLIR cant handle large integer values since they are ambiguous");
    return mlirIntegerAttrGet(mlirIntegerTypeGet(ctx.get(), 64), i);
}
/// Build an MLIR integer attribute from an unsigned char.
/// Widens the value to 64-bit unsigned and delegates to that overload,
/// which performs the range validation.
MlirAttribute attribute(unsigned char i) const
{
    return attribute(static_cast<std::uint64_t>(i));
}
/// Build an MLIR boolean attribute.
/// mlirBoolAttrGet takes an int flag, so the bool is converted
/// explicitly before the call.
MlirAttribute attribute(bool b) const
{
    const int flag = static_cast<int>(b);
    return mlirBoolAttrGet(ctx.get(), flag);
}
...
@@ -433,7 +437,8 @@ struct mlir_program
...
@@ -433,7 +437,8 @@ struct mlir_program
auto
name
=
get_name
(
ins
);
auto
name
=
get_name
(
ins
);
auto
ops
=
create_operation_state
(
name
);
auto
ops
=
create_operation_state
(
name
);
ops
.
add_attribute_value
(
ins
->
get_operator
().
to_value
());
ops
.
add_attribute_value
(
ins
->
get_operator
().
to_value
());
ops
.
add_results
({
get_shape
(
ins
)});
if
(
ins
->
name
()
!=
"@return"
)
ops
.
add_results
({
get_shape
(
ins
)});
std
::
vector
<
MlirValue
>
inputs
;
std
::
vector
<
MlirValue
>
inputs
;
transform
(
transform
(
...
@@ -441,8 +446,11 @@ struct mlir_program
...
@@ -441,8 +446,11 @@ struct mlir_program
ops
.
add_operands
(
inputs
);
ops
.
add_operands
(
inputs
);
auto
outputs
=
insert
(
fbody
,
std
::
move
(
ops
));
auto
outputs
=
insert
(
fbody
,
std
::
move
(
ops
));
assert
(
outputs
.
size
()
==
1
);
if
(
ins
->
name
()
!=
"@return"
)
ins_map
[
ins
]
=
outputs
.
front
();
{
assert
(
outputs
.
size
()
==
1
);
ins_map
[
ins
]
=
outputs
.
front
();
}
}
}
}
}
...
...
test/gpu/mlir.cpp
View file @
2c952efd
...
@@ -33,9 +33,8 @@ TEST_CASE(conv)
...
@@ -33,9 +33,8 @@ TEST_CASE(conv)
const
std
::
string
mlir_output
=
R"__migraphx__(
const
std
::
string
mlir_output
=
R"__migraphx__(
module {
module {
func @main(%arg0: tensor<1x8x4x4xf32>, %arg1: tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32> {
func @main(%arg0: tensor<1x8x4x4xf32>, %arg1: tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32> {
%0 = migraphx.convolution(%arg0, %arg1) {dilation = [1 : ui64, 1 : ui64], group = 1 : si64, padding = [0 : ui64, 0 : ui64, 0 : ui64, 0 : ui64], padding_mode = 0 : ui64, stride
%0 = migraphx.convolution(%arg0, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
= [1 : ui64, 1 : ui64]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
return %0 : tensor<1x2x2x2xf32>
%1 = return %0 : tensor<1x2x2x2xf32>
}
}
}
}
)__migraphx__"
;
)__migraphx__"
;
...
@@ -58,10 +57,10 @@ TEST_CASE(conv_add_relu)
...
@@ -58,10 +57,10 @@ TEST_CASE(conv_add_relu)
const
std
::
string
mlir_output
=
R"__migraphx__(
const
std
::
string
mlir_output
=
R"__migraphx__(
module {
module {
func @main(%arg0: tensor<1x8x4x4xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32> {
func @main(%arg0: tensor<1x8x4x4xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32> {
%0 = migraphx.convolution(%arg0, %arg1) {dilation = [1
: ui64, 1 : ui64
], group = 1 :
s
i64, padding = [0
: ui64, 0 : ui64, 0 : ui64, 0 : ui64
], padding_mode = 0 :
u
i64, stride = [1
: ui64, 1 : ui64
]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
%0 = migraphx.convolution(%arg0, %arg1) {dilation = [1
, 1
], group = 1 : i64, padding = [0
, 0, 0, 0
], padding_mode = 0 : i64, stride = [1
, 1
]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
%1 = migraphx.add(%0, %arg2) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
%1 = migraphx.add(%0, %arg2) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
%2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
%2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
%3 =
return %2 : tensor<1x2x2x2xf32>
return %2 : tensor<1x2x2x2xf32>
}
}
}
}
)__migraphx__"
;
)__migraphx__"
;
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment