Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
MIGraphX
Commits
b34a8e60
Commit
b34a8e60
authored
Nov 14, 2023
by
Nives Vukovic
Browse files
Implement layout attribute support for RNN operator
parent
0039b11a
Changes
16
Expand all
Hide whitespace changes
Inline
Side-by-side
Showing
16 changed files
with
2045 additions
and
193 deletions
+2045
-193
src/onnx/parse_rnn.cpp
src/onnx/parse_rnn.cpp
+39
-0
test/onnx/gen_onnx.py
test/onnx/gen_onnx.py
+124
-0
test/onnx/onnx_rnn_test.cpp
test/onnx/onnx_rnn_test.cpp
+227
-0
test/onnx/rnn_bi_layout_test.onnx
test/onnx/rnn_bi_layout_test.onnx
+0
-0
test/onnx/rnn_f_5arg_layout_test.onnx
test/onnx/rnn_f_5arg_layout_test.onnx
+0
-0
test/onnx/rnn_f_layout_test.onnx
test/onnx/rnn_f_layout_test.onnx
+0
-0
test/onnx/rnn_r_3arg_layout_test.onnx
test/onnx/rnn_r_3arg_layout_test.onnx
+0
-0
test/onnx/rnn_r_layout_test.onnx
test/onnx/rnn_r_layout_test.onnx
+0
-0
test/py/onnx_backend_test.py
test/py/onnx_backend_test.py
+0
-1
test/ref/rnn_ops.cpp
test/ref/rnn_ops.cpp
+1149
-192
test/verify/test_rnn_4args_layout.cpp
test/verify/test_rnn_4args_layout.cpp
+79
-0
test/verify/test_rnn_bi_3args_layout.cpp
test/verify/test_rnn_bi_3args_layout.cpp
+77
-0
test/verify/test_rnn_bidirectional_layout.cpp
test/verify/test_rnn_bidirectional_layout.cpp
+86
-0
test/verify/test_rnn_forward_layout.cpp
test/verify/test_rnn_forward_layout.cpp
+88
-0
test/verify/test_rnn_reverse_layout.cpp
test/verify/test_rnn_reverse_layout.cpp
+85
-0
test/verify/test_rnn_sql_1_layout.cpp
test/verify/test_rnn_sql_1_layout.cpp
+91
-0
No files found.
src/onnx/parse_rnn.cpp
View file @
b34a8e60
...
@@ -33,6 +33,29 @@ namespace migraphx {
...
@@ -33,6 +33,29 @@ namespace migraphx {
inline
namespace
MIGRAPHX_INLINE_NS
{
inline
namespace
MIGRAPHX_INLINE_NS
{
namespace
onnx
{
namespace
onnx
{
void
rnn_transpose_inputs
(
onnx_parser
::
node_info
&
info
,
std
::
vector
<
instruction_ref
>&
args
)
{
std
::
vector
<
int64_t
>
perm
{
1
,
0
,
2
};
args
[
0
]
=
info
.
add_instruction
(
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
args
[
0
]);
if
(
args
.
size
()
==
6
and
not
args
[
5
]
->
is_undefined
())
{
args
[
5
]
=
info
.
add_instruction
(
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
args
[
5
]);
}
}
void
rnn_transpose_outputs
(
onnx_parser
::
node_info
&
info
,
instruction_ref
&
hidden_states
,
instruction_ref
&
last_output
)
{
std
::
vector
<
int64_t
>
perm_hs
{
2
,
0
,
1
,
3
};
hidden_states
=
info
.
add_instruction
(
make_op
(
"transpose"
,
{{
"permutation"
,
perm_hs
}}),
hidden_states
);
std
::
vector
<
int64_t
>
perm_last
{
1
,
0
,
2
};
last_output
=
info
.
add_instruction
(
make_op
(
"transpose"
,
{{
"permutation"
,
perm_last
}}),
last_output
);
}
struct
parse_rnn
:
op_parser
<
parse_rnn
>
struct
parse_rnn
:
op_parser
<
parse_rnn
>
{
{
std
::
vector
<
op_desc
>
operators
()
const
{
return
{{
"RNN"
}};
}
std
::
vector
<
op_desc
>
operators
()
const
{
return
{{
"RNN"
}};
}
...
@@ -116,6 +139,12 @@ struct parse_rnn : op_parser<parse_rnn>
...
@@ -116,6 +139,12 @@ struct parse_rnn : op_parser<parse_rnn>
clip
=
parser
.
parse_value
(
info
.
attributes
.
at
(
"clip"
)).
at
<
float
>
();
clip
=
parser
.
parse_value
(
info
.
attributes
.
at
(
"clip"
)).
at
<
float
>
();
}
}
int
layout
=
0
;
if
(
contains
(
info
.
attributes
,
"layout"
))
{
layout
=
parser
.
parse_value
(
info
.
attributes
.
at
(
"layout"
)).
at
<
int
>
();
}
// if the number of arguments is less than 6, append
// if the number of arguments is less than 6, append
// undefined operator to have 6 arguments
// undefined operator to have 6 arguments
if
(
args
.
size
()
<
6
)
if
(
args
.
size
()
<
6
)
...
@@ -124,6 +153,11 @@ struct parse_rnn : op_parser<parse_rnn>
...
@@ -124,6 +153,11 @@ struct parse_rnn : op_parser<parse_rnn>
args
.
insert
(
args
.
end
(),
(
6
-
args
.
size
()),
ins
);
args
.
insert
(
args
.
end
(),
(
6
-
args
.
size
()),
ins
);
}
}
if
(
layout
!=
0
)
{
rnn_transpose_inputs
(
info
,
args
);
}
// first output for the concatenation of hidden states
// first output for the concatenation of hidden states
auto
hidden_states
=
info
.
add_instruction
(
make_op
(
"rnn"
,
auto
hidden_states
=
info
.
add_instruction
(
make_op
(
"rnn"
,
{{
"hidden_size"
,
hidden_size
},
{{
"hidden_size"
,
hidden_size
},
...
@@ -135,6 +169,11 @@ struct parse_rnn : op_parser<parse_rnn>
...
@@ -135,6 +169,11 @@ struct parse_rnn : op_parser<parse_rnn>
// second output for the last hidden state
// second output for the last hidden state
auto
last_output
=
info
.
add_instruction
(
make_op
(
"rnn_last_hs_output"
),
hidden_states
);
auto
last_output
=
info
.
add_instruction
(
make_op
(
"rnn_last_hs_output"
),
hidden_states
);
if
(
layout
!=
0
)
{
rnn_transpose_outputs
(
info
,
hidden_states
,
last_output
);
}
return
{
hidden_states
,
last_output
};
return
{
hidden_states
,
last_output
};
}
}
};
};
...
...
test/onnx/gen_onnx.py
View file @
b34a8e60
...
@@ -7223,6 +7223,130 @@ def reversesequence_time_test():
...
@@ -7223,6 +7223,130 @@ def reversesequence_time_test():
return
([
node
],
[
x
],
[
y
])
return
([
node
],
[
x
],
[
y
])
@
onnx_test
()
def
rnn_bi_layout_test
():
seq
=
helper
.
make_tensor_value_info
(
'seq'
,
TensorProto
.
FLOAT
,
[
3
,
5
,
10
])
w
=
helper
.
make_tensor_value_info
(
'w'
,
TensorProto
.
FLOAT
,
[
2
,
20
,
10
])
r
=
helper
.
make_tensor_value_info
(
'r'
,
TensorProto
.
FLOAT
,
[
2
,
20
,
20
])
bias
=
helper
.
make_tensor_value_info
(
'bias'
,
TensorProto
.
FLOAT
,
[
2
,
40
])
seq_len
=
helper
.
make_tensor_value_info
(
'seq_len'
,
TensorProto
.
INT32
,
[
3
])
h0
=
helper
.
make_tensor_value_info
(
'h0'
,
TensorProto
.
FLOAT
,
[
3
,
2
,
20
])
hs
=
helper
.
make_tensor_value_info
(
'hs'
,
TensorProto
.
FLOAT
,
[
3
,
5
,
2
,
20
])
output
=
helper
.
make_tensor_value_info
(
'output'
,
TensorProto
.
FLOAT
,
[
3
,
2
,
20
])
node
=
onnx
.
helper
.
make_node
(
'RNN'
,
inputs
=
[
'seq'
,
'w'
,
'r'
,
'bias'
,
'seq_len'
,
'h0'
],
outputs
=
[
'hs'
,
'output'
],
activations
=
[
'tanh'
,
'sigmoid'
],
clip
=
0
,
direction
=
'bidirectional'
,
hidden_size
=
20
,
layout
=
1
)
return
([
node
],
[
seq
,
w
,
r
,
bias
,
seq_len
,
h0
],
[
hs
,
output
])
@
onnx_test
()
def
rnn_f_layout_test
():
seq
=
helper
.
make_tensor_value_info
(
'seq'
,
TensorProto
.
FLOAT
,
[
3
,
5
,
10
])
w
=
helper
.
make_tensor_value_info
(
'w'
,
TensorProto
.
FLOAT
,
[
1
,
20
,
10
])
r
=
helper
.
make_tensor_value_info
(
'r'
,
TensorProto
.
FLOAT
,
[
1
,
20
,
20
])
bias
=
helper
.
make_tensor_value_info
(
'bias'
,
TensorProto
.
FLOAT
,
[
1
,
40
])
seq_len
=
helper
.
make_tensor_value_info
(
'seq_len'
,
TensorProto
.
INT32
,
[
3
])
h0
=
helper
.
make_tensor_value_info
(
'h0'
,
TensorProto
.
FLOAT
,
[
3
,
1
,
20
])
hs
=
helper
.
make_tensor_value_info
(
'hs'
,
TensorProto
.
FLOAT
,
[
3
,
5
,
1
,
20
])
output
=
helper
.
make_tensor_value_info
(
'output'
,
TensorProto
.
FLOAT
,
[
3
,
1
,
20
])
node
=
onnx
.
helper
.
make_node
(
'RNN'
,
inputs
=
[
'seq'
,
'w'
,
'r'
,
'bias'
,
'seq_len'
,
'h0'
],
outputs
=
[
'hs'
,
'output'
],
activations
=
[
'tanh'
,
'sigmoid'
],
clip
=
0
,
direction
=
'forward'
,
hidden_size
=
20
,
layout
=
1
)
return
([
node
],
[
seq
,
w
,
r
,
bias
,
seq_len
,
h0
],
[
hs
,
output
])
@
onnx_test
()
def
rnn_f_5arg_layout_test
():
seq
=
helper
.
make_tensor_value_info
(
'seq'
,
TensorProto
.
FLOAT
,
[
3
,
5
,
10
])
w
=
helper
.
make_tensor_value_info
(
'w'
,
TensorProto
.
FLOAT
,
[
1
,
20
,
10
])
r
=
helper
.
make_tensor_value_info
(
'r'
,
TensorProto
.
FLOAT
,
[
1
,
20
,
20
])
bias
=
helper
.
make_tensor_value_info
(
'bias'
,
TensorProto
.
FLOAT
,
[
1
,
40
])
seq_len
=
helper
.
make_tensor_value_info
(
'seq_len'
,
TensorProto
.
INT32
,
[
3
])
hs
=
helper
.
make_tensor_value_info
(
'hs'
,
TensorProto
.
FLOAT
,
[
3
,
5
,
1
,
20
])
output
=
helper
.
make_tensor_value_info
(
'output'
,
TensorProto
.
FLOAT
,
[
3
,
1
,
20
])
node
=
onnx
.
helper
.
make_node
(
'RNN'
,
inputs
=
[
'seq'
,
'w'
,
'r'
,
'bias'
,
'seq_len'
],
outputs
=
[
'hs'
,
'output'
],
activations
=
[
'tanh'
,
'sigmoid'
],
clip
=
0
,
direction
=
'forward'
,
hidden_size
=
20
,
layout
=
1
)
return
([
node
],
[
seq
,
w
,
r
,
bias
,
seq_len
],
[
hs
,
output
])
@
onnx_test
()
def
rnn_r_layout_test
():
seq
=
helper
.
make_tensor_value_info
(
'seq'
,
TensorProto
.
FLOAT
,
[
3
,
5
,
10
])
w
=
helper
.
make_tensor_value_info
(
'w'
,
TensorProto
.
FLOAT
,
[
1
,
20
,
10
])
r
=
helper
.
make_tensor_value_info
(
'r'
,
TensorProto
.
FLOAT
,
[
1
,
20
,
20
])
bias
=
helper
.
make_tensor_value_info
(
'bias'
,
TensorProto
.
FLOAT
,
[
1
,
40
])
seq_len
=
helper
.
make_tensor_value_info
(
'seq_len'
,
TensorProto
.
INT32
,
[
3
])
h0
=
helper
.
make_tensor_value_info
(
'h0'
,
TensorProto
.
FLOAT
,
[
3
,
1
,
20
])
hs
=
helper
.
make_tensor_value_info
(
'hs'
,
TensorProto
.
FLOAT
,
[
3
,
5
,
1
,
20
])
output
=
helper
.
make_tensor_value_info
(
'output'
,
TensorProto
.
FLOAT
,
[
3
,
1
,
20
])
node
=
onnx
.
helper
.
make_node
(
'RNN'
,
inputs
=
[
'seq'
,
'w'
,
'r'
,
'bias'
,
'seq_len'
,
'h0'
],
outputs
=
[
'hs'
,
'output'
],
activations
=
[
'tanh'
,
'sigmoid'
],
clip
=
0
,
direction
=
'reverse'
,
hidden_size
=
20
,
layout
=
1
)
return
([
node
],
[
seq
,
w
,
r
,
bias
,
seq_len
,
h0
],
[
hs
,
output
])
@
onnx_test
()
def
rnn_r_3arg_layout_test
():
seq
=
helper
.
make_tensor_value_info
(
'seq'
,
TensorProto
.
FLOAT
,
[
3
,
5
,
10
])
w
=
helper
.
make_tensor_value_info
(
'w'
,
TensorProto
.
FLOAT
,
[
1
,
20
,
10
])
r
=
helper
.
make_tensor_value_info
(
'r'
,
TensorProto
.
FLOAT
,
[
1
,
20
,
20
])
hs
=
helper
.
make_tensor_value_info
(
'hs'
,
TensorProto
.
FLOAT
,
[
3
,
5
,
1
,
20
])
output
=
helper
.
make_tensor_value_info
(
'output'
,
TensorProto
.
FLOAT
,
[
3
,
1
,
20
])
node
=
onnx
.
helper
.
make_node
(
'RNN'
,
inputs
=
[
'seq'
,
'w'
,
'r'
],
outputs
=
[
'hs'
,
'output'
],
activations
=
[
'tanh'
,
'sigmoid'
],
clip
=
0
,
direction
=
'reverse'
,
hidden_size
=
20
,
layout
=
1
)
return
([
node
],
[
seq
,
w
,
r
],
[
hs
,
output
])
@
onnx_test
()
@
onnx_test
()
def
roialign_default_test
():
def
roialign_default_test
():
x
=
helper
.
make_tensor_value_info
(
'x'
,
TensorProto
.
FLOAT
,
[
10
,
4
,
7
,
8
])
x
=
helper
.
make_tensor_value_info
(
'x'
,
TensorProto
.
FLOAT
,
[
10
,
4
,
7
,
8
])
...
...
test/onnx/onnx_rnn_test.cpp
View file @
b34a8e60
...
@@ -100,6 +100,60 @@ TEST_CASE(rnn_test_bidirectional)
...
@@ -100,6 +100,60 @@ TEST_CASE(rnn_test_bidirectional)
EXPECT
(
p
==
prog
);
EXPECT
(
p
==
prog
);
}
}
TEST_CASE
(
rnn_test_bidirectional_layout
)
{
std
::
size_t
sl
=
5
;
// sequence len
std
::
size_t
bs
=
3
;
// batch size
std
::
size_t
hs
=
20
;
// hidden size
std
::
size_t
is
=
10
;
// input size
std
::
size_t
nd
=
2
;
// num directions
float
clip
=
0.0
f
;
migraphx
::
shape
seq_shape
{
migraphx
::
shape
::
float_type
,
{
bs
,
sl
,
is
}};
migraphx
::
shape
w_shape
{
migraphx
::
shape
::
float_type
,
{
nd
,
hs
,
is
}};
migraphx
::
shape
r_shape
{
migraphx
::
shape
::
float_type
,
{
nd
,
hs
,
hs
}};
migraphx
::
shape
bias_shape
{
migraphx
::
shape
::
float_type
,
{
nd
,
2
*
hs
}};
migraphx
::
shape
sl_shape
{
migraphx
::
shape
::
int32_type
,
{
bs
}};
migraphx
::
shape
ih_shape
{
migraphx
::
shape
::
float_type
,
{
bs
,
nd
,
hs
}};
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
seq
=
mm
->
add_parameter
(
"seq"
,
seq_shape
);
auto
w
=
mm
->
add_parameter
(
"w"
,
w_shape
);
auto
r
=
mm
->
add_parameter
(
"r"
,
r_shape
);
auto
bias
=
mm
->
add_parameter
(
"bias"
,
bias_shape
);
auto
seq_len
=
mm
->
add_parameter
(
"seq_len"
,
sl_shape
);
auto
ih
=
mm
->
add_parameter
(
"h0"
,
ih_shape
);
std
::
vector
<
int64_t
>
perm
{
1
,
0
,
2
};
seq
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
seq
);
ih
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
ih
);
auto
out_hs
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"rnn"
,
{{
"hidden_size"
,
hs
},
{
"actv_func"
,
migraphx
::
to_value
(
std
::
vector
<
migraphx
::
operation
>
{
migraphx
::
make_op
(
"tanh"
),
migraphx
::
make_op
(
"sigmoid"
)})},
{
"direction"
,
migraphx
::
to_value
(
migraphx
::
op
::
rnn_direction
::
bidirectional
)},
{
"clip"
,
clip
}}),
seq
,
w
,
r
,
bias
,
seq_len
,
ih
);
auto
last_output
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"rnn_last_hs_output"
),
out_hs
);
std
::
vector
<
int64_t
>
perm_hid
{
2
,
0
,
1
,
3
};
out_hs
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm_hid
}}),
out_hs
);
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
last_output
);
auto
prog
=
optimize_onnx
(
"rnn_bi_layout_test.onnx"
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
rnn_test_one_direction
)
TEST_CASE
(
rnn_test_one_direction
)
{
{
std
::
size_t
sl
=
5
;
// sequence len
std
::
size_t
sl
=
5
;
// sequence len
...
@@ -241,6 +295,179 @@ TEST_CASE(rnn_test_one_direction)
...
@@ -241,6 +295,179 @@ TEST_CASE(rnn_test_one_direction)
}
}
}
}
TEST_CASE
(
rnn_test_one_direction_layout
)
{
std
::
size_t
sl
=
5
;
// sequence len
std
::
size_t
bs
=
3
;
// batch size
std
::
size_t
hs
=
20
;
// hidden size
std
::
size_t
is
=
10
;
// input size
std
::
size_t
nd
=
1
;
// num directions
float
clip
=
0.0
f
;
migraphx
::
shape
seq_shape
{
migraphx
::
shape
::
float_type
,
{
bs
,
sl
,
is
}};
migraphx
::
shape
w_shape
{
migraphx
::
shape
::
float_type
,
{
nd
,
hs
,
is
}};
migraphx
::
shape
r_shape
{
migraphx
::
shape
::
float_type
,
{
nd
,
hs
,
hs
}};
migraphx
::
shape
bias_shape
{
migraphx
::
shape
::
float_type
,
{
nd
,
2
*
hs
}};
migraphx
::
shape
sl_shape
{
migraphx
::
shape
::
int32_type
,
{
bs
}};
migraphx
::
shape
ih_shape
{
migraphx
::
shape
::
float_type
,
{
bs
,
nd
,
hs
}};
// forward
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
seq
=
mm
->
add_parameter
(
"seq"
,
seq_shape
);
auto
w
=
mm
->
add_parameter
(
"w"
,
w_shape
);
auto
r
=
mm
->
add_parameter
(
"r"
,
r_shape
);
auto
bias
=
mm
->
add_parameter
(
"bias"
,
bias_shape
);
auto
seq_len
=
mm
->
add_parameter
(
"seq_len"
,
sl_shape
);
auto
ih
=
mm
->
add_parameter
(
"h0"
,
ih_shape
);
std
::
vector
<
int64_t
>
perm
{
1
,
0
,
2
};
seq
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
seq
);
ih
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
ih
);
auto
out_hs
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"rnn"
,
{{
"hidden_size"
,
hs
},
{
"actv_func"
,
migraphx
::
to_value
(
std
::
vector
<
migraphx
::
operation
>
{
migraphx
::
make_op
(
"tanh"
),
migraphx
::
make_op
(
"sigmoid"
)})},
{
"direction"
,
migraphx
::
to_value
(
migraphx
::
op
::
rnn_direction
::
forward
)},
{
"clip"
,
clip
}}),
seq
,
w
,
r
,
bias
,
seq_len
,
ih
);
auto
last_output
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"rnn_last_hs_output"
),
out_hs
);
std
::
vector
<
int64_t
>
perm_hid
{
2
,
0
,
1
,
3
};
out_hs
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm_hid
}}),
out_hs
);
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
last_output
);
auto
prog
=
optimize_onnx
(
"rnn_f_layout_test.onnx"
);
EXPECT
(
p
==
prog
);
}
// reverse
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
seq
=
mm
->
add_parameter
(
"seq"
,
seq_shape
);
auto
w
=
mm
->
add_parameter
(
"w"
,
w_shape
);
auto
r
=
mm
->
add_parameter
(
"r"
,
r_shape
);
auto
bias
=
mm
->
add_parameter
(
"bias"
,
bias_shape
);
auto
seq_len
=
mm
->
add_parameter
(
"seq_len"
,
sl_shape
);
auto
ih
=
mm
->
add_parameter
(
"h0"
,
ih_shape
);
std
::
vector
<
int64_t
>
perm
{
1
,
0
,
2
};
seq
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
seq
);
ih
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
ih
);
auto
out_hs
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"rnn"
,
{{
"hidden_size"
,
hs
},
{
"actv_func"
,
migraphx
::
to_value
(
std
::
vector
<
migraphx
::
operation
>
{
migraphx
::
make_op
(
"tanh"
),
migraphx
::
make_op
(
"sigmoid"
)})},
{
"direction"
,
migraphx
::
to_value
(
migraphx
::
op
::
rnn_direction
::
reverse
)},
{
"clip"
,
clip
}}),
seq
,
w
,
r
,
bias
,
seq_len
,
ih
);
auto
last_output
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"rnn_last_hs_output"
),
out_hs
);
std
::
vector
<
int64_t
>
perm_hid
{
2
,
0
,
1
,
3
};
out_hs
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm_hid
}}),
out_hs
);
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
last_output
);
auto
prog
=
optimize_onnx
(
"rnn_r_layout_test.onnx"
);
EXPECT
(
p
==
prog
);
}
// 3 argumments
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
seq
=
mm
->
add_parameter
(
"seq"
,
seq_shape
);
auto
w
=
mm
->
add_parameter
(
"w"
,
w_shape
);
auto
r
=
mm
->
add_parameter
(
"r"
,
r_shape
);
auto
und
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"undefined"
));
std
::
vector
<
int64_t
>
perm
{
1
,
0
,
2
};
seq
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
seq
);
auto
out_hs
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"rnn"
,
{{
"hidden_size"
,
hs
},
{
"actv_func"
,
migraphx
::
to_value
(
std
::
vector
<
migraphx
::
operation
>
{
migraphx
::
make_op
(
"tanh"
),
migraphx
::
make_op
(
"sigmoid"
)})},
{
"direction"
,
migraphx
::
to_value
(
migraphx
::
op
::
rnn_direction
::
reverse
)},
{
"clip"
,
clip
}}),
seq
,
w
,
r
,
und
,
und
,
und
);
auto
last_output
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"rnn_last_hs_output"
),
out_hs
);
std
::
vector
<
int64_t
>
perm_hid
{
2
,
0
,
1
,
3
};
out_hs
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm_hid
}}),
out_hs
);
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
last_output
);
auto
prog
=
optimize_onnx
(
"rnn_r_3arg_layout_test.onnx"
);
EXPECT
(
p
==
prog
);
}
// 5 argumments
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
seq
=
mm
->
add_parameter
(
"seq"
,
seq_shape
);
auto
w
=
mm
->
add_parameter
(
"w"
,
w_shape
);
auto
r
=
mm
->
add_parameter
(
"r"
,
r_shape
);
auto
bias
=
mm
->
add_parameter
(
"bias"
,
bias_shape
);
auto
seq_len
=
mm
->
add_parameter
(
"seq_len"
,
sl_shape
);
auto
und
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"undefined"
));
std
::
vector
<
int64_t
>
perm
{
1
,
0
,
2
};
seq
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
seq
);
auto
out_hs
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"rnn"
,
{{
"hidden_size"
,
hs
},
{
"actv_func"
,
migraphx
::
to_value
(
std
::
vector
<
migraphx
::
operation
>
{
migraphx
::
make_op
(
"tanh"
),
migraphx
::
make_op
(
"sigmoid"
)})},
{
"direction"
,
migraphx
::
to_value
(
migraphx
::
op
::
rnn_direction
::
forward
)},
{
"clip"
,
clip
}}),
seq
,
w
,
r
,
bias
,
seq_len
,
und
);
auto
last_output
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"rnn_last_hs_output"
),
out_hs
);
std
::
vector
<
int64_t
>
perm_hid
{
2
,
0
,
1
,
3
};
out_hs
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm_hid
}}),
out_hs
);
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
last_output
);
auto
prog
=
optimize_onnx
(
"rnn_f_5arg_layout_test.onnx"
);
EXPECT
(
p
==
prog
);
}
}
TEST_CASE
(
gru_test
)
TEST_CASE
(
gru_test
)
{
{
std
::
size_t
sl
=
5
;
// sequence len
std
::
size_t
sl
=
5
;
// sequence len
...
...
test/onnx/rnn_bi_layout_test.onnx
0 → 100644
View file @
b34a8e60
File added
test/onnx/rnn_f_5arg_layout_test.onnx
0 → 100644
View file @
b34a8e60
File added
test/onnx/rnn_f_layout_test.onnx
0 → 100644
View file @
b34a8e60
File added
test/onnx/rnn_r_3arg_layout_test.onnx
0 → 100644
View file @
b34a8e60
File added
test/onnx/rnn_r_layout_test.onnx
0 → 100644
View file @
b34a8e60
File added
test/py/onnx_backend_test.py
View file @
b34a8e60
...
@@ -574,7 +574,6 @@ def disabled_tests_onnx_1_9_0(backend_test):
...
@@ -574,7 +574,6 @@ def disabled_tests_onnx_1_9_0(backend_test):
# fails
# fails
# from OnnxBackendNodeModelTest
# from OnnxBackendNodeModelTest
backend_test
.
exclude
(
r
'test_gru_batchwise_cpu'
)
backend_test
.
exclude
(
r
'test_gru_batchwise_cpu'
)
backend_test
.
exclude
(
r
'test_simple_rnn_batchwise_cpu'
)
# from OnnxBackendPyTorchConvertedModelTest
# from OnnxBackendPyTorchConvertedModelTest
backend_test
.
exclude
(
r
'test_MaxPool1d_stride_padding_dilation_cpu'
)
backend_test
.
exclude
(
r
'test_MaxPool1d_stride_padding_dilation_cpu'
)
backend_test
.
exclude
(
r
'test_MaxPool2d_stride_padding_dilation_cpu'
)
backend_test
.
exclude
(
r
'test_MaxPool2d_stride_padding_dilation_cpu'
)
...
...
test/ref/rnn_ops.cpp
View file @
b34a8e60
This diff is collapsed.
Click to expand it.
test/verify/test_rnn_4args_layout.cpp
0 → 100644
View file @
b34a8e60
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/common.hpp>
struct
test_rnn_4args_layout
:
verify_program
<
test_rnn_4args_layout
>
{
migraphx
::
program
create_program
()
const
{
std
::
size_t
batch_size
=
2
;
std
::
size_t
seq_len
=
5
;
std
::
size_t
hidden_size
=
4
;
std
::
size_t
input_size
=
3
;
std
::
size_t
num_dirct
=
1
;
float
clip
=
0.0
f
;
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
migraphx
::
shape
in_shape
{
migraphx
::
shape
::
float_type
,
{
batch_size
,
seq_len
,
input_size
}};
migraphx
::
shape
w_shape
{
migraphx
::
shape
::
float_type
,
{
num_dirct
,
hidden_size
,
input_size
}};
migraphx
::
shape
r_shape
{
migraphx
::
shape
::
float_type
,
{
num_dirct
,
hidden_size
,
hidden_size
}};
migraphx
::
shape
b_shape
{
migraphx
::
shape
::
float_type
,
{
num_dirct
,
2
*
hidden_size
}};
auto
seq
=
mm
->
add_parameter
(
"seq"
,
in_shape
);
auto
w
=
mm
->
add_parameter
(
"w"
,
w_shape
);
auto
r
=
mm
->
add_parameter
(
"r"
,
r_shape
);
auto
bias
=
mm
->
add_parameter
(
"bias"
,
b_shape
);
std
::
vector
<
int64_t
>
perm
{
1
,
0
,
2
};
seq
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
seq
);
auto
hs
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"rnn"
,
{{
"hidden_size"
,
hidden_size
},
{
"actv_func"
,
migraphx
::
to_value
(
std
::
vector
<
migraphx
::
operation
>
{
migraphx
::
make_op
(
"tanh"
),
migraphx
::
make_op
(
"tanh"
)})},
{
"direction"
,
migraphx
::
to_value
(
migraphx
::
op
::
rnn_direction
::
reverse
)},
{
"clip"
,
clip
}}),
seq
,
w
,
r
,
bias
);
std
::
vector
<
int64_t
>
perm_hid
{
2
,
0
,
1
,
3
};
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm_hid
}}),
hs
);
return
p
;
}
std
::
string
section
()
const
{
return
"rnn"
;
}
};
test/verify/test_rnn_bi_3args_layout.cpp
0 → 100644
View file @
b34a8e60
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/common.hpp>
struct
test_rnn_bi_3args_layout
:
verify_program
<
test_rnn_bi_3args_layout
>
{
migraphx
::
program
create_program
()
const
{
std
::
size_t
batch_size
=
2
;
std
::
size_t
seq_len
=
10
;
std
::
size_t
hidden_size
=
4
;
std
::
size_t
input_size
=
3
;
std
::
size_t
num_dirct
=
2
;
float
clip
=
0.0
f
;
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
migraphx
::
shape
in_shape
{
migraphx
::
shape
::
float_type
,
{
batch_size
,
seq_len
,
input_size
}};
migraphx
::
shape
w_shape
{
migraphx
::
shape
::
float_type
,
{
num_dirct
,
hidden_size
,
input_size
}};
migraphx
::
shape
r_shape
{
migraphx
::
shape
::
float_type
,
{
num_dirct
,
hidden_size
,
hidden_size
}};
auto
seq
=
mm
->
add_parameter
(
"seq"
,
in_shape
);
auto
w
=
mm
->
add_parameter
(
"w"
,
w_shape
);
auto
r
=
mm
->
add_parameter
(
"r"
,
r_shape
);
std
::
vector
<
int64_t
>
perm
{
1
,
0
,
2
};
seq
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
seq
);
auto
output
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"rnn"
,
{{
"hidden_size"
,
hidden_size
},
{
"actv_func"
,
migraphx
::
to_value
(
std
::
vector
<
migraphx
::
operation
>
{
migraphx
::
make_op
(
"tanh"
),
migraphx
::
make_op
(
"tanh"
)})},
{
"direction"
,
migraphx
::
to_value
(
migraphx
::
op
::
rnn_direction
::
bidirectional
)},
{
"clip"
,
clip
}}),
seq
,
w
,
r
);
auto
last_output
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"rnn_last_hs_output"
),
output
);
last_output
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
last_output
);
return
p
;
}
std
::
string
section
()
const
{
return
"rnn"
;
}
};
test/verify/test_rnn_bidirectional_layout.cpp
0 → 100644
View file @
b34a8e60
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/op/common.hpp>
struct
test_rnn_bidirectional_layout
:
verify_program
<
test_rnn_bidirectional_layout
>
{
migraphx
::
program
create_program
()
const
{
std
::
size_t
batch_size
=
2
;
std
::
size_t
seq_len
=
1
;
std
::
size_t
hidden_size
=
4
;
std
::
size_t
input_size
=
3
;
std
::
size_t
num_dirct
=
2
;
float
clip
=
0.0
f
;
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
migraphx
::
shape
in_shape
{
migraphx
::
shape
::
float_type
,
{
batch_size
,
seq_len
,
input_size
}};
migraphx
::
shape
w_shape
{
migraphx
::
shape
::
float_type
,
{
num_dirct
,
hidden_size
,
input_size
}};
migraphx
::
shape
r_shape
{
migraphx
::
shape
::
float_type
,
{
num_dirct
,
hidden_size
,
hidden_size
}};
migraphx
::
shape
b_shape
{
migraphx
::
shape
::
float_type
,
{
num_dirct
,
2
*
hidden_size
}};
migraphx
::
shape
ih_shape
{
migraphx
::
shape
::
float_type
,
{
batch_size
,
num_dirct
,
hidden_size
}};
auto
seq
=
mm
->
add_parameter
(
"seq"
,
in_shape
);
auto
w
=
mm
->
add_parameter
(
"w"
,
w_shape
);
auto
r
=
mm
->
add_parameter
(
"r"
,
r_shape
);
auto
bias
=
mm
->
add_parameter
(
"bias"
,
b_shape
);
auto
ih
=
mm
->
add_parameter
(
"ih"
,
ih_shape
);
auto
und
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"undefined"
));
std
::
vector
<
int64_t
>
perm
{
1
,
0
,
2
};
seq
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
seq
);
ih
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
ih
);
auto
output
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"rnn"
,
{{
"hidden_size"
,
hidden_size
},
{
"actv_func"
,
migraphx
::
to_value
(
std
::
vector
<
migraphx
::
operation
>
{
migraphx
::
make_op
(
"tanh"
),
migraphx
::
make_op
(
"tanh"
)})},
{
"direction"
,
migraphx
::
to_value
(
migraphx
::
op
::
rnn_direction
::
bidirectional
)},
{
"clip"
,
clip
}}),
seq
,
w
,
r
,
bias
,
und
,
ih
);
auto
last_output
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"rnn_last_hs_output"
),
output
);
last_output
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
last_output
);
return
p
;
}
std
::
string
section
()
const
{
return
"rnn"
;
}
};
test/verify/test_rnn_forward_layout.cpp
0 → 100644
View file @
b34a8e60
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/op/common.hpp>
// Verifies a forward-direction RNN driven with batch-first ("layout = 1")
// tensors: seq and ih arrive as {batch, ...}, are transposed to the
// seq-first layout the "rnn" op expects, and both outputs are transposed
// back to batch-first before being returned.
struct test_rnn_forward_layout : verify_program<test_rnn_forward_layout>
{
    migraphx::program create_program() const
    {
        const std::size_t nb   = 2; // batch size
        const std::size_t nt   = 1; // sequence length
        const std::size_t nh   = 4; // hidden size
        const std::size_t ni   = 3; // input size
        const std::size_t ndir = 1; // forward only
        const float clip_val   = 0.0f;

        migraphx::program prog;
        auto* mm = prog.get_main_module();

        // Parameter shapes; seq and ih are batch-first, the weights are
        // layout-independent.
        migraphx::shape seq_shape{migraphx::shape::float_type, {nb, nt, ni}};
        migraphx::shape w_shape{migraphx::shape::float_type, {ndir, nh, ni}};
        migraphx::shape r_shape{migraphx::shape::float_type, {ndir, nh, nh}};
        migraphx::shape b_shape{migraphx::shape::float_type, {ndir, 2 * nh}};
        migraphx::shape ih_shape{migraphx::shape::float_type, {nb, ndir, nh}};

        auto seq  = mm->add_parameter("seq", seq_shape);
        auto w    = mm->add_parameter("w", w_shape);
        auto r    = mm->add_parameter("r", r_shape);
        auto bias = mm->add_parameter("bias", b_shape);
        auto ih   = mm->add_parameter("ih", ih_shape);
        // Placeholder for the (unused) sequence-length input.
        auto und = mm->add_instruction(migraphx::make_op("undefined"));

        // Helper: append a transpose of `ins` with the given permutation.
        auto transposed = [&](auto ins, const std::vector<int64_t>& axes) {
            return mm->add_instruction(
                migraphx::make_op("transpose", {{"permutation", axes}}), ins);
        };

        // Batch-first -> seq-first for the rnn op.
        const std::vector<int64_t> to_seq_first{1, 0, 2};
        seq = transposed(seq, to_seq_first);
        ih  = transposed(ih, to_seq_first);

        auto hs = mm->add_instruction(
            migraphx::make_op(
                "rnn",
                {{"hidden_size", nh},
                 {"actv_func",
                  migraphx::to_value(std::vector<migraphx::operation>{
                      migraphx::make_op("tanh"), migraphx::make_op("tanh")})},
                 {"direction", migraphx::to_value(migraphx::op::rnn_direction::forward)},
                 {"clip", clip_val}}),
            seq,
            w,
            r,
            bias,
            und,
            ih);
        auto lho = mm->add_instruction(migraphx::make_op("rnn_last_hs_output"), hs);

        // Both outputs go back to batch-first layout.
        hs  = transposed(hs, {2, 0, 1, 3});
        lho = transposed(lho, to_seq_first);
        mm->add_return({hs, lho});
        return prog;
    }

    std::string section() const { return "rnn"; }
};
test/verify/test_rnn_reverse_layout.cpp
0 → 100644
View file @
b34a8e60
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/op/common.hpp>
// NOTE(review): renamed from test_rnn_reverse — that type name is already
// used by the non-layout variant (test_rnn_reverse.cpp), so reusing it here
// would register two verify tests under one name. The new name also matches
// this file (test_rnn_reverse_layout.cpp) and the naming of the sibling
// layout tests (test_rnn_forward_layout, test_rnn_sql_1_layout, ...).
struct test_rnn_reverse_layout : verify_program<test_rnn_reverse_layout>
{
    // Builds a reverse-direction RNN fed with batch-first ("layout = 1")
    // seq/ih tensors: both are transposed to the seq-first layout the "rnn"
    // op expects, and the full hidden-state output is transposed back to
    // batch-first at the end of the program.
    migraphx::program create_program() const
    {
        std::size_t batch_size  = 2;
        std::size_t seq_len     = 1;
        std::size_t hidden_size = 4;
        std::size_t input_size  = 3;
        std::size_t num_dirct   = 1;
        float clip              = 0.0f;

        migraphx::program p;
        auto* mm = p.get_main_module();
        // seq and ih are batch-first; w/r/bias are layout-independent.
        migraphx::shape in_shape{migraphx::shape::float_type, {batch_size, seq_len, input_size}};
        migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
        migraphx::shape r_shape{migraphx::shape::float_type,
                                {num_dirct, hidden_size, hidden_size}};
        migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
        migraphx::shape ih_shape{migraphx::shape::float_type,
                                 {batch_size, num_dirct, hidden_size}};

        auto seq  = mm->add_parameter("seq", in_shape);
        auto w    = mm->add_parameter("w", w_shape);
        auto r    = mm->add_parameter("r", r_shape);
        auto bias = mm->add_parameter("bias", b_shape);
        auto ih   = mm->add_parameter("ih", ih_shape);
        // Placeholder for the (unused) sequence-length input.
        auto und = mm->add_instruction(migraphx::make_op("undefined"));

        // Batch-first -> seq-first for the rnn op.
        std::vector<int64_t> perm{1, 0, 2};
        seq = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), seq);
        ih  = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ih);

        auto hs = mm->add_instruction(
            migraphx::make_op(
                "rnn",
                {{"hidden_size", hidden_size},
                 {"actv_func",
                  migraphx::to_value(std::vector<migraphx::operation>{
                      migraphx::make_op("tanh"), migraphx::make_op("tanh")})},
                 {"direction", migraphx::to_value(migraphx::op::rnn_direction::reverse)},
                 {"clip", clip}}),
            seq,
            w,
            r,
            bias,
            und,
            ih);

        // Transpose the hidden-state output back to batch-first layout; as the
        // last instruction it is the program's result.
        std::vector<int64_t> perm_hid{2, 0, 1, 3};
        hs = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm_hid}}),
                                 hs);
        return p;
    }

    std::string section() const { return "rnn"; }
};
test/verify/test_rnn_sql_1_layout.cpp
0 → 100644
View file @
b34a8e60
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/common.hpp>
// Forward RNN with batch-first ("layout = 1") inputs and explicit per-batch
// sequence lengths (5 and 7 out of a padded length of 10). Returns both the
// full hidden-state sequence and the last hidden state, each transposed back
// to batch-first layout.
struct test_rnn_sql_1_layout : verify_program<test_rnn_sql_1_layout>
{
    migraphx::program create_program() const
    {
        const std::size_t nb   = 2;  // batch size
        const std::size_t nt   = 10; // padded sequence length
        const std::size_t nh   = 4;  // hidden size
        const std::size_t ni   = 3;  // input size
        const std::size_t ndir = 1;  // forward only
        const float clip_val   = 0.0f;

        migraphx::program prog;
        auto* mm = prog.get_main_module();

        // Parameter shapes; seq and ih are batch-first.
        migraphx::shape seq_shape{migraphx::shape::float_type, {nb, nt, ni}};
        migraphx::shape w_shape{migraphx::shape::float_type, {ndir, nh, ni}};
        migraphx::shape r_shape{migraphx::shape::float_type, {ndir, nh, nh}};
        migraphx::shape b_shape{migraphx::shape::float_type, {ndir, 2 * nh}};
        migraphx::shape len_shape{migraphx::shape::int32_type, {nb}};
        migraphx::shape ih_shape{migraphx::shape::float_type, {nb, ndir, nh}};

        auto seq  = mm->add_parameter("seq", seq_shape);
        auto w    = mm->add_parameter("w", w_shape);
        auto r    = mm->add_parameter("r", r_shape);
        auto bias = mm->add_parameter("bias", b_shape);
        // Actual lengths of the two batch entries.
        auto sql = mm->add_literal(migraphx::literal{len_shape, std::vector<int>{5, 7}});
        auto ih  = mm->add_parameter("ih", ih_shape);

        // Helper: append a transpose of `ins` with the given permutation.
        auto transposed = [&](auto ins, const std::vector<int64_t>& axes) {
            return mm->add_instruction(
                migraphx::make_op("transpose", {{"permutation", axes}}), ins);
        };

        // Batch-first -> seq-first for the rnn op.
        const std::vector<int64_t> to_seq_first{1, 0, 2};
        seq = transposed(seq, to_seq_first);
        ih  = transposed(ih, to_seq_first);

        auto hs = mm->add_instruction(
            migraphx::make_op(
                "rnn",
                {{"hidden_size", nh},
                 {"actv_func",
                  migraphx::to_value(std::vector<migraphx::operation>{
                      migraphx::make_op("tanh"), migraphx::make_op("tanh")})},
                 {"direction", migraphx::to_value(migraphx::op::rnn_direction::forward)},
                 {"clip", clip_val}}),
            seq,
            w,
            r,
            bias,
            sql,
            ih);
        auto last_hs = mm->add_instruction(migraphx::make_op("rnn_last_hs_output"), hs);

        // Both outputs go back to batch-first layout.
        hs      = transposed(hs, {2, 0, 1, 3});
        last_hs = transposed(last_hs, to_seq_first);
        mm->add_return({hs, last_hs});
        return prog;
    }

    std::string section() const { return "rnn"; }
};
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment