gaoqiong / MIGraphX · Commits

Commit 1e7457cb, authored Feb 01, 2019 by Shucai Xiao
Merge from branch rnn_operator
Parents: 5fe0c226, 7bab863d
Changes: 21
Showing 20 changed files with 1275 additions and 180 deletions (+1275 −180)
src/include/migraphx/operators.hpp                              +86  −22
src/include/migraphx/rewrite_rnn.hpp                            +3   −3
src/onnx/onnx.cpp                                               +65  −29
src/program.cpp                                                 +7   −0
src/rewrite_rnn.cpp                                             +75  −39
src/simplify_reshapes.cpp                                       +2   −1
src/targets/cpu/lowering.cpp                                    +27  −0
src/targets/gpu/CMakeLists.txt                                  +2   −0
src/targets/gpu/device/include/migraphx/gpu/device/nary.hpp     +6   −0
src/targets/gpu/device/pad.cpp                                  +43  −0
src/targets/gpu/gemm.cpp                                        +1   −0
src/targets/gpu/include/migraphx/gpu/device/pad.hpp             +25  −0
src/targets/gpu/include/migraphx/gpu/pad.hpp                    +40  −0
src/targets/gpu/lowering.cpp                                    +6   −1
src/targets/gpu/pad.cpp                                         +25  −0
src/targets/gpu/target.cpp                                      +3   −4
test/cpu_ops_test.cpp                                           +453 −81
test/gpu/miopen.cpp                                             +386 −0
test/onnx/onnx_test.cpp                                         +8   −0
test/onnx/pad_test.onnx                                         +12  −0
src/include/migraphx/operators.hpp

@@ -16,6 +16,13 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {

+enum padding_mode_t
+{
+    default_, // NOLINT
+    same,
+    valid
+};
+
struct not_computable
{
    argument compute(const shape&, const std::vector<argument>&) const

@@ -58,12 +65,7 @@ struct convolution
    std::array<std::size_t, 2> padding  = {{0, 0}};
    std::array<std::size_t, 2> stride   = {{1, 1}};
    std::array<std::size_t, 2> dilation = {{1, 1}};
-    enum padding_mode_t
-    {
-        default_, // NOLINT
-        same,
-        valid
-    };
    padding_mode_t padding_mode = default_;
    int group                   = 1;

@@ -138,12 +140,7 @@ struct im2col
    std::array<std::size_t, 2> padding  = {{0, 0}};
    std::array<std::size_t, 2> stride   = {{1, 1}};
    std::array<std::size_t, 2> dilation = {{1, 1}};
-    enum padding_mode_t
-    {
-        default_, // NOLINT
-        same,
-        valid
-    };
    padding_mode_t padding_mode = default_;

    template <class Self, class F>

@@ -189,12 +186,14 @@ struct pooling
    std::array<std::size_t, 2> padding = {{0, 0}};
    std::array<std::size_t, 2> stride  = {{1, 1}};
    std::array<std::size_t, 2> lengths = {{1, 1}};
+    padding_mode_t padding_mode        = default_;

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.mode, "mode"),
                    f(self.padding, "padding"),
+                    f(self.padding, "padding_mode"),
                    f(self.stride, "stride"),
                    f(self.lengths, "lengths"));
    }

@@ -211,7 +210,10 @@ struct pooling
        assert(lengths[0] <= (input.lens()[2] + 2 * padding[0]));
        assert(lengths[1] <= (input.lens()[3] + 2 * padding[1]));

-        return {t,
+        if(padding_mode == default_)
+        {
+            return {t,
+                    {input.lens()[0],
+                     input.lens()[1],

@@ -226,6 +228,39 @@ struct pooling
                         static_cast<float>(stride[1]))) + 1)),
                }};
+        }
+        else if(padding_mode == same)
+        {
+            return {t,
+                    {input.lens()[0],
+                     input.lens()[1],
+                     static_cast<std::size_t>(std::ceil(static_cast<double>(input.lens()[2]) / stride[0])),
+                     static_cast<std::size_t>(std::ceil(static_cast<double>(input.lens()[3]) / stride[1]))}};
+        }
+        else if(padding_mode == valid)
+        {
+            return {t,
+                    {input.lens()[0],
+                     input.lens()[1],
+                     std::size_t(std::max<std::ptrdiff_t>(
+                         1, std::ptrdiff_t(std::floor((input.lens()[2] - lengths[0]) / static_cast<float>(stride[0]))) + 1)),
+                     std::size_t(std::max<std::ptrdiff_t>(
+                         1, std::ptrdiff_t(std::floor((input.lens()[3] - lengths[1]) / static_cast<float>(stride[1]))) + 1)),
+                }};
+        }
+        else
+        {
+            MIGRAPHX_THROW("Invalid padding mode");
+        }
    }
};
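For reference, the three branches above amount to the usual pooling output-size arithmetic. The standalone sketch below summarizes it; the enum and the helper name pool_out_dim are illustrative only and are not MIGraphX API:

#include <algorithm>
#include <cmath>
#include <cstddef>

enum class pad_mode { default_, same, valid };

// Output extent of one spatial dimension for each padding mode (sketch).
std::size_t pool_out_dim(pad_mode mode, std::size_t in, std::size_t pad, std::size_t len, std::size_t stride)
{
    if(mode == pad_mode::same) // implicit padding so that out = ceil(in / stride)
        return static_cast<std::size_t>(std::ceil(static_cast<double>(in) / stride));
    // default_ counts the explicit padding, valid uses none
    std::ptrdiff_t padded = static_cast<std::ptrdiff_t>(mode == pad_mode::default_ ? in + 2 * pad : in);
    std::ptrdiff_t out    = (padded - static_cast<std::ptrdiff_t>(len)) / static_cast<std::ptrdiff_t>(stride) + 1;
    return static_cast<std::size_t>(std::max<std::ptrdiff_t>(1, out));
}

// Example: a 63x63 input with a 2x2 window and stride 2 (as in test_pooling_autopad later in
// this commit) gives 32 for same, 31 for valid, and 31 for default_ with zero padding.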
@@ -395,7 +430,6 @@ struct concat
        }
        return result;
    }
-
-    int output_alias(const std::vector<shape>&) const { return 0; }
};

struct slice

@@ -598,11 +632,7 @@ struct reshape
                rdims[i] = missing_dim;
            }
        }
-        // if(dims.back() == -1)
-        //{
-        //    rdims.pop_back();
-        //    std::copy(idims.begin() + rdims.size(), idims.end(), std::back_inserter(rdims));
-        //}
        shape s{inputs.front().type(), rdims};
        if(s.elements() != inputs.front().elements())
            MIGRAPHX_THROW("Wrong number of elements for reshape");

@@ -615,6 +645,42 @@ struct reshape
    int output_alias(const std::vector<shape>&) const { return 0; }
};

+struct pad
+{
+    std::vector<int64_t> pads;
+    float value = 0.0f;
+    enum pad_op_mode_t
+    {
+        constant_pad,
+        reflect_pad,
+        edge_pad
+    };
+    pad_op_mode_t mode = constant_pad;
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return pack(f(self.mode, "mode"), f(self.pads, "pads"), f(self.value, "value"));
+    }
+
+    std::string name() const { return "pad"; }
+    shape compute_shape(std::vector<shape> inputs) const
+    {
+        check_shapes{inputs, *this}.has(1);
+        auto&& idims = inputs.front().lens();
+        std::vector<std::size_t> rdims(idims.begin(), idims.end());
+        std::size_t num_dims = rdims.size();
+
+        for(std::size_t i = 0; i < num_dims; i++)
+        {
+            rdims[i] += pads[i] + pads[i + num_dims];
+        }
+
+        shape s{inputs.front().type(), rdims};
+        return s;
+    }
+};
+
struct as_shape
{
    shape s;
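The pads vector of the new pad operator follows the ONNX layout: the first num_dims entries are leading pads and the last num_dims entries are trailing pads, so each output dimension grows by pads[i] + pads[i + num_dims]. A minimal standalone sketch of that arithmetic (padded_dims is a hypothetical helper name, not MIGraphX API):

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustration only: output dimensions produced by op::pad::compute_shape.
std::vector<std::size_t> padded_dims(const std::vector<std::size_t>& in, const std::vector<std::int64_t>& pads)
{
    std::vector<std::size_t> out(in);
    const std::size_t n = in.size();
    for(std::size_t i = 0; i < n; i++)
        out[i] += pads[i] + pads[i + n]; // leading pad + trailing pad
    return out;
}

// e.g. in = {2, 2} with pads = {1, 1, 1, 1} -> out = {4, 4}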
@@ -698,8 +764,6 @@ struct gather
        return result;
    }
-
-    int output_alias(const std::vector<shape>&) const { return 0; }
};

struct dot

@@ -1098,7 +1162,7 @@ struct rnn
        if(num_directions != hidden_dims[0])
        {
-            MIGRAPHX_THROW("RNN: num_direction does not match the direction attribute");
+            MIGRAPHX_THROW("RNN: num_direction mismatch in attribute and input");
        }

        std::vector<std::size_t> out_dims(in_dims);
src/include/migraphx/rewrite_rnn.hpp

@@ -25,10 +25,10 @@ struct rewrite_rnn
                                  program& prog,
                                  instruction_ref ins,
                                  instruction_ref input,
-                                 instruction_ref wx,
-                                 instruction_ref wh,
-                                 instruction_ref ih,
+                                 instruction_ref w,
+                                 instruction_ref r,
+                                 instruction_ref bias,
+                                 instruction_ref ih,
                                  operation& actv_func) const;
};
src/onnx/onnx.cpp

@@ -88,6 +88,7 @@ struct onnx_parser
        add_mem_op("Transpose", &onnx_parser::parse_transpose);
        add_mem_op("RNN", &onnx_parser::parse_rnn);
        add_mem_op("GRU", &onnx_parser::parse_gru);
+        add_mem_op("Pad", &onnx_parser::parse_pad);

        // init the activation function map
        init_actv_func();

@@ -229,24 +230,30 @@ struct onnx_parser
    parse_conv(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::convolution op;
+        auto l0 = args[0];
        if(contains(attributes, "pads"))
        {
            if(contains(attributes, "auto_pad"))
            {
                MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
            }
-            std::vector<std::size_t> padding(4);
-            copy(attributes["pads"].ints(), padding.begin());
+            std::vector<std::int64_t> padding;
+            copy(attributes["pads"].ints(), std::back_inserter(padding));
            if(padding.size() != 4)
            {
                MIGRAPHX_THROW("padding should have 4 values");
            }
            if(padding[0] != padding[2] || padding[1] != padding[3])
            {
-                MIGRAPHX_THROW("migraphx does not support asymetric padding");
+                // insert zeros for pad op (args[0] has 4 dims)
+                padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
+                l0      = prog.add_instruction(op::pad{padding}, l0);
            }
+            else
+            {
+                op.padding[0] = padding[0];
+                op.padding[1] = padding[1];
+            }
-            op.padding[0] = padding[0];
-            op.padding[1] = padding[1];
        }
        if(contains(attributes, "strides"))
        {

@@ -266,7 +273,7 @@ struct onnx_parser
            if(s.find("SAME") != std::string::npos)
            {
-                op.padding_mode = op::convolution::same;
+                op.padding_mode = op::padding_mode_t::same;
            }
        }
        if(contains(attributes, "group"))

@@ -280,7 +287,7 @@ struct onnx_parser
            auto l2 = prog.add_instruction(op::broadcast{axis, l1->get_shape()}, args[2]);
            return prog.add_instruction(op::add{}, l1, l2);
        }
-        return prog.add_instruction(op, args);
+        return prog.add_instruction(op, l0, args[1]);
    }

    instruction_ref parse_pooling(const std::string& name,

@@ -288,6 +295,7 @@ struct onnx_parser
                                  std::vector<instruction_ref> args)
    {
        op::pooling op{ends_with(name, "MaxPool") ? "max" : "average"};
+        auto l0 = args[0];
        if(starts_with(name, "Global"))
        {
            auto lens = args.front()->get_shape().lens();

@@ -295,18 +303,23 @@ struct onnx_parser
        }
        if(contains(attributes, "pads"))
        {
-            std::vector<std::size_t> padding(4);
-            copy(attributes["pads"].ints(), padding.begin());
+            std::vector<std::int64_t> padding;
+            copy(attributes["pads"].ints(), std::back_inserter(padding));
            if(padding.size() != 4)
            {
                MIGRAPHX_THROW("padding should have 4 values");
            }
            if(padding[0] != padding[2] || padding[1] != padding[3])
            {
-                MIGRAPHX_THROW("migraphx does not support asymetric padding");
+                // insert zeros for pad op (args[0] has 4 dims)
+                padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
+                l0      = prog.add_instruction(op::pad{padding}, l0);
            }
+            else
+            {
+                op.padding[0] = padding[0];
+                op.padding[1] = padding[1];
+            }
-            op.padding[0] = padding[0];
-            op.padding[1] = padding[1];
        }
        if(contains(attributes, "strides"))
        {

@@ -319,13 +332,14 @@ struct onnx_parser
        if(contains(attributes, "auto_pad"))
        {
            auto s = attributes["auto_pad"].s();
-            if(to_upper(s) != "NOTSET")
+            if(s.find("SAME_UPPER") == std::string::npos)
            {
-                MIGRAPHX_THROW("auto_pad is not supported for pooling");
+                MIGRAPHX_THROW("auto_pad only supports SAME_UPPER for pooling");
            }
+            op.padding_mode = op::padding_mode_t::same;
        }
-        return prog.add_instruction(op, std::move(args));
+        return prog.add_instruction(op, l0);
    }

    instruction_ref

@@ -563,6 +577,28 @@ struct onnx_parser
        return prog.add_instruction(migraphx::op::transpose{perm}, args.front());
    }

+    instruction_ref
+    parse_pad(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
+    {
+        std::vector<int64_t> pads{};
+        float value = 0.0f;
+        if(contains(attributes, "pads"))
+        {
+            auto&& pad_vals = attributes["pads"].ints();
+            pads            = std::vector<int64_t>(pad_vals.begin(), pad_vals.end());
+        }
+        if(contains(attributes, "value"))
+        {
+            value = parse_value(attributes.at("value")).at<float>();
+        }
+        if(contains(attributes, "mode"))
+        {
+            auto mode = attributes.at("mode").s();
+            if(mode != "constant")
+                MIGRAPHX_THROW("migraphx currently only supports constant padding");
+        }
+        return prog.add_instruction(migraphx::op::pad{pads, value}, args.front());
+    }
+
    // Use a literal instruction to replace the shape since, output of
    // shape operator are literals in migraphx
    instruction_ref
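To illustrate what parse_pad produces, here is a rough sketch of the equivalent program built by hand for the pad_test.onnx model added later in this commit (a 2x2 float input padded by 1 on every side). make_pad_example is a hypothetical name; only APIs that appear elsewhere in this diff are used:

#include <migraphx/program.hpp>
#include <migraphx/operators.hpp>

migraphx::program make_pad_example()
{
    migraphx::program p;
    auto x = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 2}});
    // ONNX "pads" order: {dim0_begin, dim1_begin, dim0_end, dim1_end}
    p.add_instruction(migraphx::op::pad{{1, 1, 1, 1}, 0.0f}, x); // output shape {4, 4}
    return p;
}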
@@ -664,11 +700,11 @@ struct onnx_parser
        if(contains(attributes, "hidden_size"))
        {
-            hidden_size = parse_value(attributes.at("hidden_size")).at<int>();
-        }
-        else
-        {
-            MIGRAPHX_THROW("RNN: hidden size attribute missing");
+            std::size_t hidden_size_att = parse_value(attributes.at("hidden_size")).at<int>();
+            if(hidden_size != hidden_size_att)
+            {
+                MIGRAPHX_THROW("RNN: hidden size mismatch in input and attribute");
+            }
        }

        // Handling of direction to be added later

@@ -699,12 +735,13 @@ struct onnx_parser
        for_each(vec_names.begin(), vec_names.end(), [&](auto& fn) {
            if(map_actv_funcs.count(fn) == 0)
            {
-                MIGRAPHX_THROW("RNN: activation function " + fn + " not supported");
+                MIGRAPHX_THROW("RNN: activation function " + std::string(fn) + " not supported");
            }
        });

-        // bidirectional should have two activation functions
-        // if only one actv function is provides, we use it in both
+        // bidirectional case should have two activation functions.
+        // one is for forward, and the other is for reverse.
+        // if only one actv function is provided, we use it in both
        // forward and reverse direction
        if(dirct == op::rnn::bidirectional)
        {

@@ -714,9 +751,9 @@ struct onnx_parser
            }
        }

-        std::vector<operation> vec_actv_funcs;
-        for_each(vec_names.begin(), vec_names.end(), [&](auto& fn) {
-            vec_actv_funcs.push_back(map_actv_funcs[fn]);
+        std::vector<operation> vec_actv_funcs(vec_names.size());
+        std::transform(vec_names.begin(), vec_names.end(), vec_actv_funcs.begin(), [&](auto& fn) {
+            return map_actv_funcs[fn];
        });

        // To be added later

@@ -915,9 +952,8 @@ struct onnx_parser
        // For RNN, LSTM, and GRU operators, one of the input arguments
        // is prim::Undefined, and it is ignored by protobuf. We use a
        // hack to ignore this argument for these three operators
-        std::string op_type = node.op_type();
-        if((op_type == "RNN" || op_type == "LSTM" || op_type == "GRU") && input.empty() == true)
+        const std::string& op_type = node.op_type();
+        if((op_type == "RNN" || op_type == "LSTM" || op_type == "GRU") && input.empty())
        {
            continue;
        }
src/program.cpp

#include <migraphx/program.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/instruction.hpp>
+#include <migraphx/operators.hpp>
#include <migraphx/env.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/time.hpp>

@@ -134,6 +135,12 @@ instruction_ref program::replace_instruction(instruction_ref ins, instruction_re
    assert(has_instruction(ins));
    assert(has_instruction(rep));
    assert(ins != rep);

+    if(ins == std::prev(this->end()))
+    {
+        return replace_instruction(ins, op::identity{}, rep);
+    }
+
    // TODO: Should it be an error if the output is empty?
    if(ins->outputs().empty())
    {
src/rewrite_rnn.cpp

@@ -10,7 +10,7 @@ inline namespace MIGRAPHX_INLINE_NS {
void rewrite_rnn::apply(program& prog) const
{
-    instruction_ref last_output = prog.end();
+    std::unordered_map<instruction_ref, instruction_ref> map_last_output;
    for(auto ins : iterator_for(prog))
    {
        // rewrite rnn operator

@@ -27,7 +27,7 @@ void rewrite_rnn::apply(program& prog) const
        std::size_t batch_size = seq_shape.lens()[1];
        shape::type_t type     = seq_shape.type();
        migraphx::shape ih_shape{type, {1, batch_size, hidden_size}};
-        std::vector<char> data(ih_shape.bytes(), 0);
+        std::vector<float> data(ih_shape.elements(), 0);

        auto rnn_op = any_cast<op::rnn>(ins->get_operator());
        op::rnn::rnn_direction_t dicrt = rnn_op.direction;

@@ -85,19 +85,33 @@ void rewrite_rnn::apply(program& prog) const
                                  ih_reverse,
                                  rnn_op.actv_funcs.at(1));

-            last_output = prog.insert_instruction(ins, op::concat{0}, ret_forward[1], ret_reverse[1]);
+            auto concat_output = prog.insert_instruction(ins, op::concat{1}, ret_forward[1], ret_reverse[1]);
+            auto last_output   = prog.insert_instruction(ins, op::squeeze{{0}}, concat_output);

            // add the dimension of num_direction
            ret_forward[0] = prog.insert_instruction(ins, op::unsqueeze{{1}}, ret_forward[0]);
            ret_reverse[0] = prog.insert_instruction(ins, op::unsqueeze{{1}}, ret_reverse[0]);

-            // concat the forward and reverse output
-            prog.replace_instruction(ins, op::concat{1}, {ret_forward[0], ret_reverse[0]});
+            // The following logic is to ensure the last instruction rewritten from
+            // rnn operator is a concat instruction
+            // sequence len is 1
+            instruction_ref hidden_output{};
+            if(ret_forward[0] == prog.end())
+            {
+                hidden_output = prog.replace_instruction(ins, op::concat{1}, ret_forward[1], ret_reverse[1]);
+            }
+            else
+            {
+                ret_forward[0] = prog.insert_instruction(ins, op::concat{0}, ret_forward[0], ret_forward[1]);
+                ret_reverse[0] = prog.insert_instruction(ins, op::concat{0}, ret_reverse[1], ret_reverse[0]);
+                hidden_output  = prog.replace_instruction(ins, op::concat{1}, {ret_forward[0], ret_reverse[0]});
+            }
+            map_last_output[hidden_output] = last_output;
        }
        else
        {
-            bool is_forward = (dicrt == op::rnn::rnn_direction_t::forward) ? true : false;
+            bool is_forward = (dicrt == op::rnn::rnn_direction_t::forward);

            // input weight matrix
            auto w = args[1];

@@ -125,10 +139,24 @@ void rewrite_rnn::apply(program& prog) const
            auto ret = rnn_cell(is_forward, prog, ins, args[0], w, r, bias, ih, rnn_op.actv_funcs.at(0));
-            last_output = ret[1];
+            auto last_output = prog.insert_instruction(ins, op::squeeze{{0}}, ret[1]);

-            // add the dimension of num_direction
-            prog.replace_instruction(ins, op::unsqueeze{{1}}, ret[0]);
+            // following logic is to ensure the last instruction is a
+            // concat instruction
+            // sequence len is 1
+            instruction_ref hidden_output{};
+            if(ret[0] == prog.end())
+            {
+                hidden_output = prog.replace_instruction(ins, op::concat{0}, ret[1]);
+            }
+            else
+            {
+                auto concat_arg0 = is_forward ? ret[0] : ret[1];
+                auto concat_arg1 = is_forward ? ret[1] : ret[0];
+                hidden_output    = prog.replace_instruction(ins, op::concat{0}, concat_arg0, concat_arg1);
+            }
+            map_last_output[hidden_output] = last_output;
        }
    }

@@ -138,12 +166,15 @@ void rewrite_rnn::apply(program& prog) const
        // so we can just use it as the output here
        if(ins->name() == "rnn_last_output")
        {
-            // if rnn operator is executed, the last_output != prog.end()
-            if(last_output != prog.end())
+            auto inputs = ins->inputs();
+            assert(inputs.size() == 1);
+            auto arg = inputs[0];
+            if(map_last_output.count(arg) == 0)
            {
-                prog.replace_instruction(ins, op::identity{}, last_output);
-                last_output = prog.end();
+                MIGRAPHX_THROW("RNN_LAST_OUTPUT: no related rnn operator as its input");
            }
+
+            prog.replace_instruction(ins, map_last_output[arg]);
        }
    }
}

@@ -181,11 +212,12 @@ std::vector<instruction_ref> rewrite_rnn::rnn_cell(bool is_forward,
        bias = prog.insert_instruction(ins, op::broadcast{1, sih->get_shape()}, b);
    }

-    instruction_ref hidden_out, last_out;
-    std::size_t seq_len = input->get_shape().lens()[0];
-    long seq_index      = is_forward ? 0 : seq_len - 1;
+    instruction_ref hidden_out = prog.end(), last_out;
+    last_out = prog.insert_instruction(ins, op::unsqueeze{{0, 1}}, sih);
+
+    std::size_t seq_len = input->get_shape().lens()[0];
    for(std::size_t i = 0; i < seq_len; i++)
    {
+        long seq_index = is_forward ? i : (seq_len - 1 - i);
        auto xt = prog.insert_instruction(ins, op::slice{{0}, {seq_index}, {seq_index + 1}}, input);
        xt      = prog.insert_instruction(ins, op::squeeze{{0}}, xt);
        auto xt_wi = prog.insert_instruction(ins, op::dot{}, xt, tran_sw);

@@ -205,29 +237,33 @@ std::vector<instruction_ref> rewrite_rnn::rnn_cell(bool is_forward,
        ht  = prog.insert_instruction(ins, actv_func, ht);
        sih = ht;

-        // add the dimension of sequence length
-        last_out = prog.insert_instruction(ins, op::unsqueeze{{0}}, ht);
+        // add the dimensions of sequence length (axis 0 for sequence length,
+        // axis 1 for num_directions
+        last_out = prog.insert_instruction(ins, op::unsqueeze{{0, 1}}, ht);

-        if(is_forward)
-        {
-            hidden_out = (seq_index == 0) ? last_out : prog.insert_instruction(ins, op::concat{0}, hidden_out, last_out);
-        }
-        else
-        {
-            hidden_out = (seq_index == seq_len - 1) ? last_out : prog.insert_instruction(ins, op::concat{0}, last_out, hidden_out);
-        }
-
-        seq_index = is_forward ? (seq_index + 1) : (seq_index - 1);
+        // concatenation for the last last_out is performed in the apply()
+        // function to ensure the last instruction is concat, then we have
+        // output inserted
+        if(i < seq_len - 1)
+        {
+            if(is_forward)
+            {
+                hidden_out = (seq_index == 0) ? last_out : prog.insert_instruction(ins, op::concat{0}, hidden_out, last_out);
+            }
+            else
+            {
+                hidden_out = (seq_index == seq_len - 1) ? last_out : prog.insert_instruction(ins, op::concat{0}, last_out, hidden_out);
+            }
+        }
    }

-    std::vector<instruction_ref> out_args;
-    out_args.push_back(hidden_out);
-    out_args.push_back(last_out);
-
-    return out_args;
+    return {hidden_out, last_out};
}

} // namespace MIGRAPHX_INLINE_NS
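As a reading aid for rnn_cell above: each loop iteration materializes one step of the standard ONNX RNN cell. A schematic summary in comment form (names follow the code; this assumes the usual ONNX RNN semantics and adds nothing new):

// xt         = squeeze(slice(input, t))                  // [batch_size, input_size]
// ht         = actv_func(xt * W^T + h_prev * R^T + bias) // dot + add + activation; h_prev starts at ih
// last_out   = unsqueeze(ht, {0, 1})                     // add seq_len and num_directions axes
// hidden_out = concat of the per-step last_out along axis 0 (the final step is concatenated in apply())
// rnn_last_output is then resolved in apply() by looking up squeeze of the final step via map_last_output.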
src/simplify_reshapes.cpp

@@ -14,7 +14,8 @@ bool is_nonstandard_reshaper(instruction_ref ins)
{
    // clang-format off
    static const std::unordered_set<std::string> names = {"reshape", "contiguous"};
    // clang-format on
    return contains(names, ins->name()) and ins->inputs().front()->name() == "contiguous";
src/targets/cpu/lowering.cpp

@@ -298,6 +298,32 @@ struct cpu_contiguous
    }
};

+struct cpu_pad
+{
+    op::pad op;
+    std::string name() const { return "cpu::contiguous"; }
+    shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
+
+    argument compute(context&, const shape& output_shape, std::vector<argument> args) const
+    {
+        assert(output_shape.standard());
+        argument result{output_shape};
+        result.visit([&](auto output) { std::fill(output.begin(), output.end(), op.value); });
+
+        visit_all(result, args[0])([&](auto output, auto input) {
+            shape_for_each(input.get_shape(), [&](const auto& idx) {
+                std::vector<std::size_t> new_idx(idx.size());
+                std::transform(idx.begin(), idx.end(), op.pads.begin(), new_idx.begin(), [](auto i, auto j) {
+                    return i + j;
+                });
+                output(new_idx.begin(), new_idx.end()) = input(idx.begin(), idx.end());
+            });
+        });
+
+        return result;
+    }
+};
+
struct cpu_concat
{
    op::concat op;
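A small illustration of the copy performed by cpu_pad::compute above, using hypothetical 1-D values:

// op.pads  = {2, 1}   -> 2 elements of padding in front, 1 behind
// op.value = -1
// input    = [a, b, c]
// output   = [-1, -1, a, b, c, -1]
// The output is first filled with op.value, then each input element at index idx
// is written to output index idx + leading pad.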
@@ -663,6 +689,7 @@ struct cpu_apply
        apply_map["batch_norm_inference"] = extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
        apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
+        apply_map["pad"]        = extend_op<cpu_pad, op::pad>();
        apply_map["concat"]     = extend_op<cpu_concat, op::concat>();
        apply_map["gather"]     = extend_op<cpu_gather, op::gather>();
        apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>();
src/targets/gpu/CMakeLists.txt

@@ -28,6 +28,7 @@ add_library(migraphx_device
    device/contiguous.cpp
    device/mul.cpp
    device/concat.cpp
+    device/pad.cpp
    device/gather.cpp
)
set_target_properties(migraphx_device PROPERTIES EXPORT_NAME device)

@@ -57,6 +58,7 @@ add_library(migraphx_gpu
    sigmoid.cpp
    abs.cpp
    elu.cpp
+    pad.cpp
    gather.cpp
)
set_target_properties(migraphx_gpu PROPERTIES EXPORT_NAME gpu)
src/targets/gpu/device/include/migraphx/gpu/device/nary.hpp

@@ -313,6 +313,12 @@ void nary_impl(hipStream_t stream, F f, argument result, Arguments... args)
        nary_nonstandard_impl(stream, f, result, args...);
}

+template <class F>
+void nary_impl(hipStream_t stream, F f, argument result)
+{
+    nary_standard_impl(stream, f, result);
+}
+
template <class... Arguments>
auto nary_nonstandard(hipStream_t stream, argument result, Arguments... args)
{
src/targets/gpu/device/pad.cpp  (new file, mode 100644)

#include <migraphx/shape.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/gpu/device/nary.hpp>
#include <migraphx/gpu/device/pad.hpp>
#include <migraphx/gpu/device/tensor.hpp>
#include <migraphx/gpu/device/launch.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

argument pad(hipStream_t stream, argument result, argument arg1, float value, std::vector<std::int64_t> pads)
{
    std::size_t nelements = arg1.get_shape().elements();
    nary(stream, result)([=] { return value; });

    visit_all(result, arg1)([&](auto output, auto input) {
        visit_tensor_size(result.get_shape().lens().size(), [&](auto ndim) {
            std::size_t offsets[ndim];
            std::copy(pads.begin(), pads.begin() + ndim, offsets);
            auto* outptr      = output.data();
            const auto* inptr = input.data();
            hip_tensor_descriptor<ndim> desc_input(input.get_shape());
            hip_tensor_descriptor<ndim> desc_output(output.get_shape());
            gs_launch(stream, nelements)([=](auto i) {
                auto idx = desc_input.multi(i);
                for(std::size_t j = 0; j < ndim; j++)
                {
                    idx[j] += offsets[j];
                }
                outptr[desc_output.linear(idx)] = inptr[i];
            });
        });
    });

    return result;
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
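Reading guide for the kernel above (a summary, no new API): the output is first filled with value through nary, then gs_launch schedules one work item per input element, which relocates that element by the leading pads:

// auto idx = desc_input.multi(i);              // linear input index -> multi-index, e.g. (n, c, h, w)
// idx[j]  += offsets[j];                       // offsets hold the leading (begin) pads
// outptr[desc_output.linear(idx)] = inptr[i];  // scatter into the padded output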
src/targets/gpu/gemm.cpp

@@ -107,6 +107,7 @@ argument miopen_gemm::compute(context& ctx,
                           ldc);
    });
+
    return args[2];
}
src/targets/gpu/include/migraphx/gpu/device/pad.hpp  (new file, mode 100644)

#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_PAD_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_PAD_HPP

#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

argument
pad(hipStream_t stream, argument result, argument arg1, float value, std::vector<std::int64_t> pads);

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
src/targets/gpu/include/migraphx/gpu/pad.hpp  (new file, mode 100644)

#ifndef MIGRAPHX_GUARD_RTGLIB_PAD_HPP
#define MIGRAPHX_GUARD_RTGLIB_PAD_HPP

#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/pad.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

struct hip_pad
{
    op::pad op;
    std::string name() const { return "gpu::pad"; }
    shape compute_shape(std::vector<shape> inputs) const;
    argument
    compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
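For context on output_alias above, the GPU operators in this diff follow the convention (visible in hip_pad::compute and insert_allocation below) that the last argument is a preallocated output buffer. Sketched:

// args = { input, ..., allocated_output }     // lowering appends the allocation as the last argument
// compute_shape() pops that trailing shape before delegating to op.compute_shape()
// output_alias() returns shapes.size() - 1    // the result aliases the preallocated buffer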
src/targets/gpu/lowering.cpp

@@ -40,6 +40,7 @@
#include <migraphx/gpu/pooling.hpp>
#include <migraphx/gpu/gemm.hpp>
#include <migraphx/gpu/concat.hpp>
+#include <migraphx/gpu/pad.hpp>
#include <migraphx/gpu/gather.hpp>
#include <utility>
#include <functional>

@@ -54,6 +55,7 @@ struct miopen_apply
    program* prog = nullptr;
    context ctx{};
    std::unordered_map<std::string, std::function<instruction_ref(instruction_ref)>> apply_map{};
+    instruction_ref last{};

    void check_shape(shape x, instruction_ref i)
    {

@@ -64,6 +66,7 @@ struct miopen_apply
    void init()
    {
+        this->last = instruction::get_output_alias(std::prev(prog->end()));
        add_miopen_simple_op<miopen_relu>("relu", make_relu);
        add_miopen_simple_op<miopen_sigmoid>("sigmoid", make_sigmoid);
        add_miopen_simple_op<miopen_abs>("abs", make_abs);

@@ -92,6 +95,8 @@ struct miopen_apply
        add_extend_op<hip_concat, op::concat>("concat");
        add_extend_op<miopen_softmax, op::softmax>("softmax");
        add_extend_op<hip_gather, op::gather>("gather");
+        add_extend_op<hip_pad, op::pad>("pad");
+
        add_convolution_op();
        add_pooling_op();
        add_batch_norm_inference_op();

@@ -112,7 +117,7 @@ struct miopen_apply
    instruction_ref insert_allocation(instruction_ref ins, const shape& s, std::string tag = "")
    {
-        if(ins == --prog->end() and tag.empty())
+        if(ins == last and tag.empty())
        {
            return prog->add_parameter("output", s);
        }
src/targets/gpu/pad.cpp  (new file, mode 100644)

#include <migraphx/gpu/pad.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/device/pad.hpp>
#include <utility>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

shape hip_pad::compute_shape(std::vector<shape> inputs) const
{
    inputs.pop_back();
    return op.compute_shape(inputs);
}

argument hip_pad::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
    return device::pad(ctx.get_stream().get(), args.back(), args.front(), op.value, op.pads);
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
src/targets/gpu/target.cpp

@@ -33,18 +33,17 @@ std::vector<pass> target::get_passes(migraphx::context& gctx) const
        dead_code_elimination{},
        fwd_conv_batchnorm_rewrite{},
        dead_code_elimination{},
-        common_subexpression_elimination{},
-        dead_code_elimination{},
        rewrite_rnn{},
        dead_code_elimination{},
        rewrite_gru{},
        dead_code_elimination{},
+        common_subexpression_elimination{},
+        dead_code_elimination{},
        simplify_algebra{},
        dead_code_elimination{},
        constant_propagate{},
        dead_code_elimination{},
        auto_contiguous{},
-        simplify_reshapes{},
+        // simplify_reshapes{},
        dead_code_elimination{},
        lowering{ctx},
        eliminate_concat{concat_gpu_optimization{}},
test/cpu_ops_test.cpp  (+453 −81; diff collapsed, not shown)
test/gpu/miopen.cpp

@@ -934,6 +934,41 @@ struct test_concat_relu
    }
};

+struct test_pad
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        migraphx::shape s0{migraphx::shape::int32_type, {1, 96, 165, 165}};
+        std::vector<int64_t> pads0 = {0, 0, 0, 0, 0, 0, 1, 1};
+        std::vector<int64_t> pads1 = {0, 0, 0, 0, 1, 1, 1, 1};
+        std::vector<int64_t> pads2 = {1, 1, 1, 1, 0, 0, 0, 0};
+        std::vector<int64_t> pads3 = {1, 0, 1, 0, 1, 0, 2, 0};
+        auto l0 = p.add_parameter("x", s0);
+        p.add_instruction(migraphx::op::pad{pads0}, l0);
+        p.add_instruction(migraphx::op::pad{pads1}, l0);
+        p.add_instruction(migraphx::op::pad{pads2}, l0);
+        p.add_instruction(migraphx::op::pad{pads3}, l0);
+        return p;
+    }
+};
+
+struct test_pooling_autopad
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        migraphx::shape s0{migraphx::shape::float_type, {1, 3, 63, 63}};
+        auto l0 = p.add_parameter("x", s0);
+        migraphx::op::pooling op{"max"};
+        op.padding_mode = migraphx::op::padding_mode_t::same;
+        op.lengths      = {2, 2};
+        op.stride       = {2, 2};
+        p.add_instruction(op, l0);
+        return p;
+    }
+};
+
struct test_gather
{
    migraphx::program create_program() const

@@ -1049,12 +1084,354 @@ struct test_conv_bn_relu_pooling2
    }
};

struct test_rnn_forward
{
    migraphx::program create_program() const
    {
        std::size_t batch_size  = 2;
        std::size_t seq_len     = 1;
        std::size_t hidden_size = 4;
        std::size_t input_size  = 3;
        std::size_t num_dirct   = 1;
        float clip              = 0.0f;

        migraphx::program p;
        migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
        migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
        migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
        migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
        migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};

        auto seq  = p.add_parameter("seq", in_shape);
        auto w    = p.add_parameter("w", w_shape);
        auto r    = p.add_parameter("r", r_shape);
        auto bias = p.add_parameter("bias", b_shape);
        auto ih   = p.add_parameter("ih", ih_shape);

        auto output = p.add_instruction(
            migraphx::op::rnn{hidden_size,
                              {migraphx::op::tanh{}, migraphx::op::tanh{}},
                              migraphx::op::rnn::forward,
                              clip},
            seq, w, r, bias, ih);
        p.add_instruction(migraphx::op::rnn_last_output{}, output);

        return p;
    }
};

struct test_rnn_forward10
{
    migraphx::program create_program() const
    {
        std::size_t batch_size  = 2;
        std::size_t seq_len     = 10;
        std::size_t hidden_size = 4;
        std::size_t input_size  = 3;
        std::size_t num_dirct   = 1;
        float clip              = 0.0f;

        migraphx::program p;
        migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
        migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
        migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
        migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
        migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};

        auto seq  = p.add_parameter("seq", in_shape);
        auto w    = p.add_parameter("w", w_shape);
        auto r    = p.add_parameter("r", r_shape);
        auto bias = p.add_parameter("bias", b_shape);
        auto ih   = p.add_parameter("ih", ih_shape);

        auto output = p.add_instruction(
            migraphx::op::rnn{hidden_size,
                              {migraphx::op::tanh{}, migraphx::op::tanh{}},
                              migraphx::op::rnn::forward,
                              clip},
            seq, w, r, bias, ih);
        p.add_instruction(migraphx::op::rnn_last_output{}, output);

        return p;
    }
};

struct test_rnn_reverse
{
    migraphx::program create_program() const
    {
        std::size_t batch_size  = 2;
        std::size_t seq_len     = 1;
        std::size_t hidden_size = 4;
        std::size_t input_size  = 3;
        std::size_t num_dirct   = 1;
        float clip              = 0.0f;

        migraphx::program p;
        migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
        migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
        migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
        migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
        migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};

        auto seq  = p.add_parameter("seq", in_shape);
        auto w    = p.add_parameter("w", w_shape);
        auto r    = p.add_parameter("r", r_shape);
        auto bias = p.add_parameter("bias", b_shape);
        auto ih   = p.add_parameter("ih", ih_shape);

        p.add_instruction(migraphx::op::rnn{hidden_size,
                                            {migraphx::op::tanh{}, migraphx::op::tanh{}},
                                            migraphx::op::rnn::reverse,
                                            clip},
                          seq, w, r, bias, ih);

        return p;
    }
};

struct test_rnn_reverse2
{
    migraphx::program create_program() const
    {
        std::size_t batch_size  = 2;
        std::size_t seq_len     = 2;
        std::size_t hidden_size = 4;
        std::size_t input_size  = 3;
        std::size_t num_dirct   = 1;
        float clip              = 0.0f;

        migraphx::program p;
        migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
        migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
        migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
        migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
        migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};

        auto seq  = p.add_parameter("seq", in_shape);
        auto w    = p.add_parameter("w", w_shape);
        auto r    = p.add_parameter("r", r_shape);
        auto bias = p.add_parameter("bias", b_shape);
        auto ih   = p.add_parameter("ih", ih_shape);

        p.add_instruction(migraphx::op::rnn{hidden_size,
                                            {migraphx::op::tanh{}, migraphx::op::tanh{}},
                                            migraphx::op::rnn::reverse,
                                            clip},
                          seq, w, r, bias, ih);

        return p;
    }
};

struct test_rnn_3args
{
    migraphx::program create_program() const
    {
        std::size_t batch_size  = 2;
        std::size_t seq_len     = 1;
        std::size_t hidden_size = 4;
        std::size_t input_size  = 3;
        std::size_t num_dirct   = 1;
        float clip              = 0.0f;

        migraphx::program p;
        migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
        migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
        migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};

        auto seq = p.add_parameter("seq", in_shape);
        auto w   = p.add_parameter("w", w_shape);
        auto r   = p.add_parameter("r", r_shape);

        p.add_instruction(migraphx::op::rnn{hidden_size,
                                            {migraphx::op::tanh{}, migraphx::op::tanh{}},
                                            migraphx::op::rnn::reverse,
                                            clip},
                          seq, w, r);

        return p;
    }
};

struct test_rnn_4args
{
    migraphx::program create_program() const
    {
        std::size_t batch_size  = 2;
        std::size_t seq_len     = 5;
        std::size_t hidden_size = 4;
        std::size_t input_size  = 3;
        std::size_t num_dirct   = 1;
        float clip              = 0.0f;

        migraphx::program p;
        migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
        migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
        migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
        migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};

        auto seq  = p.add_parameter("seq", in_shape);
        auto w    = p.add_parameter("w", w_shape);
        auto r    = p.add_parameter("r", r_shape);
        auto bias = p.add_parameter("bias", b_shape);

        p.add_instruction(migraphx::op::rnn{hidden_size,
                                            {migraphx::op::tanh{}, migraphx::op::tanh{}},
                                            migraphx::op::rnn::reverse,
                                            clip},
                          seq, w, r, bias);

        return p;
    }
};

struct test_rnn_5args
{
    migraphx::program create_program() const
    {
        std::size_t batch_size  = 2;
        std::size_t seq_len     = 10;
        std::size_t hidden_size = 4;
        std::size_t input_size  = 3;
        std::size_t num_dirct   = 1;
        float clip              = 0.0f;

        migraphx::program p;
        migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
        migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
        migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
        migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};

        auto seq  = p.add_parameter("seq", in_shape);
        auto w    = p.add_parameter("w", w_shape);
        auto r    = p.add_parameter("r", r_shape);
        auto bias = p.add_parameter("bias", b_shape);

        auto output = p.add_instruction(
            migraphx::op::rnn{hidden_size,
                              {migraphx::op::tanh{}, migraphx::op::tanh{}},
                              migraphx::op::rnn::forward,
                              clip},
            seq, w, r, bias);
        p.add_instruction(migraphx::op::rnn_last_output{}, output);

        return p;
    }
};

struct test_rnn_bidirectional
{
    migraphx::program create_program() const
    {
        std::size_t batch_size  = 2;
        std::size_t seq_len     = 1;
        std::size_t hidden_size = 4;
        std::size_t input_size  = 3;
        std::size_t num_dirct   = 2;
        float clip              = 0.0f;

        migraphx::program p;
        migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
        migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
        migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
        migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
        migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};

        auto seq  = p.add_parameter("seq", in_shape);
        auto w    = p.add_parameter("w", w_shape);
        auto r    = p.add_parameter("r", r_shape);
        auto bias = p.add_parameter("bias", b_shape);
        auto ih   = p.add_parameter("ih", ih_shape);

        auto output = p.add_instruction(
            migraphx::op::rnn{hidden_size,
                              {migraphx::op::tanh{}, migraphx::op::tanh{}},
                              migraphx::op::rnn::bidirectional,
                              clip},
            seq, w, r, bias, ih);
        p.add_instruction(migraphx::op::rnn_last_output{}, output);

        return p;
    }
};

struct test_rnn_bidirectional10
{
    migraphx::program create_program() const
    {
        std::size_t batch_size  = 2;
        std::size_t seq_len     = 10;
        std::size_t hidden_size = 4;
        std::size_t input_size  = 3;
        std::size_t num_dirct   = 2;
        float clip              = 0.0f;

        migraphx::program p;
        migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
        migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, hidden_size, input_size}};
        migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, hidden_size, hidden_size}};
        migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 2 * hidden_size}};
        migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};

        auto seq  = p.add_parameter("seq", in_shape);
        auto w    = p.add_parameter("w", w_shape);
        auto r    = p.add_parameter("r", r_shape);
        auto bias = p.add_parameter("bias", b_shape);
        auto ih   = p.add_parameter("ih", ih_shape);

        auto output = p.add_instruction(
            migraphx::op::rnn{hidden_size,
                              {migraphx::op::tanh{}, migraphx::op::tanh{}},
                              migraphx::op::rnn::bidirectional,
                              clip},
            seq, w, r, bias, ih);
        p.add_instruction(migraphx::op::rnn_last_output{}, output);

        return p;
    }
};

int main()
{
+    verify_program<test_pooling_autopad>();
    verify_program<test_abs>();
    verify_program<test_concat>();
    verify_program<test_concat2>();
    verify_program<test_concat_relu>();
+    verify_program<test_pad>();
    verify_program<test_add>();
    verify_program<test_add_half>();
    verify_program<test_mul>();

@@ -1108,4 +1485,13 @@ int main()
    verify_program<test_slice>();
    verify_program<test_gather>();
    verify_program<test_gather_neg_axis>();
+    verify_program<test_rnn_forward>();
+    verify_program<test_rnn_forward10>();
+    verify_program<test_rnn_reverse>();
+    verify_program<test_rnn_reverse2>();
+    verify_program<test_rnn_3args>();
+    verify_program<test_rnn_4args>();
+    verify_program<test_rnn_5args>();
+    verify_program<test_rnn_bidirectional>();
+    verify_program<test_rnn_bidirectional10>();
}
test/onnx/onnx_test.cpp

@@ -718,4 +718,12 @@ TEST_CASE(group_conv_test)
    migraphx::parse_onnx("group_conv_test.onnx");
}

+TEST_CASE(pad_test)
+{
+    migraphx::program p;
+    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 2}});
+    p.add_instruction(migraphx::op::pad{{1, 1, 1, 1}}, l0);
+    migraphx::parse_onnx("pad_test.onnx");
+}
+
int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/onnx/pad_test.onnx  (new file, mode 100644)

Binary ONNX protobuf (rendered as garbled text in the web view). Per the pad_test case
in onnx_test.cpp above, it encodes a single Pad node ("test-pad", pads = [1, 1, 1, 1])
taking input "0" and producing output "1". No newline at end of file.