Project: gaoqiong / MIGraphX

Commit 31065c7d, authored Oct 31, 2022 by charlie

Merge branch 'dyn_squeeze' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_model_test

Parents: 6bec381f, 6acbd4e4
Changes: 482 files in this merge. Showing 20 changed files with 176 additions and 104 deletions (+176 / -104).
src/include/migraphx/iterator.hpp           +2  -2
src/include/migraphx/make_op.hpp            +4  -0
src/include/migraphx/marker.hpp             +2  -2
src/include/migraphx/match/gelu_erf.hpp     +5  -5
src/include/migraphx/match/layernorm.hpp    +2  -2
src/include/migraphx/matcher.hpp            +6  -2
src/include/migraphx/module.hpp             +1  -1
src/include/migraphx/onnx.hpp               +6  -8
src/include/migraphx/op/binary.hpp          +14 -4
src/include/migraphx/op/broadcast.hpp       +91 -33
src/include/migraphx/op/common.hpp          +2  -2
src/include/migraphx/op/concat.hpp          +1  -1
src/include/migraphx/op/convert.hpp         +10 -2
src/include/migraphx/op/convolution.hpp     +8  -16
src/include/migraphx/op/deconvolution.hpp   +2  -2
src/include/migraphx/op/dot.hpp             +3  -2
src/include/migraphx/op/elu.hpp             +9  -5
src/include/migraphx/op/fmod.hpp            +0  -10
src/include/migraphx/op/gather.hpp          +1  -1
src/include/migraphx/op/leaky_relu.hpp      +7  -4
src/include/migraphx/iterator.hpp
@@ -31,9 +31,9 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 
 template <class Iterator, class EndIterator>
-auto is_end(rank<2>, Iterator it, EndIterator) -> decltype(!it._M_dereferenceable())
+auto is_end(rank<2>, Iterator it, EndIterator) -> decltype(not it._M_dereferenceable())
 {
-    return !it._M_dereferenceable();
+    return not it._M_dereferenceable();
 }
 
 template <class Iterator, class EndIterator>
src/include/migraphx/make_op.hpp
@@ -27,6 +27,8 @@
 #include <migraphx/config.hpp>
 #include <migraphx/operation.hpp>
 #include <migraphx/value.hpp>
+#include <migraphx/json.hpp>
+#include <migraphx/convert_to_json.hpp>
 
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
@@ -46,6 +48,8 @@ operation make_op(const std::string& name, const Value& v)
     return make_op_from_value(name, v);
 }
 
+operation make_json_op(const std::string& name, const std::string& s);
+
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
src/include/migraphx/marker.hpp
@@ -181,7 +181,7 @@ struct marker
     template <typename PrivateDetailTypeErasedU = PrivateDetailTypeErasedT>
     private_detail_te_handle_type(
         PrivateDetailTypeErasedT value,
-        typename std::enable_if<!std::is_reference<PrivateDetailTypeErasedU>::value,
+        typename std::enable_if<not std::is_reference<PrivateDetailTypeErasedU>::value,
                                 int>::type* = nullptr) noexcept
         : private_detail_te_value(std::move(value))
     {
@@ -233,7 +233,7 @@ struct marker
     private_detail_te_handle_base_type& private_detail_te_get_handle()
     {
         assert(private_detail_te_handle_mem_var != nullptr);
-        if(!private_detail_te_handle_mem_var.unique())
+        if(not private_detail_te_handle_mem_var.unique())
             private_detail_te_handle_mem_var = private_detail_te_handle_mem_var->clone();
         return *private_detail_te_handle_mem_var;
     }
src/include/migraphx/match/gelu_erf.hpp
@@ -38,11 +38,11 @@ struct gelu_erf_matcher
     F f;
     auto erf_fn() const
     {
-        return f("erf")(used_once(),
-                        arg(0)(used_once(),
-                               f("mul")(either_arg(0, 1)(
-                                   none_of(has_value(M_SQRT1_2, 1e-3)).bind("x"),
-                                   has_value(M_SQRT1_2, 1e-3)))));
+        auto mul_1_sqrt_2 = f("mul")(either_arg(0, 1)(
+            none_of(has_value(M_SQRT1_2, 1e-3)).bind("x"), has_value(M_SQRT1_2, 1e-3)));
+        auto div_sqrt_2 =
+            f("div")(args(none_of(has_value(M_SQRT2, 1e-3)).bind("x"), has_value(M_SQRT2, 1e-3)));
+        return f("erf")(used_once(), arg(0)(used_once(), any_of(mul_1_sqrt_2, div_sqrt_2)));
     }
     auto add_erf() const
src/include/migraphx/match/layernorm.hpp
@@ -50,8 +50,8 @@ struct layernorm_matcher
     {
         return f("div")(arg(0)(x_minus_mean()),
-                        arg(1)(skip_broadcasts(f("sqrt")(arg(0)(f("add")(either_arg(0, 1)(
-                            variance(), has_value(1e-12f))))))));
+                        arg(1)(skip_broadcasts(f("sqrt")(arg(0)(f("add")(either_arg(0, 1)(
+                            variance(), is_constant().bind("eps"))))))));
     }
 
     auto matcher() const { return layernorm_onnx(); }
src/include/migraphx/matcher.hpp
@@ -564,6 +564,11 @@ MIGRAPHX_BASIC_MATCHER(is_unused, const matcher_context& ctx, instruction_ref in
     return nullopt;
 }
 
+MIGRAPHX_PRED_MATCHER(broadcast, instruction_ref ins)
+{
+    return contains({"broadcast", "multibroadcast"}, ins->name());
+}
+
 template <class... Ms>
 auto skip(Ms... ms)
 {
@@ -813,8 +818,7 @@ inline auto has_attribute(const std::string& name)
 template <class... Ms>
 auto pointwise(Ms... ms)
 {
-    return match::has_attribute("pointwise")(match::any_of(match::nargs(1), match::nargs(2)),
-                                             ms...);
+    return match::has_attribute("pointwise")(ms...);
 }
 
 } // namespace match
src/include/migraphx/module.hpp
@@ -219,7 +219,7 @@ struct module
     friend std::ostream& operator<<(std::ostream& os, const module& m);
 
     friend bool operator==(const module& x, const module& y);
-    friend bool operator!=(const module& x, const module& y) { return !(x == y); }
+    friend bool operator!=(const module& x, const module& y) { return not(x == y); }
 
     private:
     void assign(const module& m);
src/include/migraphx/onnx.hpp
@@ -35,17 +35,13 @@ struct onnx_options
 {
     /// Old way to set default fixed dimension size
     std::size_t default_dim_value = 0;
-    /*!
-     * Default dynamic dimension size (if both default_dim_value and default_dyn_dim_value
-     * set parser throws)
-     */
+    /// Default dynamic dimension size (if both default_dim_value and default_dyn_dim_value set
+    /// parser throws)
     shape::dynamic_dimension default_dyn_dim_value = {1, 1, 0};
     /// Explicitly specify the dims of an input
     std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims = {};
-    /*!
-     * Explicitly specify dynamic dims of an input (if both map_input_dims and
-     * map_dyn_input_dims set parser throws)
-     */
+    /// Explicitly specify dynamic dims of an input (if both map_input_dims and map_dyn_input_dims
+    /// set parser throws)
     std::unordered_map<std::string, std::vector<shape::dynamic_dimension>> map_dyn_input_dims = {};
     /// Continue parsing onnx file if an unknown operator is found
     bool skip_unknown_operators = false;
@@ -53,6 +49,8 @@ struct onnx_options
     bool print_program_on_error = false;
     /// Max iter num for the loop operator
    int64_t max_loop_iterations = 10;
+    /// Use dynamic output for operators when available
+    bool use_dyn_output = false;
 };
 
 /// Create a program from an onnx file
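As a usage illustration for these options (not part of this commit), here is a minimal sketch assuming the parse_onnx overload declared further down in this header; the file name "model.onnx" and the input name "data" are hypothetical placeholders:

    #include <migraphx/onnx.hpp>

    int main()
    {
        migraphx::onnx_options options;
        // Pin the hypothetical input "data" to a fixed 1x3x224x224 shape.
        options.map_input_dims["data"] = {1, 3, 224, 224};
        // Keep parsing when an unknown operator is encountered.
        options.skip_unknown_operators = true;
        auto prog = migraphx::parse_onnx("model.onnx", options);
        return 0;
    }

Per the doc comments above, map_input_dims and map_dyn_input_dims are mutually exclusive, as are default_dim_value and default_dyn_dim_value; setting both members of a pair makes the parser throw.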
src/include/migraphx/op/binary.hpp
@@ -28,6 +28,7 @@
 #include <migraphx/check_shapes.hpp>
 #include <migraphx/argument.hpp>
 #include <migraphx/value.hpp>
+#include <migraphx/dyn_output.hpp>
 
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
@@ -60,10 +61,19 @@ struct binary : op_name<Derived>
     value attributes() const { return base_attributes(); }
 
     shape compute_shape(std::vector<shape> inputs) const
     {
-        check_shapes{inputs, static_cast<const Derived&>(*this)}.has(2).same_type().same_dims();
+        check_shapes{inputs, static_cast<const Derived&>(*this), true}
+            .has(2)
+            .same_type()
+            .same_dims();
         auto s0 = inputs.at(0);
         auto s1 = inputs.at(1);
-        if(s0 == s1 and s0.packed())
+        if(s0.dynamic() or s1.dynamic())
         {
+            if(s0 == s1)
+                return s0;
+            MIGRAPHX_THROW("BINARY: " + point_function() + ": fixed-dyn shape for inputs");
+        }
+        else if(s0 == s1 and s0.packed())
+        {
             return s0;
@@ -81,9 +91,9 @@ struct binary : op_name<Derived>
         }
     }
 
-    argument compute(const shape& output_shape, std::vector<argument> args) const
+    argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
     {
-        argument result{output_shape};
+        argument result{dyn_out.computed_shape};
         visit_all(result, args[0], args[1])([&](auto output, auto input1, auto input2) {
             std::transform(input1.begin(), input1.end(),
src/include/migraphx/op/broadcast.hpp
@@ -27,23 +27,30 @@
 #include <migraphx/check_shapes.hpp>
 #include <migraphx/argument.hpp>
 #include <migraphx/config.hpp>
+#include <migraphx/dyn_output.hpp>
 
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace op {
 
-/// The broadcast operator performs the numpy-style broadcasting of an axis of a given tensor. This
-/// is achieved primarily by setting the stride of the broadcasted axis to zero. Linear indicies are
-/// computed from multi-indicies by computing the inner product on the multi-index with the strides.
-/// For example, if we have a tensor A(2,3) it has lengths of (2,3) and strides of (3,1). If we want
-/// to compute the linear offset that corresponds to the element on the 2nd row (i = 1) and 3rd
-/// column (j = 2), we compute the following inner product (1,2) dot (3, 1) = 1*3 + 2*1 = 5. It is
-/// obvious from there that we can negate the effects of a given axis by setting the stride of that
-/// axis to zero.
+/**
+ * 1 input version:
+ * Broadcasts a tensor from the original shape to the broadcast_lens by setting the stride of
+ * broadcasted dimensions to zero. `axis` attribute for a 1D input shape is the output dimension
+ * that stays the same. ex: broadcasting shape [1024] -> [4, 1024, 3] has axis = 1 For higher rank
+ * input shapes, axis is an offset parameter for the broadcasting. Such that this operator would
+ * work in the opposite direction of NumPy broadcasting. ex: broadcasting shape [2, 2] -> [2, 2, 3]
+ * with axis = 0
+ *
+ * 2 input version:
+ * Broadcast the first input 1D shape into the second input shape based on the axis parameter.
+ * Handles broadcasting a 1D static shape into a higher rank dynamic shape.
+ * broadcast_lens is not used
+ */
 struct broadcast
 {
-    uint64_t axis = 0;
-    std::vector<std::size_t> broadcast_lens;
+    uint64_t axis                           = 0;
+    std::vector<std::size_t> broadcast_lens = {};
 
     template <class Self, class F>
     static auto reflect(Self& self, F f)
@@ -54,37 +61,88 @@ struct broadcast
     std::string name() const { return "broadcast"; }
 
     shape compute_shape(std::vector<shape> inputs) const
     {
-        auto input = inputs.at(0);
-        auto t     = input.type();
-        std::vector<size_t> bcast_strides(broadcast_lens.size(), 0);
-        // the broacast op is deprecated now, so not handling the negative
-        // value of axis anymore
-        if(axis >= broadcast_lens.size())
-        {
-            MIGRAPHX_THROW("BROADCAST : axis is out of range");
-        }
-        if(broadcast_lens.size() - axis < input.lens().size())
-        {
-            MIGRAPHX_THROW("BROADCAST: (broadcast ndims - axis) is less than input ndims");
-        }
-        if(!std::equal(input.lens().begin(), input.lens().end(), broadcast_lens.begin() + axis))
-        {
-            MIGRAPHX_THROW("BROADCAST: when broadcasting, succeeding sizes must match");
-        }
-        std::copy(input.strides().begin(), input.strides().end(), bcast_strides.begin() + axis);
-        shape output{t, broadcast_lens, std::move(bcast_strides)};
-        if(output.elements() < input.elements())
-            MIGRAPHX_THROW("BROADCAST: output size must be greater than or equal to input size");
-        return output;
+        check_shapes{inputs, *this, true}.has(1, 2);
+        auto s0 = inputs.at(0);
+        auto t  = s0.type();
+        if(inputs.size() == 1)
+        {
+            // the ONNX broadcast op is deprecated now, so not handling the negative
+            // value of axis anymore
+            if(axis >= broadcast_lens.size())
+            {
+                MIGRAPHX_THROW("BROADCAST : axis " + migraphx::to_string(axis) +
+                               " is out of range");
+            }
+            if(broadcast_lens.size() - axis < s0.lens().size())
+            {
+                MIGRAPHX_THROW("BROADCAST: (broadcast ndims - axis) is less than s0 ndims");
+            }
+            if(not std::equal(
+                   s0.lens().begin(), s0.lens().end(), broadcast_lens.begin() + axis))
+            {
+                MIGRAPHX_THROW("BROADCAST: when broadcasting, succeeding sizes must match");
+            }
+            std::vector<size_t> bcast_strides(broadcast_lens.size(), 0);
+            std::copy(s0.strides().begin(), s0.strides().end(), bcast_strides.begin() + axis);
+            shape output{t, broadcast_lens, std::move(bcast_strides)};
+            if(output.elements() < s0.elements())
+            {
+                // don't think this can occur?
+                MIGRAPHX_THROW("BROADCAST: output size must be greater than or equal to s0 size");
+            }
+            return output;
+        }
+        else
+        {
+            // two inputs
+            auto s1 = inputs.at(1);
+            if(s0.dynamic())
+            {
+                MIGRAPHX_THROW("BROADCAST_2in: s0 is a dynamic shape, does not handle broadcasting "
+                               "a dynamic shape");
+            }
+            if(s0.ndim() != 1)
+            {
+                MIGRAPHX_THROW("BROADCAST_2in: s0 has ndim " + migraphx::to_string(s0.ndim()) +
+                               ", only handle ndim = 1");
+            }
+            if(axis >= s1.ndim())
+            {
+                MIGRAPHX_THROW("BROADCAST_2in: axis " + migraphx::to_string(axis) +
+                               " is out of range");
+            }
+            if(s1.dynamic())
+            {
+                s0 = s0.to_dynamic();
+                if(s0.dyn_dims()[0] != s1.dyn_dims()[axis])
+                {
+                    MIGRAPHX_THROW("BROADCAST_2in: s0 length doesn't match with dynamic s1 axis "
+                                   "dimension length (" +
+                                   migraphx::to_string(s0.dyn_dims()[0]) + " != " +
+                                   migraphx::to_string(s1.dyn_dims()[axis]) + ")");
                }
+                return s1;
+            }
+            if(s0.lens()[0] != s1.lens()[axis])
+            {
+                MIGRAPHX_THROW("BROADCAST_2in: s0 length doesn't match with static s1 axis "
+                               "dimension length (" +
+                               migraphx::to_string(s0.dyn_dims()[0]) + " != " +
+                               migraphx::to_string(s1.dyn_dims()[axis]) + ")");
+            }
+            std::vector<size_t> bcast_strides(s1.ndim(), 0);
+            std::copy(s0.strides().begin(), s0.strides().end(), bcast_strides.begin() + axis);
+            shape output{t, s1.lens(), std::move(bcast_strides)};
+            return output;
+        }
     }
 
-    argument compute(shape output_shape, std::vector<argument> args) const
+    argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
     {
-        return args[0].reshape(output_shape);
+        return args[0].reshape(dyn_out.computed_shape);
     }
 
     std::ptrdiff_t output_alias(const std::vector<shape>&) const { return 0; }
 };
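The removed doc comment above explains broadcasting as pure stride arithmetic: a linear offset is the inner product of a multi-index with the strides, so zeroing a dimension's stride repeats the data along it. Below is a standalone sketch of that arithmetic in plain C++ (illustrative only, not MIGraphX code or part of this commit), using the new comment's example of broadcasting [1024] -> [4, 1024, 3] with axis = 1:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <numeric>
    #include <vector>

    int main()
    {
        // Broadcast a 1-D tensor of length 1024 into a [4, 1024, 3] output, axis = 1:
        // only the axis that "stays the same" keeps a non-zero stride in the output.
        const std::vector<std::size_t> broadcast_lens = {4, 1024, 3};
        const std::size_t axis                        = 1;
        const std::vector<std::size_t> input_strides  = {1}; // strides of the [1024] input
        std::vector<std::size_t> bcast_strides(broadcast_lens.size(), 0);
        std::copy(input_strides.begin(), input_strides.end(), bcast_strides.begin() + axis);

        // Linear offset = multi-index dot strides, exactly as in the old comment's
        // A(2,3) example: (1,2) dot (3,1) = 5. Here every (i, j, k) maps to element j.
        const std::size_t multi_index[3] = {2, 7, 1};
        const std::size_t offset         = std::inner_product(
            std::begin(multi_index), std::end(multi_index), bcast_strides.begin(), std::size_t{0});
        std::printf("strides = {%zu, %zu, %zu}, offset = %zu\n",
                    bcast_strides[0], bcast_strides[1], bcast_strides[2], offset); // 0, 1, 0, 7
    }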
src/include/migraphx/op/common.hpp
@@ -33,11 +33,11 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace op {
 
+// Padding mode is default_ for fixed shape padding.
+// same_lower and same_upper used for dynamic padding.
 enum padding_mode_t
 {
     default_, // NOLINT
     same,
     valid,
     same_lower,
     same_upper
 };
src/include/migraphx/op/concat.hpp
@@ -86,7 +86,7 @@ struct concat
         {
             if(l != axis)
             {
-                if(!std::all_of(inputs.begin(), inputs.end(), [&](auto s) {
+                if(not std::all_of(inputs.begin(), inputs.end(), [&](auto s) {
                        return s.lens()[l] == first_shape_lens[l];
                    }))
                 {
src/include/migraphx/op/convert.hpp
@@ -44,8 +44,16 @@ struct convert : unary<convert>
     shape compute_shape(std::vector<shape> inputs) const
     {
-        check_shapes{inputs, *this}.has(1);
-        return {target_type, inputs.at(0).lens(), inputs.at(0).strides()};
+        check_shapes{inputs, *this, true}.has(1);
+        auto input = inputs.at(0);
+        if(input.dynamic())
+        {
+            return {target_type, input.dyn_dims()};
+        }
+        else
+        {
+            return {target_type, input.lens(), input.strides()};
+        }
     }
 
     std::string point_op() const
src/include/migraphx/op/convolution.hpp
@@ -41,9 +41,8 @@ struct convolution
     std::vector<std::size_t> stride   = {1, 1};
     std::vector<std::size_t> dilation = {1, 1};
 
-    int group                      = 1;
-    padding_mode_t padding_mode    = default_;
-    bool use_dynamic_same_auto_pad = false;
+    int group                   = 1;
+    padding_mode_t padding_mode = default_;
 
     template <class Self, class F>
     static auto reflect(Self& self, F f)
@@ -52,16 +51,15 @@ struct convolution
                     f(self.stride, "stride"),
                     f(self.dilation, "dilation"),
                     f(self.group, "group"),
-                    f(self.padding_mode, "padding_mode"),
-                    f(self.use_dynamic_same_auto_pad, "use_dynamic_same_auto_pad"));
+                    f(self.padding_mode, "padding_mode"));
     }
 
     std::string name() const { return "convolution"; }
 
     void check_attribute_size() const
     {
-        if(not((padding.size() == stride.size() or (padding.size() / 2) == stride.size()) and
-               stride.size() == dilation.size()))
+        if((padding.size() != stride.size() and (padding.size() / 2) != stride.size()) or
+           stride.size() != dilation.size())
         {
             MIGRAPHX_THROW("CONVOLUTION: inconsistent attribute sizes");
         }
@@ -76,7 +74,8 @@ struct convolution
         // num of dims of input and attribute should match
        const auto input_size   = inputs[0].max_lens().size();
        const auto padding_size = padding.size();
-        if(not(input_size == padding_size / 2 + 2 or input_size == padding_size + 2))
+        if(input_size != padding_size / 2 + 2 && input_size != padding_size + 2)
         {
             MIGRAPHX_THROW("CONVOLUTION: input and attribute size mismatch!");
         }
@@ -93,13 +92,6 @@ struct convolution
            x_shape.lens().at(1) != (w_shape.lens().at(1) * group))
             MIGRAPHX_THROW("CONVOLUTION: mismatched channel numbers");
 
-        std::vector<op::padding_mode_t> dyn_pad_modes = {op::padding_mode_t::same_upper,
-                                                         op::padding_mode_t::same_lower};
-        if(use_dynamic_same_auto_pad and not contains(dyn_pad_modes, padding_mode))
-        {
-            MIGRAPHX_THROW("CONVOLUTION: use_dynamic_same_auto_pad set with invalid padding mode");
-        }
-
         if(x_shape.dynamic() or w_shape.dynamic())
         {
             return dynamic_compute_shape(x_shape, w_shape);
@@ -161,7 +153,7 @@ struct convolution
         dynamic_shape_push_back(w_shape);
         const size_t num_spatial_dims = x_shape.max_lens().size() - 2;
 
-        if(use_dynamic_same_auto_pad)
+        if(padding_mode != default_)
         {
             for(std::size_t i = 0; i < num_spatial_dims; ++i)
             {
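As a numeric illustration of the attribute-size rule enforced above (a standalone sketch, not code from this commit), the hypothetical helper below mirrors the condition that check_attribute_size() accepts: padding must match stride either one-for-one or as {before..., after...} pairs, and stride must match dilation; anything else triggers the "CONVOLUTION: inconsistent attribute sizes" throw.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Mirrors the accepted condition in convolution::check_attribute_size().
    static bool attribute_sizes_ok(const std::vector<std::size_t>& padding,
                                   const std::vector<std::size_t>& stride,
                                   const std::vector<std::size_t>& dilation)
    {
        return (padding.size() == stride.size() or padding.size() / 2 == stride.size()) and
               stride.size() == dilation.size();
    }

    int main()
    {
        std::printf("%d\n", attribute_sizes_ok({1, 1}, {1, 1}, {1, 1}));       // 1: symmetric 2-D padding
        std::printf("%d\n", attribute_sizes_ok({1, 1, 2, 2}, {1, 1}, {1, 1})); // 1: asymmetric 2-D padding
        std::printf("%d\n", attribute_sizes_ok({1, 1, 2}, {1, 1}, {1, 1}));    // 0: would throw
    }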
src/include/migraphx/op/deconvolution.hpp
@@ -61,8 +61,8 @@ struct deconvolution
     void check_attribute_size() const
     {
-        if(not((padding.size() == stride.size() or (padding.size() / 2) == stride.size()) and
-               stride.size() == dilation.size()))
+        if((padding.size() != stride.size() and (padding.size() / 2) != stride.size()) or
+           stride.size() != dilation.size())
         {
             MIGRAPHX_THROW("deconvolution: inconsistent attribute sizes");
         }
src/include/migraphx/op/dot.hpp
@@ -43,13 +43,14 @@ struct dot
         const shape& b = inputs.at(1);
         auto t         = a.type();
 
-        if(!std::all_of(inputs.begin(), inputs.end(), [](auto s) { return s.lens().size() >= 2; }))
+        if(not std::all_of(
+               inputs.begin(), inputs.end(), [](auto s) { return s.lens().size() >= 2; }))
         {
             MIGRAPHX_THROW("DOT: dot only accept 2 or more dims operands");
         }
 
         // only handle the case that the batch size of a and b are the same
-        if(!std::equal(
+        if(not std::equal(
               a.lens().rbegin() + 2, a.lens().rend(), b.lens().rbegin() + 2, b.lens().rend()))
         {
             MIGRAPHX_THROW("DOT: batch size of A and B mismatch: {" + to_string_range(a.lens()) +
src/include/migraphx/op/elu.hpp
@@ -32,14 +32,13 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace op {
 
-struct elu
+struct elu : unary<elu>
 {
-    std::string name() const { return "elu"; }
     float alpha = 1;
-    shape compute_shape(std::vector<shape> inputs) const
+
+    std::string point_op() const
     {
-        check_shapes{inputs, *this}.has(1);
-        return inputs.front();
+        return "${function:where}(${0} > 0, ${0}, ${alpha} * (${function:exp}(${0}) - 1))";
     }
 
     template <class Self, class F>
@@ -47,6 +46,11 @@ struct elu
     {
         return pack(f(self.alpha, "alpha"));
     }
+
+    auto apply() const
+    {
+        return [&](auto x) { return x > 0 ? x : alpha * std::expm1(x); };
+    }
 };
 
 } // namespace op
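A quick numeric spot-check of the formula now implemented by apply() above (illustrative only, not part of the diff): with alpha = 1, elu(-1) = expm1(-1) ≈ -0.632, while positive inputs pass through unchanged.

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float alpha = 1.0f;
        auto elu = [&](float x) { return x > 0 ? x : alpha * std::expm1(x); };
        std::printf("elu(-1.0) = %f\n", elu(-1.0f)); // about -0.632121
        std::printf("elu( 2.0) = %f\n", elu(2.0f));  // 2.000000
    }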
src/include/migraphx/op/fmod.hpp
@@ -24,17 +24,8 @@
 #ifndef MIGRAPHX_GUARD_OPERATORS_FMOD_HPP
 #define MIGRAPHX_GUARD_OPERATORS_FMOD_HPP
 
-#include <array>
 #include <migraphx/op/binary.hpp>
-#include <migraphx/check_shapes.hpp>
-#include <migraphx/stringutils.hpp>
-#include <migraphx/streamutils.hpp>
-#include <migraphx/literal.hpp>
-#include <migraphx/shape_for_each.hpp>
-#include <migraphx/config.hpp>
 #include <cmath>
-#include <utility>
-#include <type_traits>
 
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
@@ -49,7 +40,6 @@ struct fmod : binary<fmod>
         a["commutative"] = false;
         return a;
     }
-    std::string point_function() const { return "fmod"; }
     auto apply() const
     {
         return [](auto x, auto y) { return std::fmod(x, y); };
src/include/migraphx/op/gather.hpp
@@ -65,7 +65,7 @@ struct gather
         auto lens = inputs[0].lens();
         auto type = inputs[0].type();
         lens.erase(lens.begin() + axis);
-        if(!inputs[1].scalar())
+        if(not inputs[1].scalar())
         {
             auto ind_lens = inputs[1].lens();
             lens.insert(lens.begin() + axis, ind_lens.begin(), ind_lens.end());
src/include/migraphx/op/leaky_relu.hpp
@@ -26,12 +26,13 @@
 #include <migraphx/check_shapes.hpp>
 #include <migraphx/config.hpp>
+#include <migraphx/op/unary.hpp>
 
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace op {
 
-struct leaky_relu
+struct leaky_relu : unary<leaky_relu>
 {
     float alpha = 0.01;
@@ -41,11 +42,13 @@ struct leaky_relu
         return pack(f(self.alpha, "alpha"));
     }
 
+    std::string point_op() const
+    {
+        return "${function:where}(${0} > 0, ${0}, ${alpha} * ${0})";
+    }
+
     std::string name() const { return "leaky_relu"; }
-    shape compute_shape(std::vector<shape> inputs) const
+    auto apply() const
     {
-        check_shapes{inputs, *this}.has(1);
-        return inputs.front();
+        return [&](auto x) { return x > 0 ? x : x * alpha; };
     }
 };