gaoqiong / MIGraphX · Commits

Commit be5f3539, authored Jul 09, 2019 by Shucai Xiao

    merge develop branch changes

Parents: 7e3bdc34, ebfe9735

This capture is page 1 of 6 of the commit diff (103 files changed in the full commit). Showing 20 changed files with 648 additions and 364 deletions (+648 -364).
src/include/migraphx/op/multibroadcast.hpp   (+17 -3)
src/include/migraphx/op/pooling.hpp          (+7 -37)
src/include/migraphx/op/reduce_sum.hpp       (+56 -0, new file)
src/include/migraphx/op/softmax.hpp          (+0 -7)
src/include/migraphx/operators.hpp           (+4 -0)
src/include/migraphx/pad_calc.hpp            (+13 -2)
src/include/migraphx/ranges.hpp              (+22 -0)
src/include/migraphx/raw_data.hpp            (+19 -0)
src/include/migraphx/requires.hpp            (+7 -24)
src/include/migraphx/shape.hpp               (+2 -0)
src/include/migraphx/stringutils.hpp         (+2 -0)
src/onnx/CMakeLists.txt                      (+1 -1)
src/onnx/onnx.cpp                            (+113 -5)
src/pass_manager.cpp                         (+0 -1)
src/py/migraphx_py.cpp                       (+7 -1)
src/rewrite_rnn.cpp                          (+108 -174)
src/shape.cpp                                (+18 -0)
src/simplify_reshapes.cpp                    (+179 -56)
src/targets/cpu/lowering.cpp                 (+66 -53)
src/targets/gpu/CMakeLists.txt               (+7 -0)
src/include/migraphx/op/multibroadcast.hpp

@@ -35,14 +35,28 @@ struct multibroadcast
         auto input = inputs.at(0);
         if(input.lens().empty())
-            MIGRAPHX_THROW("inputs dimensions should be > 0");
+        {
+            MIGRAPHX_THROW("MULTIBROADCAST: inputs dimensions should be > 0");
+        }
         if(input.lens().size() > output_lens.size())
-            MIGRAPHX_THROW("inputs dimensions should <= output size");
+        {
+            MIGRAPHX_THROW("MULTIBROADCAST: inputs dimensions should <= output size");
+        }
-        std::vector<size_t> bcast_strides(output_lens.size(), 0);
         auto offset = output_lens.size() - input.lens().size();
+        for(std::ptrdiff_t i = input.lens().size() - 1; i >= 0; i--)
+        {
+            if(output_lens[i + offset] != input.lens()[i] and input.lens()[i] != 1)
+            {
+                MIGRAPHX_THROW("MULTIBROADCAST: input shape {" + to_string_range(input.lens()) +
+                               "} cannot be broadcasted to {" + to_string_range(output_lens) +
+                               "}!");
+            }
+        }
+
+        std::vector<size_t> bcast_strides(output_lens.size(), 0);
         for(std::ptrdiff_t i = input.lens().size() - 1; i >= 0; i--)
         {
             if(output_lens[i + offset] == input.lens()[i])
             {
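Note: the added loop enforces the standard NumPy-style broadcast rule before any strides are computed: after right-aligning the input shape against the output shape, each input dimension must equal the matching output dimension or be 1 (a size-1 dimension is repeated by giving it stride 0). A self-contained sketch of that rule using only the standard library; the function name and signature here are illustrative, not MIGraphX API:

    #include <cassert>
    #include <cstddef>
    #include <stdexcept>
    #include <vector>

    // Right-align input dims against output dims; each dim either matches
    // (keep its stride) or is 1 (stride 0, so its single value repeats).
    std::vector<std::size_t> broadcast_strides(const std::vector<std::size_t>& in_lens,
                                               const std::vector<std::size_t>& in_strides,
                                               const std::vector<std::size_t>& out_lens)
    {
        std::vector<std::size_t> result(out_lens.size(), 0);
        std::size_t offset = out_lens.size() - in_lens.size();
        for(std::size_t i = 0; i < in_lens.size(); i++)
        {
            if(in_lens[i] == out_lens[i + offset])
                result[i + offset] = in_strides[i];
            else if(in_lens[i] != 1)
                throw std::runtime_error("cannot broadcast"); // the condition the new check throws on
        }
        return result;
    }

    int main()
    {
        // lens {3, 1} broadcast to {2, 3, 4}: the size-1 and missing axes get stride 0
        assert((broadcast_strides({3, 1}, {1, 1}, {2, 3, 4}) == std::vector<std::size_t>{0, 1, 0}));
        return 0;
    }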
src/include/migraphx/op/pooling.hpp

@@ -48,52 +48,22 @@ struct pooling
         assert(lengths[0] <= (input.lens()[2] + 2 * padding[0]));
         assert(lengths[1] <= (input.lens()[3] + 2 * padding[1]));

-        if(padding_mode == default_)
-        {
-            return {t,
-                    {
-                        input.lens()[0],
-                        input.lens()[1],
-                        std::size_t(std::max<std::ptrdiff_t>(
-                            1,
-                            floor_divide<std::ptrdiff_t>(
-                                input.lens()[2] + 2 * padding[0] - lengths[0], stride[0]) +
-                                1)),
-                        std::size_t(std::max<std::ptrdiff_t>(
-                            1,
-                            floor_divide<std::ptrdiff_t>(
-                                input.lens()[3] + 2 * padding[1] - lengths[1], stride[1]) +
-                                1)),
-                    }};
-        }
-        else if(padding_mode == same)
-        {
-            return {t,
-                    {input.lens()[0],
-                     input.lens()[1],
-                     ceil_divide<std::size_t>(input.lens()[2], stride[0]),
-                     ceil_divide<std::size_t>(input.lens()[3], stride[1])}};
-        }
-        else if(padding_mode == valid)
-        {
-            return {t,
-                    {
-                        input.lens()[0],
-                        input.lens()[1],
-                        std::size_t(std::max<std::ptrdiff_t>(
-                            1,
-                            floor_divide<std::ptrdiff_t>(input.lens()[2] - lengths[0], stride[0]) +
-                                1)),
-                        std::size_t(std::max<std::ptrdiff_t>(
-                            1,
-                            floor_divide<std::ptrdiff_t>(input.lens()[3] - lengths[1], stride[1]) +
-                                1)),
-                    }};
-        }
-        else
-        {
-            MIGRAPHX_THROW("Invalid padding mode");
-        }
+        return {t,
+                {input.lens()[0],
+                 input.lens()[1],
+                 std::size_t(std::max<std::ptrdiff_t>(
+                     1,
+                     floor_divide<std::ptrdiff_t>(
+                         input.lens()[2] + 2 * padding[0] - lengths[0], stride[0]) + 1)),
+                 std::size_t(std::max<std::ptrdiff_t>(
+                     1,
+                     floor_divide<std::ptrdiff_t>(
+                         input.lens()[3] + 2 * padding[1] - lengths[1], stride[1]) + 1))}};
     }
 };
 } // namespace op
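Note: with the padding-mode branches gone (SAME-style padding is presumably resolved earlier now; see the reworked calculate_padding in pad_calc.hpp later in this diff), the output spatial size always follows the single formula out = max(1, floor((in + 2*pad - kernel) / stride) + 1). A quick standalone check of that formula:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // out = max(1, floor((in + 2*pad - kernel) / stride) + 1)
    int64_t pooled_dim(int64_t in, int64_t pad, int64_t kernel, int64_t stride)
    {
        return std::max<int64_t>(1, (in + 2 * pad - kernel) / stride + 1);
    }

    int main()
    {
        assert(pooled_dim(224, 0, 2, 2) == 112); // 2x2, stride-2 pooling halves a 224 input
        assert(pooled_dim(7, 0, 7, 1) == 1);     // kernel == input: global pooling
        return 0;
    }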
src/include/migraphx/op/reduce_sum.hpp  (new file, mode 100644)

#ifndef MIGRAPHX_GUARD_OPERATORS_SUM_HPP
#define MIGRAPHX_GUARD_OPERATORS_SUM_HPP

#include <migraphx/check_shapes.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <vector>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {

struct reduce_sum
{
    std::vector<std::size_t> axes;

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.axes, "axes"));
    }

    std::string name() const { return "reduce_sum"; }

    shape compute_shape(std::vector<shape> inputs) const
    {
        check_shapes{inputs, *this}.has(1);
        auto s    = inputs.at(0);
        auto lens = s.lens();
        for(auto axis : axes)
            lens[axis] = 1;
        return {s.type(), lens};
    }

    argument compute(const shape& output_shape, std::vector<argument> args) const
    {
        argument result{output_shape};
        visit_all(result, args[0])([&](auto output, auto input) {
            shape_for_each(input.get_shape(), [&](auto&& in_idx) {
                auto out_idx = in_idx;
                for(auto axis : axes)
                    out_idx[axis] = 0;
                output(out_idx.begin(), out_idx.end()) += input(in_idx.begin(), in_idx.end());
            });
        });

        return result;
    }
};

} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
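Note: compute_shape collapses every reduced axis to length 1, and compute accumulates each input element into the output slot whose reduced coordinates are zeroed. A standalone sketch of that accumulation for a {2, 3} input reduced over axes = {1}, using plain vectors rather than the MIGraphX API:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main()
    {
        // input shape {2, 3}, row-major; reduce over axes = {1} -> output shape {2, 1}
        std::vector<double> in = {1, 2, 3, 4, 5, 6};
        std::vector<double> out(2, 0.0);
        for(std::size_t r = 0; r < 2; r++)
            for(std::size_t c = 0; c < 3; c++)
                out[r] += in[r * 3 + c]; // out_idx = {r, 0}: the reduced coordinate is zeroed
        std::cout << out[0] << " " << out[1] << "\n"; // prints "6 15"
        return 0;
    }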
src/include/migraphx/op/softmax.hpp

 #ifndef MIGRAPHX_GUARD_OPERATORS_SOFTMAX_HPP
 #define MIGRAPHX_GUARD_OPERATORS_SOFTMAX_HPP

-#include <array>
 #include <migraphx/operation.hpp>
 #include <migraphx/check_shapes.hpp>
-#include <migraphx/stringutils.hpp>
-#include <migraphx/streamutils.hpp>
-#include <migraphx/literal.hpp>
-#include <migraphx/shape_for_each.hpp>
 #include <migraphx/config.hpp>
-#include <cmath>
-#include <utility>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
src/include/migraphx/operators.hpp

@@ -5,6 +5,8 @@
 #include <migraphx/op/abs.hpp>
 #include <migraphx/op/acos.hpp>
 #include <migraphx/op/add.hpp>
+#include <migraphx/op/argmax.hpp>
+#include <migraphx/op/argmin.hpp>
 #include <migraphx/op/asin.hpp>
 #include <migraphx/op/as_shape.hpp>
 #include <migraphx/op/atan.hpp>
@@ -23,6 +25,7 @@
 #include <migraphx/op/div.hpp>
 #include <migraphx/op/dot.hpp>
 #include <migraphx/op/elu.hpp>
+#include <migraphx/op/erf.hpp>
 #include <migraphx/op/exp.hpp>
 #include <migraphx/op/flatten.hpp>
 #include <migraphx/op/gather.hpp>
@@ -45,6 +48,7 @@
 #include <migraphx/op/pooling.hpp>
 #include <migraphx/op/quant_convolution.hpp>
 #include <migraphx/op/quant_dot.hpp>
+#include <migraphx/op/reduce_sum.hpp>
 #include <migraphx/op/relu.hpp>
 #include <migraphx/op/reshape.hpp>
 #include <migraphx/op/rnn.hpp>
src/include/migraphx/pad_calc.hpp

@@ -2,13 +2,24 @@
 #define MIGRAPHX_GUARD_OPERATORS_PAD_CALC_HPP

 #include <utility>
+#include <cstdint>
+#include <vector>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {

-inline std::size_t calculate_padding(std::size_t weight_dim, std::size_t dilation)
+inline void calculate_padding(int64_t idx,
+                              std::vector<int64_t>& pads,
+                              int64_t input_dim,
+                              int64_t stride,
+                              int64_t dilation,
+                              int64_t weight_dim)
 {
-    return (dilation * (weight_dim - 1)) / 2;
+    int64_t output_dim = input_dim / stride;
+    int64_t pad        = std::max(static_cast<int64_t>(0),
+                                  (output_dim - 1) * stride + dilation * weight_dim - input_dim);
+    pads[idx]     = pad / 2;
+    pads[idx + 2] = pad - pad / 2;
 }

 } // namespace MIGRAPHX_INLINE_NS
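Note: the rewritten calculate_padding computes TensorFlow-style SAME padding for one spatial dimension and writes the split into a flat pads vector; pads[idx] and pads[idx + 2] are the leading and trailing ends, with the trailing side taking the extra unit when the total is odd. A standalone sketch with the same arithmetic; the {top, left, bottom, right} layout in the driver below is an assumption for illustration:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Same arithmetic as the new calculate_padding, inlined as a sketch.
    void calc_same_padding(int64_t idx, std::vector<int64_t>& pads,
                           int64_t input_dim, int64_t stride, int64_t dilation, int64_t weight_dim)
    {
        int64_t output_dim = input_dim / stride;
        int64_t pad =
            std::max<int64_t>(0, (output_dim - 1) * stride + dilation * weight_dim - input_dim);
        pads[idx]     = pad / 2;       // leading side
        pads[idx + 2] = pad - pad / 2; // trailing side gets the extra unit when pad is odd
    }

    int main()
    {
        // assumed layout: {pad_top, pad_left, pad_bottom, pad_right} for an H x W input
        std::vector<int64_t> pads(4, 0);
        calc_same_padding(0, pads, /*H*/ 224, /*stride*/ 2, /*dilation*/ 1, /*kernel_h*/ 3);
        calc_same_padding(1, pads, /*W*/ 224, /*stride*/ 2, /*dilation*/ 1, /*kernel_w*/ 3);
        std::cout << pads[0] << "," << pads[1] << "," << pads[2] << "," << pads[3] << "\n"; // 0,0,1,1
        return 0;
    }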
src/include/migraphx/ranges.hpp

@@ -33,6 +33,10 @@ auto generic_find_impl(rank<0>, C&& c, const T& x)
     return std::find(c.begin(), c.end(), x);
 }

+struct empty
+{
+};
+
 } // namespace detail

 template <class C, class T>
@@ -71,6 +75,12 @@ bool all_of(const std::initializer_list<T>& c, const Predicate& p)
     return std::all_of(c.begin(), c.end(), p);
 }

+template <class Predicate>
+bool all_of(detail::empty, const Predicate&)
+{
+    return true;
+}
+
 template <class C, class Predicate>
 bool any_of(const C& c, const Predicate& p)
 {
@@ -83,6 +93,12 @@ bool any_of(const std::initializer_list<T>& c, const Predicate& p)
     return std::any_of(c.begin(), c.end(), p);
 }

+template <class Predicate>
+bool any_of(detail::empty, const Predicate&)
+{
+    return false;
+}
+
 template <class C, class Predicate>
 bool none_of(const C& c, const Predicate& p)
 {
@@ -95,6 +111,12 @@ bool none_of(const std::initializer_list<T>& c, const Predicate& p)
     return std::none_of(c.begin(), c.end(), p);
 }

+template <class Predicate>
+bool none_of(detail::empty, const Predicate&)
+{
+    return true;
+}
+
 template <class Range, class Iterator>
 void copy(Range&& r, Iterator it)
 {
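Note: the detail::empty overloads give the range predicates their vacuous truth values (all_of is true, any_of is false, none_of is true), so generic callers can pass a tag meaning "no range to check". A self-contained sketch of the same pattern, mirroring rather than including the MIGraphX header:

    #include <algorithm>
    #include <vector>

    namespace detail { struct empty {}; }

    template <class C, class Pred>
    bool all_of(const C& c, const Pred& p) { return std::all_of(c.begin(), c.end(), p); }

    // Vacuous truth: with nothing to check, all_of holds.
    template <class Pred>
    bool all_of(detail::empty, const Pred&) { return true; }

    int main()
    {
        std::vector<int> v{2, 4};
        bool a = all_of(v, [](int x) { return x % 2 == 0; });            // true
        bool b = all_of(detail::empty{}, [](int x) { return x > 100; }); // vacuously true
        return (a and b) ? 0 : 1;
    }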
src/include/migraphx/raw_data.hpp

@@ -212,6 +212,25 @@ auto visit_all(T&& x, Ts&&... xs)
     };
 }

+template <class T>
+auto visit_all(const std::vector<T>& x)
+{
+    auto&& s = x.front().get_shape();
+    if(!std::all_of(
+           x.begin(), x.end(), [&](const T& y) { return y.get_shape().type() == s.type(); }))
+        MIGRAPHX_THROW("Types must be the same");
+    return [&](auto v) {
+        s.visit_type([&](auto as) {
+            using type = typename decltype(as)::type;
+            std::vector<tensor_view<type>> result;
+            std::transform(x.begin(), x.end(), std::back_inserter(result), [&](const auto& y) {
+                return make_view(y.get_shape(), as.from(y.data()));
+            });
+            v(result);
+        });
+    };
+}
+
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
src/include/migraphx/requires.hpp

@@ -15,35 +15,18 @@ struct and_ : std::is_same<and_<Bs...>, and_<(Bs || true)...>> // NOLINT
 template <bool B>
 using bool_c = std::integral_constant<bool, B>;

-template <int N>
-struct requires_enum
-{
-    enum e
-    {
-        a = 0
-    };
-};
-
 #define MIGRAPHX_REQUIRES_PRIMITIVE_CAT(x, y) x##y
 #define MIGRAPHX_REQUIRES_CAT(x, y) MIGRAPHX_REQUIRES_PRIMITIVE_CAT(x, y)
+#define MIGRAPHX_REQUIRES_VAR() MIGRAPHX_REQUIRES_CAT(PrivateRequires, __LINE__)

 #ifdef CPPCHECK
 #define MIGRAPHX_REQUIRES(...) class = void
 #else
-#if 0
-// TODO: This currently crashed on clang
-#define MIGRAPHX_REQUIRES(...)                                                      \
-    typename migraphx::requires_enum<__LINE__>::e MIGRAPHX_REQUIRES_CAT(            \
-        PrivateRequires,                                                            \
-        __LINE__) = migraphx::requires_enum<__LINE__>::a,                           \
-        class     = typename std::enable_if<and_<__VA_ARGS__,                       \
-                        MIGRAPHX_REQUIRES_CAT(PrivateRequires, __LINE__) ==         \
-                            migraphx::requires_enum<__LINE__>::a>{}>::type
-#else
-#define MIGRAPHX_REQUIRES(...)                                                      \
-    typename migraphx::requires_enum<__LINE__>::e MIGRAPHX_REQUIRES_CAT(            \
-        PrivateRequires, __LINE__) = migraphx::requires_enum<__LINE__>::a,          \
-        class = typename std::enable_if<and_<__VA_ARGS__>{}>::type
-#endif
+#define MIGRAPHX_REQUIRES(...)                                                      \
+    bool MIGRAPHX_REQUIRES_VAR() = true,                                            \
+    typename std::enable_if<(MIGRAPHX_REQUIRES_VAR() && (migraphx::and_<__VA_ARGS__>{})), \
+                            int>::type = 0
 #endif
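Note: MIGRAPHX_REQUIRES now expands to a defaulted bool template parameter feeding std::enable_if, replacing the requires_enum workaround. A minimal standalone illustration of that SFINAE shape (simplified and macro-free, not the exact macro expansion):

    #include <type_traits>

    // Constrain a function template the way the new macro does: a defaulted
    // non-type bool parameter whose enable_if removes the overload when the
    // condition is false.
    template <class T,
              bool PrivateRequires = true,
              typename std::enable_if<(PrivateRequires && std::is_integral<T>{}), int>::type = 0>
    T twice(T x)
    {
        return x + x;
    }

    int main() { return twice(2) == 4 ? 0 : 1; }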
src/include/migraphx/shape.hpp

@@ -99,6 +99,8 @@ struct shape
     /// Map element index to space index
     std::size_t index(std::size_t i) const;

+    std::vector<std::size_t> multi(std::size_t i) const;
+
     /// Returns true if the shape is packed with no padding
     bool packed() const;
     /// Returns true is the shape has been transposed. That is the strides are not in descending
src/include/migraphx/stringutils.hpp

@@ -52,6 +52,8 @@ inline std::string transform_string(std::string s, F f)
 inline std::string to_upper(std::string s) { return transform_string(std::move(s), ::toupper); }

+inline std::string to_lower(std::string s) { return transform_string(std::move(s), ::tolower); }
+
 inline bool starts_with(const std::string& value, const std::string& prefix)
 {
     if(prefix.size() > value.size())
src/onnx/CMakeLists.txt

@@ -19,7 +19,7 @@ rocm_install_targets(
 add_executable(read_onnx read_onnx.cpp)
 rocm_clang_tidy_check(read_onnx)
-target_link_libraries(read_onnx migraphx_onnx)
+target_link_libraries(read_onnx migraphx_cpu migraphx_onnx)

 if(MIGRAPHX_ENABLE_GPU)
src/onnx/onnx.cpp

@@ -40,6 +40,7 @@ struct onnx_parser
         add_generic_op("Sigmoid", op::sigmoid{});
         add_generic_op("Abs", op::abs{});
         add_generic_op("Exp", op::exp{});
+        add_generic_op("Erf", op::erf{});
         add_generic_op("Log", op::log{});
         // disable dropout for inference
         add_generic_op("Dropout", op::identity{});
@@ -63,6 +64,8 @@ struct onnx_parser
         add_variadic_op("Max", op::max{});
         add_variadic_op("Min", op::min{});
+        add_mem_op("ArgMax", &onnx_parser::parse_argmax);
+        add_mem_op("ArgMin", &onnx_parser::parse_argmin);
         add_mem_op("Clip", &onnx_parser::parse_clip);
         add_mem_op("LRN", &onnx_parser::parse_lrn);
         add_mem_op("ImageScaler", &onnx_parser::parse_imagescaler);
@@ -93,6 +96,7 @@ struct onnx_parser
         add_mem_op("GRU", &onnx_parser::parse_gru);
         add_mem_op("LSTM", &onnx_parser::parse_lstm);
         add_mem_op("Pad", &onnx_parser::parse_pad);
+        add_mem_op("ReduceSum", &onnx_parser::parse_reduce_sum);

         // init the activation function map
         init_actv_func();
@@ -100,6 +104,7 @@ struct onnx_parser
     void init_actv_func()
     {
+        // Support name format of all lower case or the first letter capital
         map_actv_funcs.insert(std::make_pair("tanh", op::tanh{}));
         map_actv_funcs.insert(std::make_pair("relu", op::relu{}));
         map_actv_funcs.insert(std::make_pair("sigmoid", op::sigmoid{}));
@@ -181,7 +186,15 @@ struct onnx_parser
                        s0.end(),
                        s1.begin() + offset,
                        out_lens.begin() + offset,
-                       [](auto a, auto b) { return std::max(a, b); });
+                       [&](auto a, auto b) {
+                           if(a != b and a != 1 and b != 1)
+                           {
+                               MIGRAPHX_THROW("COMPUTE_BROADCASTLEN: shape {" +
+                                              to_string_range(s0) + "} and {" +
+                                              to_string_range(s1) + "} mismatch!");
+                           }
+                           return std::max(a, b);
+                       });
         return out_lens;
     }
@@ -265,6 +278,60 @@ struct onnx_parser
         return prog.add_instruction(op::logsoftmax{axis}, std::move(args));
     }

+    instruction_ref parse_argmax(const std::string&,
+                                 const attribute_map& attributes,
+                                 std::vector<instruction_ref> args)
+    {
+        int64_t axis = 0;
+        if(contains(attributes, "axis"))
+        {
+            axis = static_cast<int64_t>(parse_value(attributes.at("axis")).at<int>());
+        }
+
+        int keep_dims = 1;
+        if(contains(attributes, "keepdims"))
+        {
+            keep_dims = parse_value(attributes.at("keepdims")).at<int>();
+        }
+
+        if(keep_dims == 0)
+        {
+            auto ins = prog.add_instruction(op::argmax{axis}, std::move(args));
+            return prog.add_instruction(op::squeeze{{axis}}, ins);
+        }
+        else
+        {
+            return prog.add_instruction(op::argmax{axis}, std::move(args));
+        }
+    }
+
+    instruction_ref parse_argmin(const std::string&,
+                                 const attribute_map& attributes,
+                                 std::vector<instruction_ref> args)
+    {
+        int64_t axis = 0;
+        if(contains(attributes, "axis"))
+        {
+            axis = static_cast<int64_t>(parse_value(attributes.at("axis")).at<int>());
+        }
+
+        int keep_dims = 1;
+        if(contains(attributes, "keepdims"))
+        {
+            keep_dims = parse_value(attributes.at("keepdims")).at<int>();
+        }
+
+        if(keep_dims == 0)
+        {
+            auto ins = prog.add_instruction(op::argmin{axis}, std::move(args));
+            return prog.add_instruction(op::squeeze{{axis}}, ins);
+        }
+        else
+        {
+            return prog.add_instruction(op::argmin{axis}, std::move(args));
+        }
+    }
+
     instruction_ref
     parse_conv(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
     {
@@ -352,7 +419,8 @@ struct onnx_parser
         {
             // insert zeros for pad op (args[0] has 4 dims)
             padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
-            l0      = prog.add_instruction(op::pad{padding}, l0);
+            l0 = prog.add_instruction(op::pad{padding, std::numeric_limits<float>::lowest()}, l0);
         }
         else
         {
@@ -870,7 +938,9 @@ struct onnx_parser
             auto names = attributes.at("activations").strings();
             vec_names.clear();
             vec_names.resize(names.size());
-            std::copy(names.begin(), names.end(), vec_names.begin());
+            std::transform(names.begin(), names.end(), vec_names.begin(), [](auto name) {
+                return to_lower(name);
+            });
         }

         auto name_it = std::find_if(vec_names.begin(), vec_names.end(), [&](auto& name) {
@@ -961,7 +1031,9 @@ struct onnx_parser
             auto names = attributes.at("activations").strings();
             vec_names.clear();
             vec_names.resize(names.size());
-            std::copy(names.begin(), names.end(), vec_names.begin());
+            std::transform(names.begin(), names.end(), vec_names.begin(), [](auto name) {
+                return to_lower(name);
+            });
         }

         // need 4 activation functions
@@ -1088,7 +1160,9 @@ struct onnx_parser
             auto names = attributes.at("activations").strings();
             vec_names.clear();
             vec_names.resize(names.size());
-            std::copy(names.begin(), names.end(), vec_names.begin());
+            std::transform(names.begin(), names.end(), vec_names.begin(), [](auto name) {
+                return to_lower(name);
+            });
         }

         // need 6 activation functions for bidirectional directions
@@ -1214,6 +1288,40 @@ struct onnx_parser
         return {hidden_states, last_output, last_cell_output};
     }

+    instruction_ref parse_reduce_sum(const std::string&,
+                                     attribute_map attributes,
+                                     std::vector<instruction_ref> args)
+    {
+        std::size_t n_dim = args.front()->get_shape().lens().size();
+
+        // default to reduce over all dimensions
+        std::vector<std::size_t> axes(n_dim);
+        std::iota(axes.begin(), axes.end(), 0);
+        if(contains(attributes, "axes"))
+        {
+            axes.clear();
+            auto&& attr_axes = attributes["axes"].ints();
+            axes             = std::vector<std::size_t>(attr_axes.begin(), attr_axes.end());
+        }
+
+        int keep_dims = 1;
+        if(contains(attributes, "keepdims"))
+        {
+            keep_dims = parse_value(attributes.at("keepdims")).at<int>();
+        }
+
+        if(keep_dims == 1)
+        {
+            return prog.add_instruction(op::reduce_sum{axes}, std::move(args));
+        }
+        else
+        {
+            auto ins = prog.add_instruction(op::reduce_sum{axes}, std::move(args));
+            std::vector<int64_t> squeeze_axes{axes.begin(), axes.end()};
+            return prog.add_instruction(op::squeeze{squeeze_axes}, ins);
+        }
+    }
+
     void parse_from(std::istream& is)
     {
         onnx::ModelProto model;
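Note: the new ArgMax, ArgMin, and ReduceSum parsers share the ONNX keepdims contract: the reduction itself always yields a size-1 reduced axis, and keepdims == 0 is implemented by appending an op::squeeze. A standalone sketch of the resulting shapes; the helper name is illustrative:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // The reduction emits a size-1 axis; keepdims == 0 squeezes it away.
    std::vector<int64_t> reduced_lens(std::vector<int64_t> lens, int64_t axis, bool keep_dims)
    {
        lens[axis] = 1;                      // what op::argmax / op::reduce_sum produce
        if(!keep_dims)
            lens.erase(lens.begin() + axis); // what the extra op::squeeze removes
        return lens;
    }

    int main()
    {
        assert((reduced_lens({2, 3, 4}, 1, true) == std::vector<int64_t>{2, 1, 4}));
        assert((reduced_lens({2, 3, 4}, 1, false) == std::vector<int64_t>{2, 4}));
        return 0;
    }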
src/pass_manager.cpp

@@ -2,7 +2,6 @@
 #include <migraphx/pass_manager.hpp>
 #include <migraphx/stringutils.hpp>
 #include <migraphx/instruction.hpp>
-#include <migraphx/operators.hpp>
 #include <migraphx/target.hpp>
 #include <migraphx/env.hpp>
 #include <migraphx/ranges.hpp>
src/py/migraphx_py.cpp

@@ -8,6 +8,7 @@
 #include <migraphx/stringutils.hpp>
 #include <migraphx/tf.hpp>
 #include <migraphx/onnx.hpp>
+#include <migraphx/type_name.hpp>
 #ifdef HAVE_GPU
 #include <migraphx/gpu/target.hpp>
@@ -101,8 +102,13 @@ migraphx::shape to_shape(const py::buffer_info& info)
             t = as.type_enum();
             n = sizeof(as());
         }
     });

+    if(n == 0)
+    {
+        MIGRAPHX_THROW("MIGRAPHX PYTHON: Unsupported data type" + info.format);
+    }
+
     auto strides = info.strides;
     std::transform(strides.begin(), strides.end(), strides.begin(), [&](auto i) -> std::size_t {
         return n > 0 ? i / n : 0;
src/rewrite_rnn.cpp

[Diff collapsed in this capture (+108 -174); contents not reproduced.]
src/shape.cpp

@@ -138,6 +138,24 @@ std::size_t shape::index(std::size_t i) const
         return result;
     }
 }

+std::vector<std::size_t> shape::multi(std::size_t i) const
+{
+    assert(this->standard());
+    std::vector<std::size_t> indices(lens().size());
+    std::transform(strides().begin(),
+                   strides().end(),
+                   lens().begin(),
+                   indices.begin(),
+                   [&](std::size_t stride, std::size_t len) {
+                       assert(len > 0 and stride > 0);
+                       return (i / stride) % len;
+                   });
+
+    return indices;
+}
+
 bool shape::packed() const { return this->elements() == this->element_space(); }

 bool shape::transposed() const
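Note: shape::multi is the inverse of shape::index for standard shapes: coordinate k of flat element index i is (i / stride[k]) % len[k]. A standalone sketch with a quick check; the helper name is illustrative:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Coordinate k of flat index i in a standard shape: (i / stride[k]) % len[k].
    std::vector<std::size_t> multi_index(std::size_t i,
                                         const std::vector<std::size_t>& lens,
                                         const std::vector<std::size_t>& strides)
    {
        std::vector<std::size_t> out(lens.size());
        for(std::size_t k = 0; k < lens.size(); k++)
            out[k] = (i / strides[k]) % lens[k];
        return out;
    }

    int main()
    {
        // shape {2, 3, 4} with row-major strides {12, 4, 1}: element 17 sits at {1, 1, 1}
        assert((multi_index(17, {2, 3, 4}, {12, 4, 1}) == std::vector<std::size_t>{1, 1, 1}));
        return 0;
    }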
src/simplify_reshapes.cpp

@@ -2,14 +2,17 @@
 #include <migraphx/program.hpp>
 #include <migraphx/instruction.hpp>
 #include <migraphx/op/as_shape.hpp>
+#include <migraphx/op/transpose.hpp>
+#include <migraphx/op/concat.hpp>
 #include <migraphx/iterator_for.hpp>
 #include <migraphx/ranges.hpp>
+#include <migraphx/matcher.hpp>
 #include <unordered_set>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {

-bool is_reshaper(instruction_ref ins)
+const auto& reshaper_names()
 {
     // clang-format off
     static const std::unordered_set<std::string> names = {
@@ -19,17 +22,10 @@ bool is_reshaper(instruction_ref ins)
         "unsqueeze"
     };
     // clang-format on
-    return contains(names, ins->name());
+    return names;
 }

-bool is_transpose_output(instruction_ref ins)
-{
-    if(ins->outputs().size() != 1)
-        return false;
-    if(ins->outputs().front()->name() == "contiguous")
-        return is_transpose_output(ins->outputs().front());
-    return ins->outputs().front()->name() == "transpose";
-}
+bool is_reshaper(instruction_ref ins) { return contains(reshaper_names(), ins->name()); }

 instruction_ref find_transpose_input(instruction_ref ins)
 {
@@ -42,21 +38,62 @@ instruction_ref find_transpose_input(instruction_ref ins)
     return ins;
 }

-void simplify_reshapes::apply(program& p) const
+auto get_transpose_dims(instruction_ref ins)
+{
+    return any_cast<const op::transpose&>(ins->get_operator()).dims;
+}
+
+std::vector<int64_t> reorder_dims(std::vector<int64_t> dims, std::vector<int64_t> permutation)
+{
+    std::vector<int64_t> result(dims.size());
+    assert(dims.size() == permutation.size());
+    for(std::size_t i = 0; i < dims.size(); i++)
+    {
+        result[i] = dims[permutation[i]];
+    }
+    return result;
+}
+
+bool is_no_transpose(const std::vector<int64_t>& dims)
+{
+    if(dims.empty())
+        return true;
+    if(dims.front() != 0)
+        return false;
+    return std::adjacent_find(dims.begin(), dims.end(), [](auto x, auto y) {
+               return (y - x) != 1;
+           }) == dims.end();
+}
+
+template <class Vector, class Op>
+std::vector<int64_t> sort_permutation(const Vector& data, Op op)
+{
+    std::vector<std::int64_t> result(data.size());
+    std::iota(result.begin(), result.end(), 0);
+    std::sort(result.begin(), result.end(), [&](auto x, auto y) { return op(data[x], data[y]); });
+    return result;
+}
+
+std::vector<int64_t> invert_permutation(const std::vector<int64_t>& permutation)
+{
+    return sort_permutation(permutation, std::less<>{});
+}
+
+std::vector<int64_t> find_permutation(const shape& s)
+{
+    return sort_permutation(s.strides(), std::greater<>{});
+}
+
+struct find_reshaper
 {
-    auto end = std::prev(p.end());
-    for(auto ins : iterator_for(p))
+    auto matcher() const
     {
-        if(ins == end and ins->name() == "contiguous")
-            continue;
-        // Skip possible dead instructions
-        if(ins->outputs().empty() and ins != end)
-            continue;
-        if(is_reshaper(ins))
-        {
-            if(std::any_of(ins->outputs().begin(), ins->outputs().end(), &is_reshaper))
-                continue;
-            // Gather reshapes
-            std::vector<instruction_ref> reshapes{ins};
-            while(is_reshaper(reshapes.back()))
-            {
+        return match::name(reshaper_names())(
+            match::any_of[match::outputs()](match::name(reshaper_names())));
+    }
+
+    void apply(program& p, const match::matcher_result& mr) const
+    {
+        auto ins = mr.result;
+        std::vector<instruction_ref> reshapes{ins};
+        while(is_reshaper(reshapes.back()))
+        {
@@ -83,21 +120,107 @@ void simplify_reshapes::apply(program& p) const
                 p.replace_instruction(r.first, r.second);
             }
         }
-        else if(ins->name() == "transpose")
-        {
-            if(is_transpose_output(ins))
-                continue;
-            auto x = ins;
-            auto t = ins;
-            do
-            {
-                x = t;
-                t = find_transpose_input(x);
-            } while(x != t and t->name() == "transpose");
-            if(t == ins or t->name() != "transpose")
-                continue;
-            p.replace_instruction(ins, t->inputs().front());
-        }
-    }
-}
+};
+
+struct find_nop_reshapes
+{
+    auto matcher() const
+    {
+        auto reshapes = reshaper_names();
+        reshapes.insert("transpose");
+        reshapes.insert("slice");
+        return match::name(reshapes)(match::same_shape(match::arg(0)));
+    }
+
+    void apply(program& p, const match::matcher_result& mr) const
+    {
+        auto ins = mr.result;
+        p.replace_instruction(ins, ins->inputs().front());
+    }
+};
+
+struct find_transpose
+{
+    auto matcher() const
+    {
+        return match::name("transpose")(match::none_of(
+            match::skip_output(match::name("contiguous"))(match::name("transpose"))));
+    }
+
+    void apply(program& p, const match::matcher_result& mr) const
+    {
+        auto ins = mr.result;
+        auto x   = ins;
+        auto t   = ins;
+        std::vector<std::int64_t> dims(ins->get_shape().lens().size());
+        std::iota(dims.begin(), dims.end(), 0);
+        do
+        {
+            dims = reorder_dims(get_transpose_dims(t), dims);
+            x    = t;
+            t    = find_transpose_input(x);
+        } while(x != t and t->name() == "transpose");
+
+        if(t == ins or t->name() != "transpose")
+            return;
+
+        if(is_no_transpose(dims))
+        {
+            p.replace_instruction(ins, t->inputs().front());
+        }
+        else
+        {
+            p.replace_instruction(ins, op::transpose{{dims}}, t->inputs().front());
+        }
+    }
+};
+
+struct find_concat_transpose
+{
+    auto matcher() const
+    {
+        return match::name("concat")(match::same_input_shapes(),
+                                     match::all_of[match::inputs()](match::transpose_shape()));
+    }
+
+    void apply(program& p, const match::matcher_result& mr) const
+    {
+        auto ins = mr.result;
+        auto s   = ins->inputs().front()->get_shape();
+        assert(s.transposed());
+        auto op           = any_cast<op::concat>(ins->get_operator());
+        auto permutation  = find_permutation(s);
+        auto ipermutation = invert_permutation(permutation);
+        op.axis           = ipermutation[op.axis];
+
+        std::vector<instruction_ref> inputs;
+        std::transform(
+            ins->inputs().begin(), ins->inputs().end(), std::back_inserter(inputs), [&](auto i) {
+                if(i->name() == "transpose" and i->inputs().front()->get_shape().standard())
+                    return i->inputs().front();
+                return p.insert_instruction(ins, op::transpose{permutation}, i);
+            });
+        auto concat = p.insert_instruction(ins, op, inputs);
+        auto t      = p.insert_instruction(ins, op::transpose{ipermutation}, concat);
+        assert(ins->get_shape().lens() == t->get_shape().lens());
+        p.replace_instruction(ins, t);
+    }
+};
+
+void simplify_reshapes::apply(program& p) const
+{
+    auto end = std::prev(p.end());
+    for(auto ins : iterator_for(p))
+    {
+        if(ins == end and ins->name() == "contiguous")
+            continue;
+        // Skip possible dead instructions
+        if(ins->outputs().empty() and ins != end)
+            continue;
+        match::find_matches(
+            p, ins, find_nop_reshapes{}, find_reshaper{}, find_transpose{}, find_concat_transpose{});
+    }
+}
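Note: find_transpose folds a chain of transpose instructions into one composite permutation by repeatedly applying reorder_dims, then erases the chain when the composite is the identity (is_no_transpose) or emits a single transpose otherwise. A standalone sketch of the composition rule:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Composite of two transposes: result[i] = dims[permutation[i]], the same
    // rule reorder_dims applies while walking the chain.
    std::vector<int64_t> reorder_dims(const std::vector<int64_t>& dims,
                                      const std::vector<int64_t>& permutation)
    {
        std::vector<int64_t> result(dims.size());
        for(std::size_t i = 0; i < dims.size(); i++)
            result[i] = dims[permutation[i]];
        return result;
    }

    int main()
    {
        std::vector<int64_t> identity{0, 1, 2};
        // transpose {2, 0, 1} followed by transpose {1, 2, 0} composes to the
        // identity, so the pass can drop both instructions
        auto composed = reorder_dims(reorder_dims(identity, {2, 0, 1}), {1, 2, 0});
        assert(composed == identity);
        return 0;
    }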
src/targets/cpu/lowering.cpp

@@ -2,7 +2,19 @@
 #include <migraphx/cpu/lowering.hpp>
 #include <migraphx/instruction.hpp>
 #include <migraphx/dfor.hpp>
-#include <migraphx/operators.hpp>
+#include <migraphx/op/batch_norm.hpp>
+#include <migraphx/op/convolution.hpp>
+#include <migraphx/op/dot.hpp>
+#include <migraphx/op/elu.hpp>
+#include <migraphx/op/im2col.hpp>
+#include <migraphx/op/leaky_relu.hpp>
+#include <migraphx/op/logsoftmax.hpp>
+#include <migraphx/op/lrn.hpp>
+#include <migraphx/op/pad.hpp>
+#include <migraphx/op/pooling.hpp>
+#include <migraphx/op/softmax.hpp>
+#include <migraphx/op/argmax.hpp>
+#include <migraphx/op/argmin.hpp>
 #include <migraphx/shape_for_each.hpp>
 #include <migraphx/iterator_for.hpp>
 #include <migraphx/par_dfor.hpp>
@@ -650,18 +662,11 @@ struct cpu_softmax
     std::string name() const { return "cpu::softmax"; }
     shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }

-    template <typename T>
-    std::size_t compute_batch_index(T idx, shape& batch_shape, int axis) const
-    {
-        idx[axis] = 0;
-        return batch_shape.index(idx);
-    }
-
     argument compute(context&, const shape& output_shape, std::vector<argument> args) const
     {
         argument result{output_shape};
         auto batch_lens = output_shape.lens();
+        std::size_t n_dims  = batch_lens[op.axis];
         batch_lens[op.axis] = 1;
         shape batch_shape{shape::int32_type, batch_lens};
@@ -669,26 +674,33 @@ struct cpu_softmax
             using value_type = typename decltype(input)::value_type;
             std::vector<value_type> batch_max(batch_shape.elements(),
                                               std::numeric_limits<value_type>::lowest());
-            shape_for_each(output_shape, [&](auto idx) {
-                auto index       = this->compute_batch_index(idx, batch_shape, op.axis);
-                batch_max[index] = std::max(batch_max[index], input(idx.begin(), idx.end()));
-            });
-
-            shape_for_each(output_shape, [&](auto idx) {
-                auto index = this->compute_batch_index(idx, batch_shape, op.axis);
-                output(idx.begin(), idx.end()) =
-                    std::exp(input(idx.begin(), idx.end()) - batch_max[index]);
-            });
-
-            std::vector<value_type> batch_sum(batch_shape.elements(), value_type(0));
-            shape_for_each(output_shape, [&](auto idx) {
-                auto index = this->compute_batch_index(idx, batch_shape, op.axis);
-                batch_sum[index] += output(idx.begin(), idx.end());
-            });
-
-            shape_for_each(output_shape, [&](auto idx) {
-                auto index = this->compute_batch_index(idx, batch_shape, op.axis);
-                output(idx.begin(), idx.end()) /= batch_sum[index];
-            });
+            std::vector<value_type> batch_sum(batch_shape.elements(), value_type(0));
+            par_for(batch_shape.elements(), [&](auto i) {
+                auto idx = batch_shape.multi(i);
+                for(std::size_t j = 0; j < n_dims; ++j)
+                {
+                    idx[op.axis] = j;
+                    batch_max[i] = std::max(batch_max[i], input(idx.begin(), idx.end()));
+                }
+
+                for(std::size_t j = 0; j < n_dims; ++j)
+                {
+                    idx[op.axis]      = j;
+                    std::size_t index = output_shape.index(idx);
+                    output[index]     = std::exp(input[index] - batch_max[i]);
+                }
+
+                for(std::size_t j = 0; j < n_dims; ++j)
+                {
+                    idx[op.axis] = j;
+                    batch_sum[i] += output(idx.begin(), idx.end());
+                }
+
+                for(std::size_t j = 0; j < n_dims; ++j)
+                {
+                    idx[op.axis] = j;
+                    output(idx.begin(), idx.end()) /= batch_sum[i];
+                }
+            });
         });
@@ -708,49 +720,50 @@ struct cpu_logsoftmax
     std::string name() const { return "cpu::logsoftmax"; }
     shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }

-    template <typename T>
-    std::size_t compute_batch_index(T idx, const shape& batch_shape, int axis) const
-    {
-        idx[axis] = 0;
-        return batch_shape.index(idx);
-    }
-
     argument compute(context&, const shape& output_shape, std::vector<argument> args) const
     {
         argument result{output_shape};
         auto batch_lens = output_shape.lens();
+        std::size_t n_dims  = batch_lens[op.axis];
         batch_lens[op.axis] = 1;
         shape batch_shape{shape::int32_type, batch_lens};

+        // use a parallel implementation to acheive better performance
+        // one thread for one batch
         visit_all(result, args[0])([&](auto output, auto input) {
             using value_type = typename decltype(input)::value_type;
             std::vector<value_type> batch_max(batch_shape.elements(),
                                               std::numeric_limits<value_type>::lowest());
-            shape_for_each(output_shape, [&](auto idx) {
-                auto index       = this->compute_batch_index(idx, batch_shape, op.axis);
-                batch_max[index] = std::max(batch_max[index], input(idx.begin(), idx.end()));
-            });
-
-            shape_for_each(output_shape, [&](auto idx) {
-                auto index = this->compute_batch_index(idx, batch_shape, op.axis);
-                output(idx.begin(), idx.end()) =
-                    input(idx.begin(), idx.end()) - batch_max[index];
-            });
-
-            std::vector<value_type> batch_sum(batch_shape.elements(), value_type(0));
-            shape_for_each(output_shape, [&](auto idx) {
-                auto index = this->compute_batch_index(idx, batch_shape, op.axis);
-                batch_sum[index] += std::exp(output(idx.begin(), idx.end()));
-            });
-
-            for(std::size_t i = 0; i < batch_sum.size(); ++i)
-            {
-                batch_sum[i] = std::log(batch_sum[i]);
-            }
-
-            shape_for_each(output_shape, [&](auto idx) {
-                auto index = this->compute_batch_index(idx, batch_shape, op.axis);
-                output(idx.begin(), idx.end()) -= batch_sum[index];
-            });
+            std::vector<value_type> batch_sum(batch_shape.elements(), value_type(0));
+            par_for(batch_shape.elements(), [&](auto i) {
+                auto idx = batch_shape.multi(i);
+                for(std::size_t j = 0; j < n_dims; ++j)
+                {
+                    idx[op.axis] = j;
+                    batch_max[i] = std::max(batch_max[i], input(idx.begin(), idx.end()));
+                }
+
+                for(std::size_t j = 0; j < n_dims; ++j)
+                {
+                    idx[op.axis]      = j;
+                    std::size_t index = output_shape.index(idx);
+                    output[index]     = input[index] - batch_max[i];
+                }
+
+                for(std::size_t j = 0; j < n_dims; ++j)
+                {
+                    idx[op.axis] = j;
+                    batch_sum[i] += std::exp(output(idx.begin(), idx.end()));
+                }
+
+                batch_sum[i] = std::log(batch_sum[i]);
+
+                for(std::size_t j = 0; j < n_dims; ++j)
+                {
+                    idx[op.axis] = j;
+                    output(idx.begin(), idx.end()) -= batch_sum[i];
+                }
+            });
         });
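Note: the rewritten cpu_softmax and cpu_logsoftmax parallelize over batch slots (par_for plus the new batch_shape.multi) and walk the reduced axis in an inner loop. The per-slot arithmetic is the usual numerically stable scheme; a standalone sketch over one contiguous axis:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Numerically stable softmax over one axis: subtract the max so exp cannot
    // overflow, exponentiate, then normalize by the sum.
    void softmax_1d(std::vector<float>& v)
    {
        float m   = *std::max_element(v.begin(), v.end());
        float sum = 0.f;
        for(auto& x : v)
        {
            x = std::exp(x - m);
            sum += x;
        }
        for(auto& x : v)
            x /= sum;
    }

    int main()
    {
        std::vector<float> v{1.f, 2.f, 3.f};
        softmax_1d(v); // entries now sum to 1; the largest input keeps the largest weight
        return 0;
    }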
src/targets/gpu/CMakeLists.txt

@@ -12,9 +12,12 @@ endif()
 add_library(migraphx_device
     device/add.cpp
+    device/argmax.cpp
+    device/argmin.cpp
     device/max.cpp
     device/min.cpp
     device/exp.cpp
+    device/erf.cpp
     device/log.cpp
     device/sin.cpp
     device/cos.cpp
@@ -36,6 +39,7 @@ add_library(migraphx_device
     device/sub.cpp
     device/pack.cpp
     device/clip.cpp
+    device/reduce_sum.cpp
 )
 set_target_properties(migraphx_device PROPERTIES EXPORT_NAME device)
 rocm_clang_tidy_check(migraphx_device)
@@ -44,6 +48,8 @@ target_include_directories(migraphx_device PUBLIC $<BUILD_INTERFACE:${CMAKE_CURR
 target_include_directories(migraphx_device PRIVATE $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/device/include>)

 add_library(migraphx_gpu
+    argmax.cpp
+    argmin.cpp
     eliminate_workspace.cpp
     fuse_ops.cpp
     hip.cpp
@@ -74,6 +80,7 @@ add_library(migraphx_gpu
     schedule_model.cpp
     adjust_allocation.cpp
     clip.cpp
+    reduce_sum.cpp
 )
 set_target_properties(migraphx_gpu PROPERTIES EXPORT_NAME gpu)
 rocm_clang_tidy_check(migraphx_gpu)