gaoqiong / MIGraphX

Commit 8407200f
authored Nov 16, 2018 by Khalique

    add sigmoid, tanh, abs

parent 05a0e86e

Showing 10 changed files with 302 additions and 0 deletions (+302 -0)
src/onnx/onnx.cpp                                   +3   -0
src/targets/gpu/CMakeLists.txt                      +3   -0
src/targets/gpu/abs.cpp                             +38  -0
src/targets/gpu/include/migraphx/gpu/abs.hpp        +39  -0
src/targets/gpu/include/migraphx/gpu/miopen.hpp     +23  -0
src/targets/gpu/include/migraphx/gpu/sigmoid.hpp    +39  -0
src/targets/gpu/include/migraphx/gpu/tanh.hpp       +39  -0
src/targets/gpu/lowering.cpp                        +42  -0
src/targets/gpu/sigmoid.cpp                         +38  -0
src/targets/gpu/tanh.cpp                            +38  -0
src/onnx/onnx.cpp

@@ -51,6 +51,9 @@ struct onnx_parser
     {
         add_generic_op("MatMul", op::dot{});
         add_generic_op("Relu", op::relu{});
+        add_generic_op("Sigmoid", op::sigmoid{});
+        add_generic_op("Tanh", op::tanh{});
+        add_generic_op("Abs", op::abs{});
         // disable dropout for inference
         add_generic_op("Dropout", op::identity{});
         add_generic_op("Identity", op::identity{});
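For orientation: add_generic_op registers a one-to-one mapping from an ONNX node type to a fixed, attribute-free MIGraphX operator, so these three entries let the parser accept Sigmoid, Tanh, and Abs nodes directly. The expected elementwise math, as a minimal standalone sketch (reference formulas only, not code from this commit; the helper names are made up):

#include <cmath>

// Reference elementwise semantics of the newly mapped ONNX operators.
// Illustrative helpers only; not part of MIGraphX or this commit.
inline float ref_sigmoid(float x) { return 1.0f / (1.0f + std::exp(-x)); } // ONNX Sigmoid
inline float ref_tanh(float x) { return std::tanh(x); }                    // ONNX Tanh
inline float ref_abs(float x) { return std::fabs(x); }                     // ONNX Abs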
src/targets/gpu/CMakeLists.txt

@@ -42,6 +42,9 @@ add_library(migraphx_gpu
     batchnorm.cpp
     write_literals.cpp
     rocblas.cpp
+    sigmoid.cpp
+    tanh.cpp
+    abs.cpp
 )
 set_target_properties(migraphx_gpu PROPERTIES EXPORT_NAME gpu)
 rocm_clang_tidy_check(migraphx_gpu)
src/targets/gpu/abs.cpp (new file)

#include <migraphx/gpu/abs.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>

namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {

shape miopen_abs::compute_shape(const std::vector<shape>& inputs) const
{
    check_shapes{inputs, *this}.has(2).not_broadcasted();
    return inputs.at(1);
}

argument miopen_abs::compute(context& ctx,
                             const shape& output_shape,
                             const std::vector<argument>& args) const
{
    float alpha = 1, beta = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenActivationForward(ctx.get_stream().get_miopen(),
                            ad.get(),
                            &alpha,
                            x_desc.get(),
                            args[0].implicit(),
                            &beta,
                            y_desc.get(),
                            args[1].implicit());

    return args[1];
}

} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
src/targets/gpu/include/migraphx/gpu/abs.hpp (new file)

#ifndef MIGRAPH_GUARD_RTGLIB_ABS_HPP
#define MIGRAPH_GUARD_RTGLIB_ABS_HPP

#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>

namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {

struct miopen_abs
{
    shared<activation_descriptor> ad;

    std::string name() const { return "gpu::abs"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument compute(context& ctx,
                     const shape& output_shape,
                     const std::vector<argument>& args) const;
    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};

} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx

#endif
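A note on the calling convention visible in abs.cpp and abs.hpp above (the same holds for the sigmoid and tanh variants below): after lowering, each gpu::* instruction receives the original input plus a preallocated output buffer as its final argument. That is why compute_shape checks has(2) and returns inputs.at(1) (the output buffer's shape), why compute writes into args[1] and returns it, and why output_alias reports the last shape index, signalling that the result aliases the supplied output argument rather than freshly allocated memory.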
src/targets/gpu/include/migraphx/gpu/miopen.hpp

@@ -91,6 +91,29 @@ inline activation_descriptor make_relu()
     return ad;
 }

+inline activation_descriptor make_sigmoid()
+{
+    auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
+    miopenSetActivationDescriptor(ad.get(), miopenActivationLOGISTIC, 0, 0, 0);
+    return ad;
+}
+
+inline activation_descriptor make_tanh()
+{
+    auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
+    // onnx operator does not apply additional scaling for tanh
+    // defaults for alpha and beta are therefore set to 1
+    miopenSetActivationDescriptor(ad.get(), miopenActivationTANH, 1, 1, 0);
+    return ad;
+}
+
+inline activation_descriptor make_abs()
+{
+    auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
+    miopenSetActivationDescriptor(ad.get(), miopenActivationABS, 0, 0, 0);
+    return ad;
+}
+
 inline activation_descriptor make_leaky_relu(double alpha)
 {
     auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
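The three trailing constants in each miopenSetActivationDescriptor call are MIOpen's activation alpha, beta, and gamma/power parameters. As I read the MIOpen activation documentation (an assumption on my part, not something this diff states), the LOGISTIC and ABS modes ignore them, while TANH evaluates beta * tanh(alpha * x); make_tanh therefore passes alpha = beta = 1 so the result is the unscaled tanh the ONNX operator requires, as the inline comment explains. A one-function sketch of that scaling:

#include <cmath>

// Assumed MIOpen TANH-mode formula: y = beta * tanh(alpha * x).
// With alpha = beta = 1, as set in make_tanh() above, this is plain tanh.
inline float miopen_tanh_mode(float x, float alpha = 1.0f, float beta = 1.0f)
{
    return beta * std::tanh(alpha * x);
}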
src/targets/gpu/include/migraphx/gpu/sigmoid.hpp (new file)

#ifndef MIGRAPH_GUARD_RTGLIB_SIGMOID_HPP
#define MIGRAPH_GUARD_RTGLIB_SIGMOID_HPP

#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>

namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {

struct miopen_sigmoid
{
    shared<activation_descriptor> ad;

    std::string name() const { return "gpu::sigmoid"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument compute(context& ctx,
                     const shape& output_shape,
                     const std::vector<argument>& args) const;
    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};

} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx

#endif
src/targets/gpu/include/migraphx/gpu/tanh.hpp (new file)

#ifndef MIGRAPH_GUARD_RTGLIB_TANH_HPP
#define MIGRAPH_GUARD_RTGLIB_TANH_HPP

#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>

namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {

struct miopen_tanh
{
    shared<activation_descriptor> ad;

    std::string name() const { return "gpu::tanh"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument compute(context& ctx,
                     const shape& output_shape,
                     const std::vector<argument>& args) const;
    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};

} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx

#endif
src/targets/gpu/lowering.cpp

@@ -16,6 +16,9 @@
 #include <migraphx/gpu/convolution.hpp>
 #include <migraphx/gpu/contiguous.hpp>
 #include <migraphx/gpu/relu.hpp>
+#include <migraphx/gpu/sigmoid.hpp>
+#include <migraphx/gpu/tanh.hpp>
+#include <migraphx/gpu/abs.hpp>
 #include <migraphx/gpu/leaky_relu.hpp>
 #include <migraphx/gpu/softmax.hpp>
 #include <migraphx/gpu/add.hpp>

@@ -55,6 +58,18 @@ struct miopen_apply
         {
             check_shape(s, apply_relu(it));
         }
+        else if(it->name() == "sigmoid")
+        {
+            check_shape(s, apply_sigmoid(it));
+        }
+        else if(it->name() == "tanh")
+        {
+            check_shape(s, apply_tanh(it));
+        }
+        else if(it->name() == "abs")
+        {
+            check_shape(s, apply_abs(it));
+        }
         else if(it->name() == "leaky_relu")
         {
             check_shape(s, apply_leaky_relu(it));

@@ -141,6 +156,33 @@ struct miopen_apply
             ins, miopen_relu{std::move(ad)}, ins->inputs().at(0), output);
     }

+    instruction_ref apply_sigmoid(instruction_ref ins)
+    {
+        auto ad     = make_sigmoid();
+        auto output = insert_allocation(ins, ins->get_shape());
+        return prog->replace_instruction(
+            ins, miopen_sigmoid{std::move(ad)}, ins->inputs().at(0), output);
+    }
+
+    instruction_ref apply_tanh(instruction_ref ins)
+    {
+        auto ad     = make_tanh();
+        auto output = insert_allocation(ins, ins->get_shape());
+        return prog->replace_instruction(
+            ins, miopen_tanh{std::move(ad)}, ins->inputs().at(0), output);
+    }
+
+    instruction_ref apply_abs(instruction_ref ins)
+    {
+        auto ad     = make_abs();
+        auto output = insert_allocation(ins, ins->get_shape());
+        return prog->replace_instruction(
+            ins, miopen_abs{std::move(ad)}, ins->inputs().at(0), output);
+    }
+
     instruction_ref apply_leaky_relu(instruction_ref ins)
     {
         auto&& op   = any_cast<op::leaky_relu>(ins->get_operator());
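All three new lowering helpers copy the existing apply_relu pattern: build the matching MIOpen activation descriptor (make_sigmoid / make_tanh / make_abs from miopen.hpp above), insert an allocation for the output buffer sized from the instruction's shape, and replace the reference instruction with the corresponding miopen_* operator taking the original input plus that buffer, which is exactly the two-argument convention described earlier.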
src/targets/gpu/sigmoid.cpp (new file)

#include <migraphx/gpu/sigmoid.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>

namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {

shape miopen_sigmoid::compute_shape(const std::vector<shape>& inputs) const
{
    check_shapes{inputs, *this}.has(2).not_broadcasted();
    return inputs.at(1);
}

argument miopen_sigmoid::compute(context& ctx,
                                 const shape& output_shape,
                                 const std::vector<argument>& args) const
{
    float alpha = 1, beta = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenActivationForward(ctx.get_stream().get_miopen(),
                            ad.get(),
                            &alpha,
                            x_desc.get(),
                            args[0].implicit(),
                            &beta,
                            y_desc.get(),
                            args[1].implicit());

    return args[1];
}

} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
src/targets/gpu/tanh.cpp (new file)

#include <migraphx/gpu/tanh.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>

namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {

shape miopen_tanh::compute_shape(const std::vector<shape>& inputs) const
{
    check_shapes{inputs, *this}.has(2).not_broadcasted();
    return inputs.at(1);
}

argument miopen_tanh::compute(context& ctx,
                              const shape& output_shape,
                              const std::vector<argument>& args) const
{
    float alpha = 1, beta = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenActivationForward(ctx.get_stream().get_miopen(),
                            ad.get(),
                            &alpha,
                            x_desc.get(),
                            args[0].implicit(),
                            &beta,
                            y_desc.get(),
                            args[1].implicit());

    return args[1];
}

} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
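To tie the pieces together, the intended flow is: the ONNX parser now emits op::sigmoid / op::tanh / op::abs, the GPU lowering pass rewrites them into the miopen_* operators above, and those invoke miopenActivationForward at run time. Below is a rough sketch of building such a program by hand; the header paths, the shape constructor, and the compile call are assumptions based on how MIGraphX programs are typically assembled in its tests around this time, not something shown in this commit:

#include <migraphx/program.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/gpu/target.hpp>

int main()
{
    migraphx::program p;
    // Hypothetical 4x4 float input parameter.
    auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 4}});
    p.add_instruction(migraphx::op::sigmoid{}, x); // would be lowered to gpu::sigmoid
    p.compile(migraphx::gpu::target{});            // runs the lowering shown above
    return 0;
}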