"src/module.cpp" did not exist on "6a141efae8d30ebb64177a72068c96bb0c482cb3"
Commit 8407200f authored by Khalique

add sigmoid, tanh, abs

parent 05a0e86e
@@ -51,6 +51,9 @@ struct onnx_parser
{
add_generic_op("MatMul", op::dot{});
add_generic_op("Relu", op::relu{});
add_generic_op("Sigmoid", op::sigmoid{});
add_generic_op("Tanh", op::tanh{});
add_generic_op("Abs", op::abs{});
// disable dropout for inference
add_generic_op("Dropout", op::identity{});
add_generic_op("Identity", op::identity{});
...
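Sigmoid, Tanh, and Abs take no ONNX attributes, so they can reuse the parser's generic registration path. A minimal, self-contained sketch of what such a name-to-operator table can look like; op_entry and op_map are illustrative stand-ins, not the actual onnx_parser internals:

#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

// Stand-in for a migraphx operator; real operators carry compute logic.
struct op_entry
{
    std::string name;
};

// Generic registration: an ONNX node type maps to a factory. Because these
// operators carry no ONNX attributes, no per-node parsing is required.
std::unordered_map<std::string, std::function<op_entry()>> op_map;

template <class Op>
void add_generic_op(const std::string& onnx_name, Op op)
{
    op_map[onnx_name] = [op] { return op_entry{op.name}; };
}

int main()
{
    add_generic_op("Sigmoid", op_entry{"sigmoid"});
    add_generic_op("Tanh", op_entry{"tanh"});
    add_generic_op("Abs", op_entry{"abs"});
    std::cout << op_map.at("Abs")().name << "\n"; // prints "abs"
}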
@@ -42,6 +42,9 @@ add_library(migraphx_gpu
batchnorm.cpp
write_literals.cpp
rocblas.cpp
sigmoid.cpp
tanh.cpp
abs.cpp
)
set_target_properties(migraphx_gpu PROPERTIES EXPORT_NAME gpu)
rocm_clang_tidy_check(migraphx_gpu)
...
#include <migraphx/gpu/abs.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {
shape miopen_abs::compute_shape(const std::vector<shape>& inputs) const
{
// two arguments: the input tensor and the preallocated output buffer
check_shapes{inputs, *this}.has(2).not_broadcasted();
return inputs.at(1);
}
argument miopen_abs::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
// cuDNN-style blend factors: alpha = 1, beta = 0 stores the plain result
float alpha = 1, beta = 0;
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
miopenActivationForward(ctx.get_stream().get_miopen(),
ad.get(),
&alpha,
x_desc.get(),
args[0].implicit(),
&beta,
y_desc.get(),
args[1].implicit());
return args[1];
}
} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
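One thing to note: miopenActivationForward returns a miopenStatus_t, and the compute() bodies in this commit discard it. A hedged sketch of a checked call, using only the documented MIOpen status API; check_miopen is a hypothetical helper, not part of this change:

#include <miopen/miopen.h>
#include <stdexcept>
#include <string>

// Hypothetical helper: turn a failed MIOpen call into an exception.
inline void check_miopen(miopenStatus_t status, const std::string& where)
{
    if(status != miopenStatusSuccess)
        throw std::runtime_error("MIOpen call failed in " + where);
}

// At the call site in compute(), the forward call could then be wrapped:
// check_miopen(miopenActivationForward(/* ... same arguments ... */),
//              "gpu::abs");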
#ifndef MIGRAPH_GUARD_RTGLIB_ABS_HPP
#define MIGRAPH_GUARD_RTGLIB_ABS_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {
struct miopen_abs
{
shared<activation_descriptor> ad;
std::string name() const { return "gpu::abs"; }
// expects two inputs: the data tensor and the preallocated output buffer
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
// the result aliases the last argument, i.e. the output buffer
int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};
} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
#endif
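output_alias returning shapes.size() - 1 tells the memory planner that this op writes its result into the last argument (the preallocated buffer) rather than producing new storage. A toy, MIOpen-free illustration of that contract; toy_compute is purely illustrative:

#include <cassert>
#include <vector>

// Toy model of the aliasing contract encoded by output_alias(): compute()
// writes into its last argument and returns it, so the memory planner
// knows the result needs no fresh allocation.
std::vector<float>& toy_compute(const std::vector<float>& x,
                                std::vector<float>& y)
{
    for(std::size_t i = 0; i < x.size(); i++)
        y[i] = x[i] < 0 ? -x[i] : x[i]; // elementwise abs
    return y;
}

int main()
{
    std::vector<float> x = {-1.0f, 2.0f};
    std::vector<float> y(2);
    auto& r = toy_compute(x, y);
    assert(&r == &y); // the result aliases the output argument
}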
@@ -91,6 +91,29 @@ inline activation_descriptor make_relu()
return ad;
}
inline activation_descriptor make_sigmoid()
{
auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
miopenSetActivationDescriptor(ad.get(), miopenActivationLOGISTIC, 0, 0, 0);
return ad;
}
inline activation_descriptor make_tanh()
{
auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
// MIOpen's tanh mode computes beta * tanh(alpha * x); the ONNX Tanh
// operator applies no extra scaling, so alpha and beta are both set to 1
miopenSetActivationDescriptor(ad.get(), miopenActivationTANH, 1, 1, 0);
return ad;
}
inline activation_descriptor make_abs()
{
auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
miopenSetActivationDescriptor(ad.get(), miopenActivationABS, 0, 0, 0);
return ad;
}
inline activation_descriptor make_leaky_relu(double alpha)
{
auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
...
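For reference, the three descriptor modes above select these elementwise functions: LOGISTIC is 1 / (1 + exp(-x)), TANH applies beta * tanh(alpha * x) (so alpha = beta = 1 gives the plain tanh, as the comment above notes), and ABS is |x|. A small host-side sketch, plain C++ with no MIOpen dependency, that can serve as a reference when sanity-checking the GPU results:

#include <cassert>
#include <cmath>
#include <vector>

// Host-side reference for the activations wired up above.
std::vector<float> apply_elementwise(const std::vector<float>& x,
                                     float (*f)(float))
{
    std::vector<float> y(x.size());
    for(std::size_t i = 0; i < x.size(); i++)
        y[i] = f(x[i]);
    return y;
}

int main()
{
    std::vector<float> x = {-2.0f, -0.5f, 0.0f, 1.5f};
    auto sig = apply_elementwise(x, +[](float v) { return 1.0f / (1.0f + std::exp(-v)); });
    auto tnh = apply_elementwise(x, +[](float v) { return std::tanh(v); });
    auto mag = apply_elementwise(x, +[](float v) { return std::fabs(v); });
    assert(mag[0] == 2.0f); // abs(-2) == 2
    assert(sig[2] == 0.5f); // sigmoid(0) == 0.5
    assert(tnh[2] == 0.0f); // tanh(0) == 0
    return 0;
}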
#ifndef MIGRAPH_GUARD_RTGLIB_SIGMOID_HPP
#define MIGRAPH_GUARD_RTGLIB_SIGMOID_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {
struct miopen_sigmoid
{
shared<activation_descriptor> ad;
std::string name() const { return "gpu::sigmoid"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};
} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPH_GUARD_RTGLIB_TANH_HPP
#define MIGRAPH_GUARD_RTGLIB_TANH_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {
struct miopen_tanh
{
shared<activation_descriptor> ad;
std::string name() const { return "gpu::tanh"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};
} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
#endif
@@ -16,6 +16,9 @@
#include <migraphx/gpu/convolution.hpp>
#include <migraphx/gpu/contiguous.hpp>
#include <migraphx/gpu/relu.hpp>
#include <migraphx/gpu/sigmoid.hpp>
#include <migraphx/gpu/tanh.hpp>
#include <migraphx/gpu/abs.hpp>
#include <migraphx/gpu/leaky_relu.hpp>
#include <migraphx/gpu/softmax.hpp>
#include <migraphx/gpu/add.hpp>
@@ -55,6 +58,18 @@ struct miopen_apply
{
check_shape(s, apply_relu(it));
}
else if(it->name() == "sigmoid")
{
check_shape(s, apply_sigmoid(it));
}
else if(it->name() == "tanh")
{
check_shape(s, apply_tanh(it));
}
else if(it->name() == "abs")
{
check_shape(s, apply_abs(it));
}
else if(it->name() == "leaky_relu")
{
check_shape(s, apply_leaky_relu(it));
@@ -141,6 +156,33 @@ struct miopen_apply
ins, miopen_relu{std::move(ad)}, ins->inputs().at(0), output);
}
instruction_ref apply_sigmoid(instruction_ref ins)
{
auto ad = make_sigmoid();
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(
ins, miopen_sigmoid{std::move(ad)}, ins->inputs().at(0), output);
}
instruction_ref apply_tanh(instruction_ref ins)
{
auto ad = make_tanh();
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(
ins, miopen_tanh{std::move(ad)}, ins->inputs().at(0), output);
}
instruction_ref apply_abs(instruction_ref ins)
{
auto ad = make_abs();
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(
ins, miopen_abs{std::move(ad)}, ins->inputs().at(0), output);
}
instruction_ref apply_leaky_relu(instruction_ref ins)
{
auto&& op = any_cast<op::leaky_relu>(ins->get_operator());
...
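Each apply_* above follows the same pattern: allocate a GPU buffer for the result, then replace the reference instruction with its gpu:: counterpart taking (input, output). A toy, self-contained model of that rewrite on a flat instruction list; the names here are illustrative, not the real migraphx IR:

#include <iostream>
#include <string>
#include <vector>

struct instr
{
    std::string op;          // e.g. "abs" or "gpu::abs"
    std::vector<int> inputs; // indices of producing instructions
};

int main()
{
    // Before lowering: a parameter feeding a reference abs.
    std::vector<instr> prog = {{"param", {}}, {"abs", {0}}};

    // Lowering: insert an allocation, then rewrite "abs" in place as
    // "gpu::abs" taking (input, output) -- mirroring apply_abs above.
    prog.push_back({"hip::allocate", {}}); // index 2, the output buffer
    prog[1] = {"gpu::abs", {0, 2}};

    for(const auto& i : prog)
        std::cout << i.op << "\n";
}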
#include <migraphx/gpu/sigmoid.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {
shape miopen_sigmoid::compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(2).not_broadcasted();
return inputs.at(1);
}
argument miopen_sigmoid::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
float alpha = 1, beta = 0;
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
miopenActivationForward(ctx.get_stream().get_miopen(),
ad.get(),
&alpha,
x_desc.get(),
args[0].implicit(),
&beta,
y_desc.get(),
args[1].implicit());
return args[1];
}
} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
#include <migraphx/gpu/tanh.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {
shape miopen_tanh::compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(2).not_broadcasted();
return inputs.at(1);
}
argument miopen_tanh::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
float alpha = 1, beta = 0;
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
miopenActivationForward(ctx.get_stream().get_miopen(),
ad.get(),
&alpha,
x_desc.get(),
args[0].implicit(),
&beta,
y_desc.get(),
args[1].implicit());
return args[1];
}
} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
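sigmoid.cpp, tanh.cpp, and abs.cpp are identical apart from the class name; only the activation descriptor differs at runtime. A hedged refactor sketch, assuming only symbols already used in this diff; activation_forward is a hypothetical helper name, not part of this commit:

#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/context.hpp>
#include <vector>

namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {

// Shared body for the three compute() implementations: run whatever
// activation the descriptor selects, writing into the output buffer.
inline argument activation_forward(context& ctx,
                                   const shape& output_shape,
                                   const std::vector<argument>& args,
                                   const shared<activation_descriptor>& ad)
{
    float alpha = 1, beta = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenActivationForward(ctx.get_stream().get_miopen(),
                            ad.get(),
                            &alpha,
                            x_desc.get(),
                            args[0].implicit(),
                            &beta,
                            y_desc.get(),
                            args[1].implicit());
    return args[1];
}

} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx

// Each op's compute() would then reduce to:
// return activation_forward(ctx, output_shape, args, ad);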