Commit 123f7a01 authored by Shucai Xiao


Merge branch 'activations' of https://github.com/ROCmSoftwarePlatform/MIGraph into activationOperators
parents 1a56cbc1 ca33154a
@@ -234,10 +234,28 @@ struct leaky_relu
        check_shapes{inputs, *this}.has(1);
        return inputs.front();
    }

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.alpha, "alpha"));
    }
};

struct elu
{
    std::string name() const { return "elu"; }
    float alpha;

    shape compute_shape(std::vector<shape> inputs) const
    {
        check_shapes{inputs, *this}.has(1);
        return inputs.front();
    }

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.alpha, "alpha"));
    }
};
...
@@ -51,6 +51,9 @@ struct onnx_parser
    {
        add_generic_op("MatMul", op::dot{});
        add_generic_op("Relu", op::relu{});
        add_generic_op("Sigmoid", op::sigmoid{});
        add_generic_op("Tanh", op::tanh{});
        add_generic_op("Abs", op::abs{});
        // disable dropout for inference
        add_generic_op("Dropout", op::identity{});
        add_generic_op("Identity", op::identity{});
@@ -63,6 +66,7 @@ struct onnx_parser
        add_mem_op("ImageScaler", &onnx_parser::parse_imagescaler);
        add_mem_op("LeakyRelu", &onnx_parser::parse_leaky_relu);
        add_mem_op("Elu", &onnx_parser::parse_elu);
        add_mem_op("Constant", &onnx_parser::parse_constant);
        add_mem_op("Conv", &onnx_parser::parse_conv);
        add_mem_op("MaxPool", &onnx_parser::parse_pooling);
@@ -387,6 +391,18 @@ struct onnx_parser
        return prog.add_instruction(op, args.front());
    }

    instruction_ref
    parse_elu(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        float alpha = 1.0; // default alpha val for elu
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        op::elu op{alpha};
        return prog.add_instruction(op, args.front());
    }

    instruction_ref parse_imagescaler(const std::string&,
                                      attribute_map attributes,
                                      std::vector<instruction_ref> args)
...
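For reference (not part of the diff), the ONNX Elu operator that `parse_elu` above handles is defined piecewise, and its `alpha` attribute defaults to 1.0, which is the same default the parser falls back to when the attribute is absent:

$$
\mathrm{Elu}(x) = \begin{cases} x, & x > 0 \\ \alpha\,(e^{x} - 1), & x \le 0 \end{cases}
$$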
@@ -19,6 +19,14 @@ T zero(const T&)
    return T(0);
}
// std::abs has no overloads for unsigned integral types; map an integral
// argument to its signed counterpart and pass any other type through unchanged.
template <class T>
typename std::conditional_t<std::is_integral<T>{}, std::make_signed<T>, std::enable_if<true, T>>::type
make_signed(T x)
{
    return x;
}
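As an aside, here is a minimal standalone sketch (illustrative only; `to_signed` is a hypothetical name, not part of the patch) of why the `make_signed` helper above is needed: `std::abs` has no overloads for unsigned integral types, so the CPU abs kernel first maps such element types to their signed counterpart while letting floating-point types pass through.

```cpp
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <type_traits>

// Hypothetical helper mirroring make_signed above: integral arguments are
// converted to the corresponding signed type, any other type is returned as-is.
template <class T>
typename std::conditional_t<std::is_integral<T>{}, std::make_signed<T>, std::enable_if<true, T>>::type
to_signed(T x)
{
    return x;
}

int main()
{
    std::uint32_t u = 7;
    auto s = to_signed(u);            // deduced as the corresponding signed integer type
    std::cout << std::abs(s) << "\n"; // well-formed: resolves to the signed overload

    double d = -2.5;
    std::cout << std::abs(to_signed(d)) << "\n"; // non-integral types pass through unchanged
    return 0;
}
```

Note that an unsigned value above the signed type's maximum (e.g. a uint8_t of 200) does not round-trip exactly through this conversion; the plain pass-through return accepts that edge case in exchange for keeping the generic lambda in abs_op a one-liner.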
//
// cpu implementation of batch norm for inference
//
@@ -339,7 +347,7 @@ struct abs_op
    std::string name() const { return "cpu::abs"; }
    auto fcn() const
    {
        return [](auto x) { return std::abs(make_signed(x)); };
    }
};
@@ -453,6 +461,17 @@ struct leaky_relu_op
    }
};
struct elu_op
{
    op::elu op;
    std::string name() const { return "cpu::elu"; }
    auto fcn() const
    {
        auto& a = op.alpha;
        // std::expm1(x) computes exp(x) - 1 without the cancellation error of
        // evaluating exp(x) - 1 directly for small |x|
        return [a](auto x) { return x > 0 ? x : a * std::expm1(x); };
    }
};
template <typename Op>
struct cpu_unary
{
@@ -599,7 +618,9 @@ struct cpu_apply
        apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
        apply_map["concat"]     = extend_op<cpu_concat, op::concat>();
        apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>();
        apply_map["elu"]        = extend_op<cpu_unary<elu_op>, op::elu>();
        apply_map["identity"]   = simple_op<cpu_unary<identity_op>>();
        apply_map["abs"]        = simple_op<cpu_unary<abs_op>>();
        apply_map["tanh"]       = simple_op<cpu_unary<tanh_op>>();
        apply_map["sigmoid"]    = simple_op<cpu_unary<sigmoid_op>>();
        apply_map["exp"]        = simple_op<cpu_unary<exp_op>>();
...
@@ -44,6 +44,10 @@ add_library(migraphx_gpu
    batchnorm.cpp
    write_literals.cpp
    rocblas.cpp
    sigmoid.cpp
    tanh.cpp
    abs.cpp
    elu.cpp
)
set_target_properties(migraphx_gpu PROPERTIES EXPORT_NAME gpu)
rocm_clang_tidy_check(migraphx_gpu)
...
#include <migraphx/gpu/abs.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {
shape miopen_abs::compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(2).not_broadcasted();
return inputs.at(1);
}
argument miopen_abs::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
float alpha = 1, beta = 0;
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
miopenActivationForward(ctx.get_stream().get_miopen(),
ad.get(),
&alpha,
x_desc.get(),
args[0].implicit(),
&beta,
y_desc.get(),
args[1].implicit());
return args[1];
}
} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
#include <migraphx/gpu/elu.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {
shape miopen_elu::compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(2).not_broadcasted();
return inputs.at(1);
}
argument miopen_elu::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
float alpha = 1, beta = 0;
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
miopenActivationForward(ctx.get_stream().get_miopen(),
ad.get(),
&alpha,
x_desc.get(),
args[0].implicit(),
&beta,
y_desc.get(),
args[1].implicit());
return args[1];
}
} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
#ifndef MIGRAPH_GUARD_RTGLIB_ABS_HPP
#define MIGRAPH_GUARD_RTGLIB_ABS_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {
struct miopen_abs
{
shared<activation_descriptor> ad;
std::string name() const { return "gpu::abs"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};
} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPH_GUARD_RTGLIB_ELU_HPP
#define MIGRAPH_GUARD_RTGLIB_ELU_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {
struct miopen_elu
{
shared<activation_descriptor> ad;
std::string name() const { return "gpu::elu"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};
} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
#endif
@@ -91,6 +91,29 @@ inline activation_descriptor make_relu()
    return ad;
}
inline activation_descriptor make_sigmoid()
{
auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
miopenSetActivationDescriptor(ad.get(), miopenActivationLOGISTIC, 0, 0, 0);
return ad;
}
inline activation_descriptor make_tanh()
{
auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
// onnx operator does not apply additional scaling for tanh
// defaults for alpha and beta are therefore set to 1
miopenSetActivationDescriptor(ad.get(), miopenActivationTANH, 1, 1, 0);
return ad;
}
inline activation_descriptor make_abs()
{
auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
miopenSetActivationDescriptor(ad.get(), miopenActivationABS, 0, 0, 0);
return ad;
}
inline activation_descriptor make_leaky_relu(double alpha)
{
    auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
@@ -98,6 +121,13 @@ inline activation_descriptor make_leaky_relu(double alpha)
    return ad;
}
inline activation_descriptor make_elu(double alpha)
{
auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
miopenSetActivationDescriptor(ad.get(), miopenActivationELU, alpha, 0, 0);
return ad;
}
inline fusion_plan_descriptor make_fusion_plan(const shape& input)
{
    auto t = make_tensor(input);
...
#ifndef MIGRAPH_GUARD_RTGLIB_SIGMOID_HPP
#define MIGRAPH_GUARD_RTGLIB_SIGMOID_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {
struct miopen_sigmoid
{
shared<activation_descriptor> ad;
std::string name() const { return "gpu::sigmoid"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};
} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPH_GUARD_RTGLIB_TANH_HPP
#define MIGRAPH_GUARD_RTGLIB_TANH_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {
struct miopen_tanh
{
shared<activation_descriptor> ad;
std::string name() const { return "gpu::tanh"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};
} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
#endif
@@ -16,7 +16,11 @@
#include <migraphx/gpu/convolution.hpp>
#include <migraphx/gpu/contiguous.hpp>
#include <migraphx/gpu/relu.hpp>
#include <migraphx/gpu/sigmoid.hpp>
#include <migraphx/gpu/tanh.hpp>
#include <migraphx/gpu/abs.hpp>
#include <migraphx/gpu/leaky_relu.hpp>
#include <migraphx/gpu/elu.hpp>
#include <migraphx/gpu/softmax.hpp>
#include <migraphx/gpu/add.hpp>
#include <migraphx/gpu/sin.hpp>
@@ -26,6 +30,7 @@
#include <migraphx/gpu/gemm.hpp>
#include <migraphx/gpu/concat.hpp>
#include <utility>
#include <functional>

namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
@@ -35,6 +40,8 @@ struct miopen_apply
{
    program* prog = nullptr;
    context ctx{};
    std::unordered_map<std::string, std::function<instruction_ref(miopen_apply&, instruction_ref)>>
        apply_map{};

    void check_shape(shape x, instruction_ref i)
    {
@@ -43,58 +50,35 @@ struct miopen_apply
        (void)i;
    }
void init()
{
apply_map["convolution"] = &miopen_apply::apply_convolution;
apply_map["relu"] = &miopen_apply::apply_relu;
apply_map["sigmoid"] = &miopen_apply::apply_sigmoid;
apply_map["tanh"] = &miopen_apply::apply_tanh;
apply_map["abs"] = &miopen_apply::apply_abs;
apply_map["leaky_relu"] = &miopen_apply::apply_leaky_relu;
apply_map["elu"] = &miopen_apply::apply_elu;
apply_map["pooling"] = &miopen_apply::apply_pooling;
apply_map["add"] = &miopen_apply::apply_add;
apply_map["sin"] = &miopen_apply::apply_sin;
apply_map["mul"] = &miopen_apply::apply_mul;
apply_map["dot"] = &miopen_apply::apply_dot;
apply_map["contiguous"] = &miopen_apply::apply_contiguous;
apply_map["concat"] = &miopen_apply::apply_concat;
apply_map["batch_norm_inference"] = &miopen_apply::apply_batch_norm_inference;
apply_map["softmax"] = &miopen_apply::apply_softmax;
}
    void apply()
    {
        init();
        for(auto it = prog->begin(); it != prog->end(); it++)
        {
            auto s = it->get_shape();
            if(apply_map.count(it->name()) > 0)
            {
                check_shape(s, apply_map.at(it->name())(*this, it));
            }
        }
    }
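As an aside, here is a minimal self-contained sketch (illustrative names only, not the MIGraphX API) of the lookup-table dispatch that the init()/apply() pair above uses in place of the former if/else chain: handlers are stored as pointers to member functions wrapped in std::function, keyed by operator name, and invoked with the object as the first argument.

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

struct applier
{
    // Operator name -> handler; std::function can wrap a pointer to member
    // function and is then called as table.at(name)(object, argument).
    std::unordered_map<std::string, std::function<void(applier&, int)>> table{};

    void init()
    {
        table["relu"] = &applier::handle_relu;
        table["elu"]  = &applier::handle_elu;
    }

    void apply(const std::string& name, int ins)
    {
        if(table.count(name) > 0)
            table.at(name)(*this, ins); // same call shape as apply_map.at(it->name())(*this, it)
    }

    void handle_relu(int ins) { std::cout << "relu handled instruction " << ins << "\n"; }
    void handle_elu(int ins) { std::cout << "elu handled instruction " << ins << "\n"; }
};

int main()
{
    applier a;
    a.init();
    a.apply("elu", 42);    // prints: elu handled instruction 42
    a.apply("unknown", 7); // not in the table, so it is skipped, like unknown ops above
    return 0;
}
```

Registering a new operator then only requires one extra line in init(), which is how the sigmoid, tanh, abs, and elu handlers are added in this commit.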
@@ -146,6 +130,33 @@ struct miopen_apply
            ins, miopen_relu{std::move(ad)}, ins->inputs().at(0), output);
    }
instruction_ref apply_sigmoid(instruction_ref ins)
{
auto ad = make_sigmoid();
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(
ins, miopen_sigmoid{std::move(ad)}, ins->inputs().at(0), output);
}
instruction_ref apply_tanh(instruction_ref ins)
{
auto ad = make_tanh();
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(
ins, miopen_tanh{std::move(ad)}, ins->inputs().at(0), output);
}
instruction_ref apply_abs(instruction_ref ins)
{
auto ad = make_abs();
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(
ins, miopen_abs{std::move(ad)}, ins->inputs().at(0), output);
}
    instruction_ref apply_leaky_relu(instruction_ref ins)
    {
        auto&& op = any_cast<op::leaky_relu>(ins->get_operator());
@@ -156,6 +167,16 @@ struct miopen_apply
            ins, miopen_leaky_relu{std::move(ad)}, ins->inputs().at(0), output);
    }
    instruction_ref apply_elu(instruction_ref ins)
    {
        auto&& op   = any_cast<op::elu>(ins->get_operator());
        auto ad     = make_elu(op.alpha);
        auto output = insert_allocation(ins, ins->get_shape());
        return prog->replace_instruction(
            ins, miopen_elu{std::move(ad)}, ins->inputs().at(0), output);
    }
    instruction_ref apply_softmax(instruction_ref ins)
    {
        auto&& op = any_cast<op::softmax>(ins->get_operator());
@@ -183,7 +204,7 @@ struct miopen_apply
            ins, hip_mul{}, ins->inputs().at(0), ins->inputs().at(1), output);
    }
    instruction_ref apply_dot(instruction_ref ins)
    {
        auto&& op   = any_cast<op::dot>(ins->get_operator());
        auto output = insert_allocation(ins, ins->get_shape());
...
#include <migraphx/gpu/sigmoid.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {
shape miopen_sigmoid::compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(2).not_broadcasted();
return inputs.at(1);
}
argument miopen_sigmoid::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
float alpha = 1, beta = 0;
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
miopenActivationForward(ctx.get_stream().get_miopen(),
ad.get(),
&alpha,
x_desc.get(),
args[0].implicit(),
&beta,
y_desc.get(),
args[1].implicit());
return args[1];
}
} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
#include <migraphx/gpu/tanh.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {
shape miopen_tanh::compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(2).not_broadcasted();
return inputs.at(1);
}
argument miopen_tanh::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
float alpha = 1, beta = 0;
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
miopenActivationForward(ctx.get_stream().get_miopen(),
ad.get(),
&alpha,
x_desc.get(),
args[0].implicit(),
&beta,
y_desc.get(),
args[1].implicit());
return args[1];
}
} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraphx
@@ -7,6 +7,10 @@
#include <migraphx/verify.hpp>
#include "test.hpp"
float sigmoid(float x) { return 1 / (1 + expf(-x)); }
float elu(float a, float x) { return x > 0 ? x : a * std::expm1(x); }
TEST_CASE(slice_test)
{
    {
@@ -1105,4 +1109,61 @@ TEST_CASE(identity_test)
    EXPECT(std::equal(data.begin(), data.end(), results_vector.begin()));
}
TEST_CASE(abs_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l = p.add_literal(migraphx::literal{s, {-1, 2, -3, 4}});
p.add_instruction(migraphx::op::abs{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1, 2, 3, 4};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(sigmoid_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l = p.add_literal(migraphx::literal{s, {-1, 2, -3, 4}});
p.add_instruction(migraphx::op::sigmoid{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{sigmoid(-1), sigmoid(2), sigmoid(-3), sigmoid(4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(tanh_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l = p.add_literal(migraphx::literal{s, {-1.0, 2.0, -3.0, 4.0}});
p.add_instruction(migraphx::op::tanh{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{tanhf(-1), tanhf(2), tanhf(-3), tanhf(4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(elu_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l = p.add_literal(migraphx::literal{s, {-1.0, 2.0, -3.0, 4.0}});
float alpha = 0.5;
p.add_instruction(migraphx::op::elu{alpha}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{elu(alpha, -1), elu(alpha, 2), elu(alpha, -3), elu(alpha, 4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -456,6 +456,39 @@ struct test_add_relu
    }
};
struct test_sigmoid
{
migraphx::program create_program() const
{
migraphx::program p;
auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
p.add_instruction(migraphx::op::sigmoid{}, x);
return p;
}
};
struct test_tanh
{
migraphx::program create_program() const
{
migraphx::program p;
auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
p.add_instruction(migraphx::op::tanh{}, x);
return p;
}
};
struct test_abs
{
migraphx::program create_program() const
{
migraphx::program p;
auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
p.add_instruction(migraphx::op::abs{}, x);
return p;
}
};
struct test_leaky_relu
{
    migraphx::program create_program() const
@@ -467,6 +500,17 @@ struct test_leaky_relu
    }
};
struct test_elu
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
        p.add_instruction(migraphx::op::elu{1.0}, x);
        return p;
    }
};
struct test_conv_pooling
{
    migraphx::program create_program() const
@@ -849,6 +893,7 @@ struct test_conv_bn_relu_pooling2
int main()
{
    verify_program<test_abs>();
    verify_program<test_concat>();
    verify_program<test_concat2>();
    verify_program<test_concat_relu>();
@@ -873,6 +918,9 @@ int main()
    verify_program<test_conv_relu_half>();
    verify_program<test_add_relu>();
    verify_program<test_leaky_relu>();
    verify_program<test_sigmoid>();
    verify_program<test_tanh>();
    verify_program<test_elu>();
    verify_program<test_conv_pooling>();
    verify_program<test_global_avg_pooling>();
    verify_program<test_global_max_pooling>();
...