Unverified Commit 63d8e40a authored by Shucai Xiao, committed by GitHub

PRelu operator (#458)



* add prelu operator

* clang format

* add prelu to gpu lowering

* add unit tests for the PRelu operator

* clang format

* add missing onnx file for PRelu operator

* update unit tests for prelu operator

* clang format
Co-authored-by: mvermeulen <5479696+mvermeulen@users.noreply.github.com>
Co-authored-by: Paul Fultz II <pfultz2@yahoo.com>
parent a22189dd
#ifndef MIGRAPHX_GUARD_OPERATORS_PRELU_HPP
#define MIGRAPHX_GUARD_OPERATORS_PRELU_HPP
#include <migraphx/op/binary.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
struct prelu : binary<prelu>
{
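// Element-wise PReLU: negative inputs are scaled by the matching slope value,
// non-negative inputs pass through unchanged.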
auto apply() const
{
return [](auto x, auto slope) { return ((x < 0) ? (x * slope) : x); };
}
};
} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
@@ -52,6 +52,7 @@
#include <migraphx/op/outline.hpp>
#include <migraphx/op/pad.hpp>
#include <migraphx/op/pooling.hpp>
#include <migraphx/op/prelu.hpp>
#include <migraphx/op/quant_convolution.hpp>
#include <migraphx/op/quant_dot.hpp>
#include <migraphx/op/pow.hpp>
...
@@ -68,6 +68,7 @@ struct onnx_parser
add_binary_op("Div", op::div{});
add_binary_op("Mul", op::mul{});
add_binary_op("Pow", op::pow{});
add_binary_op("PRelu", op::prelu{});
add_binary_op("Sub", op::sub{});
add_variadic_op("Sum", op::add{});
...
@@ -46,6 +46,7 @@ add_library(migraphx_device
device/mul_add_relu.cpp
device/pad.cpp
device/pow.cpp
device/prelu.cpp
device/reduce_max.cpp
device/reduce_mean.cpp
device/reduce_min.cpp
...
#include <migraphx/gpu/device/prelu.hpp>
#include <migraphx/gpu/device/nary.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void prelu(hipStream_t stream, const argument& result, const argument& arg1, const argument& arg2)
{
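// nary launches an element-wise kernel on the given stream, applying the
// lambda to each pair of input and slope values.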
nary(stream, result, arg1, arg2)([](auto x, auto slope)
__device__ { return ((x < 0) ? (x * slope) : x); });
}
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_PRELU_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_PRELU_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void prelu(hipStream_t stream, const argument& result, const argument& arg1, const argument& arg2);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_PRELU_HPP
#define MIGRAPHX_GUARD_RTGLIB_PRELU_HPP
#include <migraphx/gpu/oper.hpp>
#include <migraphx/gpu/device/prelu.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct hip_prelu : binary_device<hip_prelu, device::prelu>
{
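// binary_device wires this operator to device::prelu; no additional members are needed.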
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
@@ -70,6 +70,7 @@
#include <migraphx/gpu/pow.hpp>
#include <migraphx/gpu/sqdiff.hpp>
#include <migraphx/gpu/int8_conv_pack.hpp>
#include <migraphx/gpu/prelu.hpp>
#include <utility>
#include <functional>
#include <algorithm>
@@ -160,6 +161,7 @@ struct miopen_apply
add_generic_op<hip_pow>("pow");
add_generic_op<hip_sqdiff>("sqdiff");
add_generic_op<hip_relu>("relu");
add_generic_op<hip_prelu>("prelu");
add_generic_op<hip_sign>("sign");
add_generic_op<hip_sigmoid>("sigmoid");
add_generic_op<hip_ceil>("ceil");
...
@@ -678,6 +678,21 @@ TEST_CASE(log_test)
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(prelu_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {3}};
auto x = p.add_literal(migraphx::literal{s, {-1, 0, 2}});
auto slope = p.add_literal(migraphx::literal{s, {2, 1, 2}});
p.add_instruction(migraphx::op::prelu{}, x, slope);
p.compile(migraphx::cpu::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
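// Expected element-wise results: -1 * 2 = -2, 0 stays 0, 2 stays 2.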
std::vector<float> gold = {-2.0f, 0.0f, 2.0f};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(pow_test)
{
migraphx::program p;
...
@@ -346,6 +346,21 @@ struct test_pow : verify_program<test_pow>
}
};
struct test_prelu_brcst : verify_program<test_prelu_brcst>
{
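// verify_program builds this program on multiple targets and checks that their results agree.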
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {6}};
auto x = p.add_parameter("x", s);
auto slp = p.add_parameter("slp", s);
auto r = p.add_instruction(migraphx::op::prelu{}, x, slp);
p.add_return({r});
return p;
}
};
struct test_sin : verify_program<test_sin>
{
migraphx::program create_program() const
...
@@ -1386,6 +1386,22 @@ def pow_test():
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def prelu_brcst_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 5])
arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
[2, 3, 4, 5])
node = onnx.helper.make_node(
'PRelu',
inputs=['0', '1'],
outputs=['out'],
)
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def reducel1_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
...
@@ -1065,6 +1065,20 @@ TEST_CASE(pow_test)
EXPECT(p == prog);
}
TEST_CASE(prelu_brcst_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}});
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {4, 5}});
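// The ONNX parser broadcasts the slope to the input shape, so the expected
// program contains an explicit multibroadcast before the prelu instruction.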
auto bl1 = p.add_instruction(migraphx::op::multibroadcast{l0->get_shape().lens()}, l1);
auto ret = p.add_instruction(migraphx::op::prelu{}, l0, bl1);
p.add_return({ret});
auto prog = migraphx::parse_onnx("prelu_brcst_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(reducel1_test)
{
migraphx::program p;
...
prelu_brcst_test.onnx (new binary file): serialized ONNX protobuf for the prelu_brcst_test graph, a single PRelu node with inputs "0" and "1" and output "out".