"vscode:/vscode.git/clone" did not exist on "ef8bd7b679f90ca699d04b335df01a9f67d912cc"
Unverified commit 945e89e0 authored by Paul Fultz II, committed by GitHub

Merge pull request #90 from ROCmSoftwarePlatform/leaky_relu

added leaky_relu
parents 7d76401e 9c99e6fe
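For reference, leaky ReLU passes positive inputs through unchanged and scales negative inputs by a small slope alpha (0.01 by default in this PR). A minimal standalone C++ sketch of the elementwise function, for illustration only and not taken from the MIGraphX sources:

#include <cstdio>
#include <vector>

// f(x) = x for x > 0, alpha * x otherwise.
float leaky_relu(float x, float alpha = 0.01f) { return x > 0 ? x : x * alpha; }

int main()
{
    std::vector<float> xs = {-1.f, 0.f, 1.f};
    for(float x : xs)
        std::printf("%g -> %g\n", x, leaky_relu(x)); // -1 -> -0.01, 0 -> 0, 1 -> 1
}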
@@ -239,6 +239,22 @@ struct activation
    }
};

struct leaky_relu
{
    std::string name() const { return "leaky_relu"; }
    float alpha;
    shape compute_shape(std::vector<shape> inputs) const
    {
        check_shapes{inputs, *this}.has(1);
        return inputs.front();
    }
    friend std::ostream& operator<<(std::ostream& os, const leaky_relu& op)
    {
        os << op.name() << ":" << op.alpha;
        return os;
    }
};

struct transpose
{
    std::vector<int64_t> dims;
...
@@ -56,6 +56,7 @@ struct onnx_parser
        add_generic_op("Sub", op::sub{});
        add_generic_op("Sum", op::add{});
        add_mem_op("LeakyRelu", &onnx_parser::parse_leaky_relu);
        add_mem_op("Constant", &onnx_parser::parse_constant);
        add_mem_op("Conv", &onnx_parser::parse_conv);
        add_mem_op("MaxPool", &onnx_parser::parse_pooling);
@@ -260,6 +261,19 @@ struct onnx_parser
        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref parse_leaky_relu(const std::string&,
                                     attribute_map attributes,
                                     std::vector<instruction_ref> args)
    {
        // 0.01 is the ONNX default for the LeakyRelu alpha attribute.
        float alpha = 0.01;
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        op::leaky_relu op{alpha};
        return prog.add_instruction(op, args.front());
    }

    void parse_from(std::istream& is)
    {
        onnx::ModelProto model;
...
@@ -413,6 +413,17 @@ struct relu_op
    }
};

struct leaky_relu_op
{
    op::leaky_relu op;
    std::string name() const { return "cpu::leaky_relu"; }
    auto fcn() const
    {
        auto& a = op.alpha;
        return [a](auto x) { return x > 0 ? x : x * a; };
    }
};

template <typename Op>
struct cpu_unary
{
@@ -557,6 +568,7 @@ struct cpu_apply
        apply_map["batch_norm_inference"] =
            extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
        apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
        apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>();
        apply_map["identity"] = simple_op<cpu_unary<identity_op>>();
        apply_map["tanh"] = simple_op<cpu_unary<tanh_op>>();
...
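The CPU registration above wraps leaky_relu_op in cpu_unary, which applies the lambda returned by fcn() to every element. A standalone sketch of that pattern (apply_unary and the buffer handling are illustrative assumptions, not the actual cpu_unary implementation):

#include <algorithm>
#include <vector>

// Apply a unary functor over a contiguous float buffer, element by element.
template <class F>
std::vector<float> apply_unary(const std::vector<float>& in, F f)
{
    std::vector<float> out(in.size());
    std::transform(in.begin(), in.end(), out.begin(), f);
    return out;
}

int main()
{
    float a = 0.01f;
    auto fcn = [a](float x) { return x > 0 ? x : x * a; }; // same form as leaky_relu_op::fcn()
    auto out = apply_unary({-1.f, 0.f, 1.f}, fcn);          // yields {-0.01f, 0.f, 1.f}
    (void)out;
}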
@@ -32,6 +32,7 @@ add_library(migraph_gpu
    softmax.cpp
    contiguous.cpp
    relu.cpp
    leaky_relu.cpp
    add.cpp
    batchnorm.cpp
    write_literals.cpp
...
#ifndef MIGRAPH_GUARD_RTGLIB_LEAKY_RELU_HPP
#define MIGRAPH_GUARD_RTGLIB_LEAKY_RELU_HPP

#include <migraph/gpu/lowering.hpp>
#include <migraph/manage_ptr.hpp>
#include <migraph/instruction.hpp>
#include <migraph/operators.hpp>
#include <migraph/generate.hpp>
#include <migraph/shape_for_each.hpp>
#include <migraph/gpu/miopen.hpp>
#include <migraph/gpu/hip.hpp>
#include <migraph/dfor.hpp>
#include <migraph/gpu/device/contiguous.hpp>
#include <migraph/gpu/device/add.hpp>
#include <migraph/iterator_for.hpp>
#include <migraph/gpu/rocblas.hpp>
#include <migraph/gpu/context.hpp>
#include <utility>

namespace migraph {
namespace gpu {

struct miopen_leaky_relu
{
    shared<activation_descriptor> ad;
    std::string name() const { return "gpu::leaky_relu"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument
    compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
};

} // namespace gpu
} // namespace migraph

#endif
@@ -87,6 +87,13 @@ inline activation_descriptor make_relu()
    return ad;
}

inline activation_descriptor make_leaky_relu(double alpha)
{
    auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
    // For miopenActivationLEAKYRELU, alpha is the negative slope; the remaining
    // descriptor parameters are unused and passed as 0.
    miopenSetActivationDescriptor(ad.get(), miopenActivationLEAKYRELU, alpha, 0, 0);
    return ad;
}

inline fusion_plan_descriptor make_fusion_plan(const shape& input)
{
    auto t = make_tensor(input);
...
#include <migraph/gpu/leaky_relu.hpp>
#include <migraph/operators.hpp>
#include <migraph/manage_ptr.hpp>
#include <migraph/gpu/miopen.hpp>
#include <utility>

namespace migraph {
namespace gpu {

shape miopen_leaky_relu::compute_shape(const std::vector<shape>& inputs) const
{
    check_shapes{inputs, *this}.has(2).not_broadcasted();
    return inputs.at(1);
}

argument miopen_leaky_relu::compute(context& ctx,
                                    const shape& output_shape,
                                    const std::vector<argument>& args) const
{
    // Blending factors for the MIOpen activation call; the leaky slope itself is
    // stored in the activation descriptor (ad).
    float alpha = 1, beta = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenActivationForward(ctx.handle.get(),
                            ad.get(),
                            &alpha,
                            x_desc.get(),
                            args[0].implicit(),
                            &beta,
                            y_desc.get(),
                            args[1].implicit());
    // args[1] is the output buffer allocated by the lowering pass.
    return args[1];
}

} // namespace gpu
} // namespace migraph
@@ -16,6 +16,7 @@
#include <migraph/gpu/convolution.hpp>
#include <migraph/gpu/contiguous.hpp>
#include <migraph/gpu/relu.hpp>
#include <migraph/gpu/leaky_relu.hpp>
#include <migraph/gpu/softmax.hpp>
#include <migraph/gpu/add.hpp>
#include <migraph/gpu/batchnorm.hpp>
@@ -51,6 +52,10 @@ struct miopen_apply
        {
            check_shape(s, apply_activation(it));
        }
        else if(it->name() == "leaky_relu")
        {
            check_shape(s, apply_leaky_relu(it));
        }
        else if(it->name() == "pooling")
        {
            check_shape(s, apply_pooling(it));
@@ -129,6 +134,16 @@ struct miopen_apply
        return ins;
    }

    instruction_ref apply_leaky_relu(instruction_ref ins)
    {
        auto&& op = any_cast<op::leaky_relu>(ins->get_operator());
        auto ad = make_leaky_relu(op.alpha);
        auto output = insert_allocation(ins, ins->get_shape());
        return prog->replace_instruction(
            ins, miopen_leaky_relu{std::move(ad)}, ins->inputs().at(0), output);
    }

    instruction_ref apply_softmax(instruction_ref ins)
    {
        auto&& op = any_cast<op::softmax>(ins->get_operator());
...
@@ -461,6 +461,34 @@ void div_test()
    EXPECT(migraph::verify_range(results_vector, gold));
}

void relu_test()
{
    migraph::program p;
    migraph::shape s{migraph::shape::float_type, {3}};
    auto l = p.add_literal(migraph::literal{s, {-1.f, 0.f, 1.f}});
    p.add_instruction(migraph::op::activation{"relu"}, l);
    p.compile(migraph::cpu::cpu_target{});
    auto result = p.eval({});
    std::vector<float> results_vector(3);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {0.f, 0.f, 1.f};
    EXPECT(migraph::verify_range(results_vector, gold));
}

void leaky_relu_test()
{
    migraph::program p;
    migraph::shape s{migraph::shape::float_type, {3}};
    auto l = p.add_literal(migraph::literal{s, {-1.f, 0.f, 1.f}});
    p.add_instruction(migraph::op::leaky_relu{0.01}, l);
    p.compile(migraph::cpu::cpu_target{});
    auto result = p.eval({});
    std::vector<float> results_vector(3);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {-0.01f, 0.f, 1.f};
    EXPECT(migraph::verify_range(results_vector, gold));
}

void reshape_test()
{
    migraph::shape a_shape{migraph::shape::float_type, {24, 1, 1, 1}};
@@ -917,6 +945,9 @@ int main()
    add_broadcast_test();
    sub_test();
    mul_test();
    div_test();
    relu_test();
    leaky_relu_test();
    gemm_test<float>();
    gemm_test<double>();
    reshape_test();
...
@@ -368,6 +368,17 @@ struct test_add_relu
    }
};

struct test_leaky_relu
{
    migraph::program create_program() const
    {
        migraph::program p;
        auto x = p.add_parameter("x", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
        p.add_instruction(migraph::op::leaky_relu{0.01}, x);
        return p;
    }
};

struct test_conv_pooling
{
    migraph::program create_program() const
@@ -619,6 +630,7 @@ int main()
    verify_program<test_conv2>();
    verify_program<test_conv_relu>();
    verify_program<test_add_relu>();
    verify_program<test_leaky_relu>();
    verify_program<test_conv_pooling>();
    verify_program<test_gemm>();
    // verify_program<test_gemm_ld>();
...
(new binary file leaky_relu.onnx: a small ONNX test model containing a single LeakyRelu node with an alpha attribute; the raw protobuf content is not readable as text)
@@ -88,10 +88,23 @@ void pytorch_conv_relu_maxpool_x2()
    EXPECT(p == prog);
}

void leaky_relu_test()
{
    migraph::program p;
    float alpha = 0.01f;
    auto l0 = p.add_parameter("0", {migraph::shape::float_type, {3}});
    p.add_instruction(migraph::op::leaky_relu{alpha}, l0);
    auto prog = migraph::parse_onnx("leaky_relu.onnx");
    EXPECT(p == prog);
}

int main()
{
    pytorch_conv_bias_test();
    pytorch_conv_relu_maxpool();
    pytorch_conv_bn_relu_maxpool();
    pytorch_conv_relu_maxpool_x2();
    leaky_relu_test();
}