Commit a813d1d6 authored by Khalique

added leaky_relu

parent 11d00d61
@@ -224,6 +224,22 @@ struct activation
    }
};
struct leaky_relu
{
    std::string name() const { return "leaky_relu"; }
    float alpha;
    shape compute_shape(std::vector<shape> inputs) const
    {
        check_shapes{inputs, *this}.has(1);
        return inputs.front();
    }
    friend std::ostream& operator<<(std::ostream& os, const leaky_relu& op)
    {
        os << op.name() << ":" << op.alpha;
        return os;
    }
};
struct transpose
{
    std::vector<int64_t> dims;
...
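For reference, leaky ReLU computes f(x) = x for x > 0 and f(x) = alpha * x otherwise; alpha is the slope applied to negative inputs. A minimal standalone sketch of these semantics (illustrative only, not part of this commit):

#include <cassert>

// Leaky ReLU: identity for positive inputs, slope alpha elsewhere.
float leaky_relu_ref(float x, float alpha)
{
    return x > 0 ? x : alpha * x;
}

int main()
{
    assert(leaky_relu_ref(1.f, 0.01f) == 1.f);     // positive: passed through
    assert(leaky_relu_ref(-1.f, 0.01f) == -0.01f); // negative: scaled by alpha
    assert(leaky_relu_ref(0.f, 0.01f) == 0.f);
}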
@@ -413,6 +413,18 @@ struct relu_op
    }
};
struct leaky_relu_op
{
    op::leaky_relu op;
    std::string name() const { return "cpu::leaky_relu"; }
    auto fcn() const
    {
        // Capture alpha by value so the returned lambda does not dangle.
        auto& a = op.alpha;
        return [a](auto x) { return x > 0 ? x : x * a; };
    }
};
template <typename Op>
struct cpu_unary
{
@@ -557,6 +569,7 @@ struct cpu_apply
        apply_map["batch_norm_inference"] =
            extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
        apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
        apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>();
        apply_map["identity"] = simple_op<cpu_unary<identity_op>>();
        apply_map["tanh"] = simple_op<cpu_unary<tanh_op>>();
...
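Note the asymmetry in the registrations above: leaky_relu carries a parameter (alpha), so it is registered via extend_op, which constructs the CPU wrapper from the parsed front-end op, while parameterless ops such as tanh use simple_op with a default-constructed wrapper. A minimal sketch of this dispatch pattern, using hypothetical stand-in types rather than migraph's actual extend_op/simple_op:

#include <functional>
#include <string>
#include <unordered_map>

// Hypothetical stand-ins for the front-end op and its CPU lowering.
struct front_op { float alpha; };
struct cpu_op   { front_op op; };

// extend_op-style factory: copies the parsed front-end op, preserving
// its parameters (here, alpha), into the CPU wrapper.
std::function<cpu_op(const front_op&)> extend_op_sketch()
{
    return [](const front_op& f) { return cpu_op{f}; };
}

// simple_op-style factory: the wrapper is stateless, so the front-end
// op is ignored and the wrapper is default-constructed.
std::function<cpu_op(const front_op&)> simple_op_sketch()
{
    return [](const front_op&) { return cpu_op{}; };
}

int main()
{
    std::unordered_map<std::string, std::function<cpu_op(const front_op&)>> apply_map;
    apply_map["leaky_relu"] = extend_op_sketch(); // parameters carried over
    apply_map["tanh"]       = simple_op_sketch(); // stateless
    cpu_op lowered = apply_map["leaky_relu"](front_op{0.01f});
    return lowered.op.alpha == 0.01f ? 0 : 1;
}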
#ifndef MIGRAPH_GUARD_RTGLIB_LEAKY_RELU_HPP
#define MIGRAPH_GUARD_RTGLIB_LEAKY_RELU_HPP
#include <migraph/gpu/lowering.hpp>
#include <migraph/manage_ptr.hpp>
#include <migraph/instruction.hpp>
#include <migraph/operators.hpp>
#include <migraph/generate.hpp>
#include <migraph/shape_for_each.hpp>
#include <migraph/gpu/miopen.hpp>
#include <migraph/gpu/hip.hpp>
#include <migraph/dfor.hpp>
#include <migraph/gpu/device/contiguous.hpp>
#include <migraph/gpu/device/add.hpp>
#include <migraph/iterator_for.hpp>
#include <migraph/gpu/rocblas.hpp>
#include <migraph/gpu/context.hpp>
#include <utility>
namespace migraph {
namespace gpu {
struct miopen_leaky_relu
{
    shared<activation_descriptor> ad;
    std::string name() const { return "gpu::leaky_relu"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument
    compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
};
} // namespace gpu
} // namespace migraph
#endif
@@ -87,6 +87,13 @@ inline activation_descriptor make_relu()
    return ad;
}
inline activation_descriptor make_leaky_relu(double alpha)
{
    auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
    // alpha is the negative slope; the trailing beta and gamma parameters
    // are unused by the leaky ReLU activation mode.
    miopenSetActivationDescriptor(ad.get(), miopenActivationLEAKYRELU, alpha, 0, 0);
    return ad;
}
inline fusion_plan_descriptor make_fusion_plan(const shape& input)
{
    auto t = make_tensor(input);
...
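For context, a MIOpen activation descriptor stores the mode plus three doubles (activAlpha, activBeta, activGamma); leaky ReLU uses only alpha as the negative slope, which is why beta and gamma are passed as zero above. A hedged sketch that reads the values back, assuming the standard miopenGetActivationDescriptor API:

// Sketch only: verify the descriptor produced by make_leaky_relu above.
inline bool check_leaky_descriptor(double alpha)
{
    auto ad = make_leaky_relu(alpha);
    miopenActivationMode_t mode;
    double a = 0, b = 0, g = 0;
    miopenGetActivationDescriptor(ad.get(), &mode, &a, &b, &g);
    return mode == miopenActivationLEAKYRELU && a == alpha;
}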
#include <migraph/gpu/leaky_relu.hpp>
#include <migraph/operators.hpp>
#include <migraph/manage_ptr.hpp>
#include <migraph/gpu/miopen.hpp>
#include <utility>
namespace migraph {
namespace gpu {
shape miopen_leaky_relu::compute_shape(const std::vector<shape>& inputs) const
{
    // Two inputs: the activation input and the preallocated output buffer
    // appended during lowering.
    check_shapes{inputs, *this}.has(2).not_broadcasted();
    return inputs.at(1);
}

argument miopen_leaky_relu::compute(context& ctx,
                                    const shape& output_shape,
                                    const std::vector<argument>& args) const
{
    // MIOpen scaling factors: y = alpha * activation(x) + beta * y.
    float alpha = 1, beta = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenActivationForward(ctx.handle.get(),
                            ad.get(),
                            &alpha,
                            x_desc.get(),
                            args[0].implicit(),
                            &beta,
                            y_desc.get(),
                            args[1].implicit());
    return args[1];
}
} // namespace gpu
} // namespace migraph
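An aside on the calling convention in compute above: the op writes into a buffer supplied as its final argument and returns that same argument, which is why lowering appends an output allocation. A tiny host-side sketch of this destination-passing style, with hypothetical names:

#include <vector>

// Hypothetical stand-in: the kernel fills a caller-provided buffer and
// returns it, mirroring how miopen_leaky_relu::compute returns args[1].
std::vector<float>& leaky_relu_into(const std::vector<float>& x,
                                    float alpha,
                                    std::vector<float>& out)
{
    for(std::size_t i = 0; i < x.size(); i++)
        out[i] = x[i] > 0 ? x[i] : alpha * x[i];
    return out; // destination-passing: result aliases the output argument
}

int main()
{
    std::vector<float> x = {-1.f, 0.f, 1.f};
    std::vector<float> out(x.size());
    leaky_relu_into(x, 0.01f, out); // out == {-0.01f, 0.f, 1.f}
    return out[0] == -0.01f ? 0 : 1;
}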
@@ -16,6 +16,7 @@
#include <migraph/gpu/convolution.hpp>
#include <migraph/gpu/contiguous.hpp>
#include <migraph/gpu/relu.hpp>
#include <migraph/gpu/leaky_relu.hpp>
#include <migraph/gpu/softmax.hpp>
#include <migraph/gpu/add.hpp>
#include <migraph/gpu/batchnorm.hpp>
@@ -128,6 +129,16 @@ struct miopen_apply
        }
        return ins;
    }
    instruction_ref apply_leaky_relu(instruction_ref ins)
    {
        // Read alpha from the generic op, bake it into a MIOpen activation
        // descriptor, and append the output allocation as the last input.
        auto&& op   = any_cast<op::leaky_relu>(ins->get_operator());
        auto ad     = make_leaky_relu(op.alpha);
        auto output = insert_allocation(ins, ins->get_shape());
        return prog->replace_instruction(
            ins, miopen_leaky_relu{std::move(ad)}, ins->inputs().at(0), output);
    }
    instruction_ref apply_softmax(instruction_ref ins)
    {
...
@@ -461,6 +461,34 @@ void div_test()
    EXPECT(migraph::verify_range(results_vector, gold));
}
void relu_test()
{
    migraph::program p;
    migraph::shape s{migraph::shape::float_type, {3}};
    auto l = p.add_literal(migraph::literal{s, {-1.f, 0.f, 1.f}});
    p.add_instruction(migraph::op::activation{"relu"}, l);
    p.compile(migraph::cpu::cpu_target{});
    auto result = p.eval({});
    std::vector<float> results_vector(3);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {0.f, 0.f, 1.f};
    EXPECT(migraph::verify_range(results_vector, gold));
}

void leaky_relu_test()
{
    migraph::program p;
    migraph::shape s{migraph::shape::float_type, {3}};
    auto l = p.add_literal(migraph::literal{s, {-1.f, 0.f, 1.f}});
    p.add_instruction(migraph::op::leaky_relu{0.01}, l);
    p.compile(migraph::cpu::cpu_target{});
    auto result = p.eval({});
    std::vector<float> results_vector(3);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {-0.01f, 0.f, 1.f};
    EXPECT(migraph::verify_range(results_vector, gold));
}
void reshape_test()
{
    migraph::shape a_shape{migraph::shape::float_type, {24, 1, 1, 1}};
@@ -917,6 +945,9 @@ int main()
    add_broadcast_test();
    sub_test();
    mul_test();
    div_test();
    relu_test();
    leaky_relu_test();
    gemm_test<float>();
    gemm_test<double>();
    reshape_test();
...