Commit 9cf50769 authored by Khalique

added cpu tests

parent 68484acf
@@ -241,6 +241,22 @@ struct leaky_relu
}
};
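// ELU activation: f(x) = x for x > 0, alpha * (exp(x) - 1) otherwise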
struct elu
{
std::string name() const { return "elu"; }
float alpha;
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this}.has(1);
return inputs.front();
}
friend std::ostream& operator<<(std::ostream& os, const elu& op)
{
os << op.name() << ":" << op.alpha;
return os;
}
};
struct transpose
{
std::vector<int64_t> dims;
...
@@ -66,6 +66,7 @@ struct onnx_parser
add_mem_op("ImageScaler", &onnx_parser::parse_imagescaler); add_mem_op("ImageScaler", &onnx_parser::parse_imagescaler);
add_mem_op("LeakyRelu", &onnx_parser::parse_leaky_relu); add_mem_op("LeakyRelu", &onnx_parser::parse_leaky_relu);
add_mem_op("Elu", &onnx_parser::parse_elu);
add_mem_op("Constant", &onnx_parser::parse_constant); add_mem_op("Constant", &onnx_parser::parse_constant);
add_mem_op("Conv", &onnx_parser::parse_conv); add_mem_op("Conv", &onnx_parser::parse_conv);
add_mem_op("MaxPool", &onnx_parser::parse_pooling); add_mem_op("MaxPool", &onnx_parser::parse_pooling);
@@ -390,6 +391,19 @@ struct onnx_parser
return prog.add_instruction(op, args.front());
}
instruction_ref parse_elu(const std::string&,
attribute_map attributes,
std::vector<instruction_ref> args)
{
float alpha = 1.0f; // ONNX default alpha for Elu
if(contains(attributes, "alpha"))
{
alpha = parse_value(attributes.at("alpha")).at<float>();
}
op::elu op{alpha};
return prog.add_instruction(op, args.front());
}
instruction_ref parse_imagescaler(const std::string&,
attribute_map attributes,
std::vector<instruction_ref> args)
...
@@ -19,6 +19,13 @@ T zero(const T&)
return T(0);
}
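// Maps unsigned integral types to their signed counterparts so that std::abs (used below) has a valid overload; all other types pass through unchanged.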
template<class T>
typename std::conditional_t<std::is_integral<T>{}, std::make_signed<T>, std::enable_if<true, T>>::type
make_signed(T x)
{
return x;
}
//
// cpu implementation of batch norm for inference
//
@@ -339,7 +346,7 @@ struct abs_op
std::string name() const { return "cpu::abs"; } std::string name() const { return "cpu::abs"; }
auto fcn() const auto fcn() const
{ {
return [](auto x) { return std::abs(x); }; return [](auto x) { return std::abs(make_signed(x)); };
} }
}; };
@@ -453,6 +460,17 @@ struct leaky_relu_op
}
};
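// CPU ELU: std::expm1(x) computes exp(x) - 1 accurately for small x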
struct elu_op
{
op::elu op;
std::string name() const { return "cpu::elu"; }
auto fcn() const
{
auto& a = op.alpha;
return [a](auto x) { return x > 0 ? x : a * std::expm1(x); };
}
};
template <typename Op>
struct cpu_unary
{
@@ -599,7 +617,9 @@ struct cpu_apply
apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>(); apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
apply_map["concat"] = extend_op<cpu_concat, op::concat>(); apply_map["concat"] = extend_op<cpu_concat, op::concat>();
apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>(); apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>();
apply_map["elu"] = extend_op<cpu_unary<elu_op>, op::elu>();
apply_map["identity"] = simple_op<cpu_unary<identity_op>>(); apply_map["identity"] = simple_op<cpu_unary<identity_op>>();
apply_map["abs"] = simple_op<cpu_unary<abs_op>>();
apply_map["tanh"] = simple_op<cpu_unary<tanh_op>>(); apply_map["tanh"] = simple_op<cpu_unary<tanh_op>>();
apply_map["sigmoid"] = simple_op<cpu_unary<sigmoid_op>>(); apply_map["sigmoid"] = simple_op<cpu_unary<sigmoid_op>>();
apply_map["exp"] = simple_op<cpu_unary<exp_op>>(); apply_map["exp"] = simple_op<cpu_unary<exp_op>>();
......
@@ -45,6 +45,7 @@ add_library(migraphx_gpu
sigmoid.cpp
tanh.cpp
abs.cpp
elu.cpp
)
set_target_properties(migraphx_gpu PROPERTIES EXPORT_NAME gpu)
rocm_clang_tidy_check(migraphx_gpu)
...
@@ -121,6 +121,13 @@ inline activation_descriptor make_leaky_relu(double alpha)
return ad;
}
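// For miopenActivationELU, alpha is passed as activAlpha; the beta and gamma arguments are unused by this mode.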
inline activation_descriptor make_elu(double alpha)
{
auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
miopenSetActivationDescriptor(ad.get(), miopenActivationELU, alpha, 0, 0);
return ad;
}
inline fusion_plan_descriptor make_fusion_plan(const shape& input)
{
auto t = make_tensor(input);
...
@@ -20,6 +20,7 @@
#include <migraphx/gpu/tanh.hpp>
#include <migraphx/gpu/abs.hpp>
#include <migraphx/gpu/leaky_relu.hpp>
#include <migraphx/gpu/elu.hpp>
#include <migraphx/gpu/softmax.hpp>
#include <migraphx/gpu/add.hpp>
#include <migraphx/gpu/mul.hpp>
@@ -74,6 +75,10 @@ struct miopen_apply
{
check_shape(s, apply_leaky_relu(it));
}
else if(it->name() == "elu")
{
check_shape(s, apply_elu(it));
}
else if(it->name() == "pooling") else if(it->name() == "pooling")
{ {
check_shape(s, apply_pooling(it)); check_shape(s, apply_pooling(it));
@@ -193,6 +198,16 @@ struct miopen_apply
ins, miopen_leaky_relu{std::move(ad)}, ins->inputs().at(0), output);
}
instruction_ref apply_elu(instruction_ref ins)
{
auto&& op = any_cast<op::elu>(ins->get_operator());
auto ad = make_elu(op.alpha);
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(
ins, miopen_elu{std::move(ad)}, ins->inputs().at(0), output);
}
instruction_ref apply_softmax(instruction_ref ins)
{
auto&& op = any_cast<op::softmax>(ins->get_operator());
...
@@ -7,6 +7,16 @@
#include <migraphx/verify.hpp>
#include "test.hpp"
float sigmoid(float x)
{
return 1 / (1 + expf(-x));
}
float elu(float a, float x)
{
return x > 0 ? x : a * std::expm1(x);
}
TEST_CASE(slice_test)
{
{
@@ -1105,4 +1115,61 @@ TEST_CASE(identity_test)
EXPECT(std::equal(data.begin(), data.end(), results_vector.begin()));
}
TEST_CASE(abs_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l = p.add_literal(migraphx::literal{s, {-1, 2, -3, 4}});
p.add_instruction(migraphx::op::abs{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1, 2, 3, 4};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(sigmoid_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l = p.add_literal(migraphx::literal{s, {-1, 2, -3, 4}});
p.add_instruction(migraphx::op::sigmoid{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{sigmoid(-1), sigmoid(2), sigmoid(-3), sigmoid(4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(tanh_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l = p.add_literal(migraphx::literal{s, {-1.0, 2.0, -3.0, 4.0}});
p.add_instruction(migraphx::op::tanh{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{tanhf(-1), tanhf(2), tanhf(-3), tanhf(4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(elu_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l = p.add_literal(migraphx::literal{s, {-1.0, 2.0, -3.0, 4.0}});
float alpha = 0.5;
p.add_instruction(migraphx::op::elu{alpha}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{elu(alpha, -1), elu(alpha, 2), elu(alpha, -3), elu(alpha, 4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -444,6 +444,39 @@ struct test_add_relu
}
};
struct test_sigmoid
{
migraphx::program create_program() const
{
migraphx::program p;
auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
p.add_instruction(migraphx::op::sigmoid{}, x);
return p;
}
};
struct test_tanh
{
migraphx::program create_program() const
{
migraphx::program p;
auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
p.add_instruction(migraphx::op::tanh{}, x);
return p;
}
};
struct test_abs
{
migraphx::program create_program() const
{
migraphx::program p;
auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
p.add_instruction(migraphx::op::abs{}, x);
return p;
}
};
struct test_leaky_relu
{
migraphx::program create_program() const
@@ -455,6 +488,17 @@ struct test_leaky_relu
}
};
struct test_elu
{
migraphx::program create_program() const
{
migraphx::program p;
auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
p.add_instruction(migraphx::op::elu{1.0}, x);
return p;
}
};
struct test_conv_pooling
{
migraphx::program create_program() const
@@ -837,6 +881,7 @@ struct test_conv_bn_relu_pooling2
int main()
{
verify_program<test_abs>();
verify_program<test_concat>();
verify_program<test_concat2>();
verify_program<test_concat_relu>();
@@ -860,6 +905,9 @@ int main()
verify_program<test_conv_relu_half>();
verify_program<test_add_relu>();
verify_program<test_leaky_relu>();
verify_program<test_sigmoid>();
verify_program<test_tanh>();
verify_program<test_elu>();
verify_program<test_conv_pooling>();
verify_program<test_global_avg_pooling>();
verify_program<test_global_max_pooling>();
...