Unverified Commit 4c90e9a3 authored by turneram, committed by GitHub

SoftPlus ONNX parser (#1045)

* Add onnx parser and unit test
parent 3f392a3b
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/onnx/checks.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {

struct parse_softplus : op_parser<parse_softplus>
{
    std::vector<op_desc> operators() const { return {{"Softplus"}}; }

    instruction_ref parse(const op_desc& /*opd*/,
                          const onnx_parser& /*parser*/,
                          const onnx_parser::node_info& info,
                          std::vector<instruction_ref> args) const
    {
        // Apply pointwise formula: y = ln(exp(x) + 1)
        auto mb_ones = info.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", args[0]->get_shape().lens()}}),
            info.add_literal(
                migraphx::literal{migraphx::shape{args[0]->get_shape().type()}, {1}}));
        auto exp = info.add_instruction(migraphx::make_op("exp"), args[0]);
        auto add = info.add_instruction(migraphx::make_op("add"), exp, mb_ones);
        return info.add_instruction(migraphx::make_op("log"), add);
    }
};

} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
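For reference, the decomposition the parser emits can be checked against a plain NumPy sketch of the same formula. This is illustrative only (NumPy assumed available, not part of the MIGraphX sources); it also shows the numerically equivalent log1p form that the ref verification test further down uses for its gold values.

import numpy as np

def softplus_reference(x):
    # Same decomposition the parser builds: y = log(exp(x) + 1)
    return np.log(np.exp(x) + 1.0)

def softplus_log1p(x):
    # Equivalent form used for the gold values in the ref test below
    return np.log1p(np.exp(x))

x = np.array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=np.float32)
assert np.allclose(softplus_reference(x), softplus_log1p(x))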
...@@ -4379,6 +4379,26 @@ def softmax_nonstd_input_test():
    return ([node0, node1], [x], [y])
@onnx_test
def softplus_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5])

    node = onnx.helper.make_node('Softplus', inputs=['x'], outputs=['y'])

    return ([node], [x], [y])


@onnx_test
def softplus_nd_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [3, 4, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [3, 4, 5])

    node = onnx.helper.make_node('Softplus', inputs=['x'], outputs=['y'])

    return ([node], [x], [y])
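The @onnx_test decorator (defined elsewhere in the test generator) presumably assembles the returned nodes and value infos into a model file named after the function. A hypothetical standalone equivalent of softplus_test is sketched below; the graph name and output path are assumptions for illustration, not the decorator's actual behavior.

import onnx
from onnx import helper, TensorProto

# Hypothetical standalone equivalent of softplus_test(); in the repo the
# @onnx_test decorator presumably builds and saves the model instead.
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5])
node = helper.make_node('Softplus', inputs=['x'], outputs=['y'])
graph = helper.make_graph([node], 'softplus_test', [x], [y])
onnx.save(helper.make_model(graph), 'softplus_test.onnx')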
@onnx_test
def split_minus_axis_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
......
...@@ -4095,6 +4095,46 @@ TEST_CASE(softmax_nonstd_input_test)
    EXPECT(p == prog);
}
TEST_CASE(softplus_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    std::vector<std::size_t> input_lens{5};
    auto input_type = migraphx::shape::float_type;
    auto x = mm->add_parameter("x", migraphx::shape{input_type, input_lens});
    auto mb_ones =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
                            mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {1}}));
    auto exp = mm->add_instruction(migraphx::make_op("exp"), x);
    auto add = mm->add_instruction(migraphx::make_op("add"), exp, mb_ones);
    mm->add_instruction(migraphx::make_op("log"), add);

    auto prog = optimize_onnx("softplus_test.onnx");
    EXPECT(p == prog);
}

TEST_CASE(softplus_nd_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    std::vector<std::size_t> input_lens{3, 4, 5};
    auto input_type = migraphx::shape::half_type;
    auto x = mm->add_parameter("x", migraphx::shape{input_type, input_lens});
    auto mb_ones =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
                            mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {1}}));
    auto exp = mm->add_instruction(migraphx::make_op("exp"), x);
    auto add = mm->add_instruction(migraphx::make_op("add"), exp, mb_ones);
    mm->add_instruction(migraphx::make_op("log"), add);

    auto prog = optimize_onnx("softplus_nd_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(split_minus_axis_test)
{
    migraphx::program p;
......
[Binary ONNX model fixtures added: softplus_test.onnx and softplus_nd_test.onnx (serialized protobuf, not human-readable).]
...@@ -588,6 +588,27 @@ TEST_CASE(slice_step_test)
    EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(softplus_test)
{
    migraphx::program p = migraphx::parse_onnx("softplus_test.onnx");
    p.compile(migraphx::ref::target{});

    migraphx::shape s{migraphx::shape::float_type, {5}};
    std::vector<float> data = {0, 1, 2, 3, 4};
    migraphx::parameter_map pp;
    pp["x"] = migraphx::argument(s, data.data());

    auto result = p.eval(pp).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<float> gold(5);
    std::transform(
        data.begin(), data.end(), gold.begin(), [](auto x) { return std::log1p(std::exp(x)); });
    EXPECT(migraphx::verify_range(result_vector, gold));
}
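As a quick sanity check of the gold values in this test, the same log1p(exp(x)) computation can be reproduced outside the test harness; the NumPy sketch below is illustrative only.

import numpy as np

# Expected outputs for x = [0, 1, 2, 3, 4] under log1p(exp(x))
x = np.array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=np.float32)
gold = np.log1p(np.exp(x))
# gold is approximately [0.6931, 1.3133, 2.1269, 3.0486, 4.0181]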
TEST_CASE(upsample_test)
{
    migraphx::program p = migraphx::parse_onnx("upsample_test.onnx");
......
...@@ -282,12 +282,9 @@ def create_backend_test(testname=None, target_device=None):
     backend_test.exclude(r'test_size_cpu')
     backend_test.exclude(r'test_size_example_cpu')
     backend_test.exclude(r'test_softmax_cross_entropy_*')
-    backend_test.exclude(r'test_softplus_cpu')
-    backend_test.exclude(r'test_softplus_example_cpu')
     backend_test.exclude(r'test_softsign_cpu')
     backend_test.exclude(r'test_softsign_example_cpu')
     backend_test.exclude(r'test_Embedding_cpu')
-    backend_test.exclude(r'test_Softplus_cpu')

     # real model tests
     backend_test.exclude(r'test_inception_v1_cpu')
......