Commit 95390dfe authored by Shucai Xiao

Merge changes adding the log and exp operators.

parents 8a22bda7 497383c8
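
This merge registers exp and log as unary operators, maps the ONNX "Exp" and "Log" nodes onto them in the parser, and lowers them on both the CPU and GPU (HIP) targets, with reference, GPU-verification, and ONNX-parse tests. As a rough usage sketch mirroring the CPU test added below (the header paths are assumptions, not taken from this diff):

#include <migraphx/program.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/cpu/target.hpp>

int main()
{
    // Build a tiny program that applies log elementwise to a 3-element literal.
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {3}};
    auto l = p.add_literal(migraphx::literal{s, {1, 2, 3}});
    p.add_instruction(migraphx::op::log{}, l); // op::exp{} works the same way
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({}); // for log: {0, 0.693..., 1.099...}
    (void)result;
}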
@@ -641,6 +641,11 @@ struct exp : unary
    std::string name() const { return "exp"; }
};

struct log : unary
{
    std::string name() const { return "log"; }
};

struct sin : unary
{
    std::string name() const { return "sin"; }
......
@@ -54,6 +54,8 @@ struct onnx_parser
        add_generic_op("Relu", op::relu{});
        add_generic_op("Sigmoid", op::sigmoid{});
        add_generic_op("Abs", op::abs{});
        add_generic_op("Exp", op::exp{});
        add_generic_op("Log", op::log{});
        // disable dropout for inference
        add_generic_op("Dropout", op::identity{});
        add_generic_op("Identity", op::identity{});
......
@@ -360,6 +360,15 @@ struct exp_op
    }
};

struct log_op
{
    std::string name() const { return "cpu::log"; }
    auto fcn() const
    {
        return [](auto x) { return std::log(x); };
    }
};

struct sin_op
{
    std::string name() const { return "cpu::sin"; }
@@ -662,6 +671,7 @@ struct cpu_apply
        apply_map["tanh"] = simple_op<cpu_unary<tanh_op>>();
        apply_map["sigmoid"] = simple_op<cpu_unary<sigmoid_op>>();
        apply_map["exp"] = simple_op<cpu_unary<exp_op>>();
        apply_map["log"] = simple_op<cpu_unary<log_op>>();
        apply_map["neg"] = simple_op<cpu_unary<neg_op>>();
        apply_map["sin"] = simple_op<cpu_unary<sin_op>>();
        apply_map["cos"] = simple_op<cpu_unary<cos_op>>();
......
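
For intuition on the CPU lowering above: each *_op functor only provides a name and an elementwise callable via fcn(); cpu_unary is the wrapper that applies it across the tensor. A minimal stand-alone illustration of that pattern (apply_unary is a hypothetical helper, not MIGraphX's cpu_unary):

#include <algorithm>
#include <cmath>
#include <string>
#include <vector>

// Local stand-in mirroring the log_op functor added above.
struct log_op
{
    std::string name() const { return "cpu::log"; }
    auto fcn() const
    {
        return [](auto x) { return std::log(x); };
    }
};

// Hypothetical helper: apply a unary functor's fcn() to every element.
template <class Op>
std::vector<float> apply_unary(const std::vector<float>& input, Op op)
{
    std::vector<float> output(input.size());
    std::transform(input.begin(), input.end(), output.begin(), op.fcn());
    return output;
}

// apply_unary({1.0f, 2.0f, 3.0f}, log_op{}) yields {0.0f, 0.693..., 1.099...}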
@@ -14,6 +14,8 @@ add_library(migraphx_device
    device/add.cpp
    device/max.cpp
    device/min.cpp
    device/exp.cpp
    device/log.cpp
    device/sin.cpp
    device/cos.cpp
    device/tan.cpp
......
#include <migraphx/gpu/device/exp.hpp>
#include <migraphx/gpu/device/nary.hpp>
#include <migraphx/gpu/device/types.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

void exp(hipStream_t stream, const argument& result, const argument& arg)
{
    nary(stream, result, arg)([](auto x) { return ::exp(to_hip_type(x)); });
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#include <migraphx/gpu/device/log.hpp>
#include <migraphx/gpu/device/nary.hpp>
#include <migraphx/gpu/device/types.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

void log(hipStream_t stream, const argument& result, const argument& arg)
{
    nary(stream, result, arg)([](auto x) { return ::log(to_hip_type(x)); });
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
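
The two device sources above delegate to nary, which, judging from its use here, launches an elementwise kernel that applies the given lambda to each element (to_hip_type adjusts the value type for HIP math functions). A minimal, hypothetical HIP sketch of such an elementwise launch for float data (this is not the actual nary implementation):

#include <hip/hip_runtime.h>

__global__ void log_kernel(const float* in, float* out, size_t n)
{
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n)
        out[i] = ::log(in[i]);
}

void launch_log(hipStream_t stream, const float* in, float* out, size_t n)
{
    const unsigned block = 256;
    const auto grid      = static_cast<unsigned>((n + block - 1) / block);
    // One thread per element, launched asynchronously on the given stream.
    hipLaunchKernelGGL(log_kernel, dim3(grid), dim3(block), 0, stream, in, out, n);
}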
#include <migraphx/gpu/exp.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

shape hip_exp::compute_shape(const std::vector<shape>& inputs) const
{
    check_shapes{inputs, *this}.has(2);
    return inputs.at(0);
}

argument hip_exp::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
    device::exp(ctx.get_stream().get(), args[1], args[0]);
    return args[1];
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_EXP_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_EXP_HPP

#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

void exp(hipStream_t stream, const argument& result, const argument& arg);

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_LOG_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_LOG_HPP

#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

void log(hipStream_t stream, const argument& result, const argument& arg);

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_EXP_HPP
#define MIGRAPHX_GUARD_RTGLIB_EXP_HPP

#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/exp.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/config.hpp>
#include <utility>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

struct hip_exp
{
    std::string name() const { return "gpu::exp"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument compute(context&, const shape&, const std::vector<argument>& args) const;
    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_LOG_HPP
#define MIGRAPHX_GUARD_RTGLIB_LOG_HPP

#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/log.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/config.hpp>
#include <utility>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

struct hip_log
{
    std::string name() const { return "gpu::log"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument compute(context&, const shape&, const std::vector<argument>& args) const;
    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
#include <migraphx/gpu/log.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

shape hip_log::compute_shape(const std::vector<shape>& inputs) const
{
    check_shapes{inputs, *this}.has(2);
    return inputs.at(0);
}

argument hip_log::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
    device::log(ctx.get_stream().get(), args[1], args[0]);
    return args[1];
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
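
One convention worth noting in hip_exp and hip_log (inferred from this diff rather than from documentation): compute_shape expects two shapes because the lowering pass appends a preallocated output buffer as the last argument; compute writes into args[1] and returns it, and output_alias reports that last position so the framework knows the result aliases the supplied buffer rather than fresh memory. Roughly:

// args[0] : input tensor (device memory)
// args[1] : preallocated output buffer; compute() fills it and returns it
// output_alias(shapes) == shapes.size() - 1, i.e. the result aliases args.back()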
@@ -22,6 +22,8 @@
#include <migraphx/gpu/elu.hpp>
#include <migraphx/gpu/softmax.hpp>
#include <migraphx/gpu/add.hpp>
#include <migraphx/gpu/exp.hpp>
#include <migraphx/gpu/log.hpp>
#include <migraphx/gpu/sin.hpp>
#include <migraphx/gpu/cos.hpp>
#include <migraphx/gpu/tan.hpp>
@@ -69,6 +71,8 @@ struct miopen_apply
        add_miopen_extend_op<miopen_elu, op::elu>("elu", make_elu);
        add_generic_op<hip_add>("add");
        add_generic_op<hip_exp>("exp");
        add_generic_op<hip_log>("log");
        add_generic_op<hip_sin>("sin");
        add_generic_op<hip_cos>("cos");
        add_generic_op<hip_tan>("tan");
......
@@ -413,6 +413,20 @@ TEST_CASE(exp_test)
    EXPECT(migraphx::verify_range(results_vector, gold));
}

TEST_CASE(log_test)
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {3}};
    auto l = p.add_literal(migraphx::literal{s, {1, 2, 3}});
    p.add_instruction(migraphx::op::log{}, l);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results_vector(3);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {0.0f, 0.6931471806f, 1.0986122887f};
    EXPECT(migraphx::verify_range(results_vector, gold));
}

TEST_CASE(sin_test)
{
    migraphx::program p;
......
@@ -203,6 +203,32 @@ struct test_mul
    }
};

struct test_exp
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {6}};
        std::vector<float> data{0.1f, 0.2f, 1.f, 2.f, 0.6f, 10.f};
        auto x = p.add_literal(s, data);
        p.add_instruction(migraphx::op::exp{}, x);
        return p;
    }
};

struct test_log
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {6}};
        std::vector<float> data{0.1f, 0.2f, 1.f, 2.f, 0.6f, 100.f};
        auto x = p.add_literal(s, data);
        p.add_instruction(migraphx::op::log{}, x);
        return p;
    }
};

struct test_sin
{
    migraphx::program create_program() const
@@ -984,6 +1010,8 @@ int main()
    verify_program<test_add>();
    verify_program<test_add_half>();
    verify_program<test_mul>();
    verify_program<test_exp>();
    verify_program<test_log>();
    verify_program<test_sin>();
    verify_program<test_cos>();
    verify_program<test_tan>();
......

(binary file exp_test.onnx: ONNX model "exp-example", graph "test_exp", a single Exp node mapping input x to output y)
(binary file log_test.onnx: ONNX model "log-example", graph "test_log", a single Log node mapping input x to output y)
@@ -179,6 +179,27 @@ void sum_test()
    p.add_instruction(migraphx::op::add{}, l0, input2);
    auto prog = migraphx::parse_onnx("sum_test.onnx");
    EXPECT(p == prog);
}

void exp_test()
{
    migraphx::program p;
    auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}});
    p.add_instruction(migraphx::op::exp{}, input);
    auto prog = migraphx::parse_onnx("exp_test.onnx");
    EXPECT(p == prog);
}

void log_test()
{
    migraphx::program p;
    auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}});
    p.add_instruction(migraphx::op::log{}, input);
    auto prog = migraphx::parse_onnx("log_test.onnx");
    EXPECT(p == prog);
}

void sin_test()
@@ -316,6 +337,8 @@ int main()
    sum_test();
    max_test();
    min_test();
    exp_test();
    log_test();
    sin_test();
    cos_test();
    tan_test();
......