Commit 78ee6fbe authored by Khalique's avatar Khalique
Browse files

manual merge

parents d7254a5e 8885e8ac
#ifndef MIGRAPHX_GUARD_RTGLIB_COSH_HPP
#define MIGRAPHX_GUARD_RTGLIB_COSH_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/cosh.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/config.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
// GPU lowering of the elementwise hyperbolic-cosine operator.
// compute()/compute_shape() are defined in the corresponding .cpp; the final
// argument is presumed to be the preallocated output buffer (TODO confirm
// against the lowering pass), which is why output_alias points at it.
struct hip_cosh
{
    std::string name() const { return "gpu::cosh"; }
    // Validates the input shapes and returns the result shape (out-of-line).
    shape compute_shape(const std::vector<shape>& inputs) const;
    // Launches the device kernel on the context's stream (out-of-line).
    argument compute(context&, const shape&, const std::vector<argument>& args) const;
    // The output aliases the last argument. Cast before subtracting so an
    // empty shape list yields -1 instead of size_t wraparound narrowed to int.
    int output_alias(const std::vector<shape>& shapes) const
    {
        return static_cast<int>(shapes.size()) - 1;
    }
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_ACOS_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_ACOS_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void acos(hipStream_t stream, const argument& result, const argument& arg);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_ASIN_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_ASIN_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void asin(hipStream_t stream, const argument& result, const argument& arg);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_ATAN_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_ATAN_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void atan(hipStream_t stream, const argument& result, const argument& arg);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_COS_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_COS_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void cos(hipStream_t stream, const argument& result, const argument& arg);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_COSH_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_COSH_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void cosh(hipStream_t stream, const argument& result, const argument& arg);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_ADD_HPP #ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_ADD_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_ADD_HPP #define MIGRAPHX_GUARD_RTGLIB_DEVICE_ADD_HPP
......
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_SINH_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_SINH_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void sinh(hipStream_t stream, const argument& result, const argument& arg);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_TAN_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_TAN_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void tan(hipStream_t stream, const argument& result, const argument& arg);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_SINH_HPP
#define MIGRAPHX_GUARD_RTGLIB_SINH_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/sinh.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/config.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
// GPU lowering of the elementwise hyperbolic-sine operator.
// compute()/compute_shape() are defined in the corresponding .cpp; the final
// argument is the preallocated output buffer (see hip_sinh::compute, which
// writes args[1] and returns it), hence output_alias points at it.
struct hip_sinh
{
    std::string name() const { return "gpu::sinh"; }
    // Validates the input shapes and returns the result shape (out-of-line).
    shape compute_shape(const std::vector<shape>& inputs) const;
    // Launches the device kernel on the context's stream (out-of-line).
    argument compute(context&, const shape&, const std::vector<argument>& args) const;
    // The output aliases the last argument. Cast before subtracting so an
    // empty shape list yields -1 instead of size_t wraparound narrowed to int.
    int output_alias(const std::vector<shape>& shapes) const
    {
        return static_cast<int>(shapes.size()) - 1;
    }
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_TAN_HPP
#define MIGRAPHX_GUARD_RTGLIB_TAN_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/sin.hpp>
#include <migraphx/gpu/device/cos.hpp>
#include <migraphx/gpu/device/tan.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/config.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
// GPU lowering of the elementwise tangent operator.
// compute()/compute_shape() are defined in the corresponding .cpp; the final
// argument is the preallocated output buffer (see hip_tan::compute, which
// writes args[1] and returns it), hence output_alias points at it.
struct hip_tan
{
    std::string name() const { return "gpu::tan"; }
    // Validates the input shapes and returns the result shape (out-of-line).
    shape compute_shape(const std::vector<shape>& inputs) const;
    // Launches the device kernel on the context's stream (out-of-line).
    argument compute(context&, const shape&, const std::vector<argument>& args) const;
    // The output aliases the last argument. Cast before subtracting so an
    // empty shape list yields -1 instead of size_t wraparound narrowed to int.
    int output_alias(const std::vector<shape>& shapes) const
    {
        return static_cast<int>(shapes.size()) - 1;
    }
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
...@@ -17,13 +17,20 @@ ...@@ -17,13 +17,20 @@
#include <migraphx/gpu/contiguous.hpp> #include <migraphx/gpu/contiguous.hpp>
#include <migraphx/gpu/relu.hpp> #include <migraphx/gpu/relu.hpp>
#include <migraphx/gpu/sigmoid.hpp> #include <migraphx/gpu/sigmoid.hpp>
#include <migraphx/gpu/tanh.hpp>
#include <migraphx/gpu/abs.hpp> #include <migraphx/gpu/abs.hpp>
#include <migraphx/gpu/leaky_relu.hpp> #include <migraphx/gpu/leaky_relu.hpp>
#include <migraphx/gpu/elu.hpp> #include <migraphx/gpu/elu.hpp>
#include <migraphx/gpu/softmax.hpp> #include <migraphx/gpu/softmax.hpp>
#include <migraphx/gpu/add.hpp> #include <migraphx/gpu/add.hpp>
#include <migraphx/gpu/sin.hpp> #include <migraphx/gpu/sin.hpp>
#include <migraphx/gpu/cos.hpp>
#include <migraphx/gpu/tan.hpp>
#include <migraphx/gpu/sinh.hpp>
#include <migraphx/gpu/cosh.hpp>
#include <migraphx/gpu/tanh.hpp>
#include <migraphx/gpu/asin.hpp>
#include <migraphx/gpu/acos.hpp>
#include <migraphx/gpu/atan.hpp>
#include <migraphx/gpu/mul.hpp> #include <migraphx/gpu/mul.hpp>
#include <migraphx/gpu/max.hpp> #include <migraphx/gpu/max.hpp>
#include <migraphx/gpu/min.hpp> #include <migraphx/gpu/min.hpp>
...@@ -42,8 +49,7 @@ struct miopen_apply ...@@ -42,8 +49,7 @@ struct miopen_apply
{ {
program* prog = nullptr; program* prog = nullptr;
context ctx{}; context ctx{};
std::unordered_map<std::string, std::function<instruction_ref(miopen_apply&, instruction_ref)>> std::unordered_map<std::string, std::function<instruction_ref(instruction_ref)>> apply_map{};
apply_map{};
void check_shape(shape x, instruction_ref i) void check_shape(shape x, instruction_ref i)
{ {
...@@ -54,24 +60,35 @@ struct miopen_apply ...@@ -54,24 +60,35 @@ struct miopen_apply
void init() void init()
{ {
apply_map["convolution"] = &miopen_apply::apply_convolution; add_miopen_simple_op<miopen_relu>("relu", make_relu);
apply_map["relu"] = &miopen_apply::apply_relu; add_miopen_simple_op<miopen_sigmoid>("sigmoid", make_sigmoid);
apply_map["sigmoid"] = &miopen_apply::apply_sigmoid; add_miopen_simple_op<miopen_abs>("abs", make_abs);
apply_map["tanh"] = &miopen_apply::apply_tanh; add_miopen_simple_op<miopen_tanh>("tanh", make_tanh);
apply_map["abs"] = &miopen_apply::apply_abs;
apply_map["leaky_relu"] = &miopen_apply::apply_leaky_relu; add_miopen_extend_op<miopen_leaky_relu, op::leaky_relu>("leaky_relu", make_leaky_relu);
apply_map["elu"] = &miopen_apply::apply_elu; add_miopen_extend_op<miopen_elu, op::elu>("elu", make_elu);
apply_map["pooling"] = &miopen_apply::apply_pooling;
apply_map["add"] = &miopen_apply::apply_add; add_generic_op<hip_add>("add");
apply_map["sin"] = &miopen_apply::apply_sin; add_generic_op<hip_sin>("sin");
apply_map["mul"] = &miopen_apply::apply_mul; add_generic_op<hip_cos>("cos");
apply_map["max"] = &miopen_apply::apply_max; add_generic_op<hip_tan>("tan");
apply_map["min"] = &miopen_apply::apply_min; add_generic_op<hip_sinh>("sinh");
apply_map["dot"] = &miopen_apply::apply_dot; add_generic_op<hip_cosh>("cosh");
apply_map["contiguous"] = &miopen_apply::apply_contiguous; add_generic_op<hip_asin>("asin");
apply_map["concat"] = &miopen_apply::apply_concat; add_generic_op<hip_acos>("acos");
apply_map["batch_norm_inference"] = &miopen_apply::apply_batch_norm_inference; add_generic_op<hip_atan>("atan");
apply_map["softmax"] = &miopen_apply::apply_softmax; add_generic_op<hip_mul>("mul");
add_generic_op<hip_max>("max");
add_generic_op<hip_min>("min");
add_extend_op<miopen_gemm, op::dot>("dot");
add_extend_op<miopen_contiguous, op::contiguous>("contiguous");
add_extend_op<hip_concat, op::concat>("concat");
add_extend_op<miopen_softmax, op::softmax>("softmax");
add_convolution_op();
add_pooling_op();
add_batch_norm_inference_op();
} }
void apply() void apply()
...@@ -82,7 +99,7 @@ struct miopen_apply ...@@ -82,7 +99,7 @@ struct miopen_apply
auto s = it->get_shape(); auto s = it->get_shape();
if(apply_map.count(it->name()) > 0) if(apply_map.count(it->name()) > 0)
{ {
check_shape(s, apply_map.at(it->name())(*this, it)); check_shape(s, apply_map.at(it->name())(it));
} }
} }
} }
...@@ -101,171 +118,103 @@ struct miopen_apply ...@@ -101,171 +118,103 @@ struct miopen_apply
} }
} }
instruction_ref apply_convolution(instruction_ref ins) void add_convolution_op()
{
auto&& op = any_cast<op::convolution>(ins->get_operator());
auto conv = miopen_convolution{op, make_conv(op)};
auto ws = conv.compile(ctx, ins->get_shape(), ins->inputs());
auto workspace = insert_allocation(ins, ws, "workspace");
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(
ins, conv, ins->inputs().at(0), ins->inputs().at(1), workspace, output);
}
instruction_ref apply_pooling(instruction_ref ins)
{
auto&& op = any_cast<op::pooling>(ins->get_operator());
auto pd = make_pooling(op);
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(
ins, miopen_pooling{op, std::move(pd)}, ins->inputs().at(0), output);
}
instruction_ref apply_relu(instruction_ref ins)
{
auto ad = make_relu();
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(
ins, miopen_relu{std::move(ad)}, ins->inputs().at(0), output);
}
instruction_ref apply_sigmoid(instruction_ref ins)
{
auto ad = make_sigmoid();
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(
ins, miopen_sigmoid{std::move(ad)}, ins->inputs().at(0), output);
}
instruction_ref apply_tanh(instruction_ref ins)
{ {
auto ad = make_tanh(); apply_map.emplace("convolution", [=](instruction_ref ins) {
auto&& op = any_cast<op::convolution>(ins->get_operator());
auto output = insert_allocation(ins, ins->get_shape()); auto conv = miopen_convolution{op, make_conv(op)};
return prog->replace_instruction( auto ws = conv.compile(ctx, ins->get_shape(), ins->inputs());
ins, miopen_tanh{std::move(ad)}, ins->inputs().at(0), output);
}
instruction_ref apply_abs(instruction_ref ins) auto workspace = insert_allocation(ins, ws, "workspace");
{ auto output = insert_allocation(ins, ins->get_shape());
auto ad = make_abs();
auto output = insert_allocation(ins, ins->get_shape()); return prog->replace_instruction(
return prog->replace_instruction( ins, conv, ins->inputs().at(0), ins->inputs().at(1), workspace, output);
ins, miopen_abs{std::move(ad)}, ins->inputs().at(0), output); });
} }
instruction_ref apply_leaky_relu(instruction_ref ins) void add_pooling_op()
{ {
auto&& op = any_cast<op::leaky_relu>(ins->get_operator()); apply_map.emplace("pooling", [=](instruction_ref ins) {
auto ad = make_leaky_relu(op.alpha); auto&& op = any_cast<op::pooling>(ins->get_operator());
auto pd = make_pooling(op);
auto output = insert_allocation(ins, ins->get_shape());
auto output = insert_allocation(ins, ins->get_shape()); return prog->replace_instruction(
return prog->replace_instruction( ins, miopen_pooling{op, std::move(pd)}, ins->inputs().at(0), output);
ins, miopen_leaky_relu{std::move(ad)}, ins->inputs().at(0), output); });
} }
instruction_ref apply_elu(instruction_ref ins) template <class T>
void add_generic_op(std::string name)
{ {
auto&& op = any_cast<op::leaky_relu>(ins->get_operator()); apply_map.emplace(name, [=](instruction_ref ins) {
auto ad = make_elu(op.alpha); auto output = insert_allocation(ins, ins->get_shape());
std::vector<instruction_ref> refs = ins->inputs();
auto output = insert_allocation(ins, ins->get_shape()); refs.push_back(output);
return prog->replace_instruction(
ins, miopen_elu{std::move(ad)}, ins->inputs().at(0), output);
}
instruction_ref apply_softmax(instruction_ref ins) return prog->replace_instruction(ins, T{}, refs);
{ });
auto&& op = any_cast<op::softmax>(ins->get_operator());
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(ins, miopen_softmax{op}, ins->inputs().at(0), output);
} }
instruction_ref apply_add(instruction_ref ins) template <class T, class Op>
void add_extend_op(std::string name)
{ {
auto output = insert_allocation(ins, ins->get_shape()); apply_map.emplace(name, [=](instruction_ref ins) {
return prog->replace_instruction( auto&& op = any_cast<Op>(ins->get_operator());
ins, hip_add{}, ins->inputs().at(0), ins->inputs().at(1), output); auto output = insert_allocation(ins, ins->get_shape());
} std::vector<instruction_ref> refs = ins->inputs();
refs.push_back(output);
instruction_ref apply_sin(instruction_ref ins) return prog->replace_instruction(ins, T{op}, refs);
{ });
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(ins, hip_sin{}, ins->inputs().at(0), output);
} }
instruction_ref apply_mul(instruction_ref ins) template <class T, class Op, class F>
void add_miopen_extend_op(std::string name, F f)
{ {
auto output = insert_allocation(ins, ins->get_shape()); apply_map.emplace(name, [=](instruction_ref ins) {
return prog->replace_instruction( auto&& op = any_cast<Op>(ins->get_operator());
ins, hip_mul{}, ins->inputs().at(0), ins->inputs().at(1), output); auto ad = f(op.alpha);
}
instruction_ref apply_max(instruction_ref ins) auto output = insert_allocation(ins, ins->get_shape());
{ return prog->replace_instruction(ins, T{std::move(ad)}, ins->inputs().at(0), output);
auto output = insert_allocation(ins, ins->get_shape()); });
return prog->replace_instruction(
ins, hip_max{}, ins->inputs().at(0), ins->inputs().at(1), output);
}
instruction_ref apply_min(instruction_ref ins)
{
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(
ins, hip_min{}, ins->inputs().at(0), ins->inputs().at(1), output);
}
instruction_ref apply_dot(instruction_ref ins)
{
auto&& op = any_cast<op::dot>(ins->get_operator());
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(
ins, miopen_gemm{op}, ins->inputs().at(0), ins->inputs().at(1), output);
}
instruction_ref apply_contiguous(instruction_ref ins)
{
auto&& op = any_cast<op::contiguous>(ins->get_operator());
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(ins, miopen_contiguous{op}, ins->inputs().at(0), output);
} }
instruction_ref apply_concat(instruction_ref ins) template <class T, class F>
void add_miopen_simple_op(std::string name, F f)
{ {
auto&& op = any_cast<op::concat>(ins->get_operator()); apply_map.emplace(name, [=](instruction_ref ins) {
auto output = insert_allocation(ins, ins->get_shape()); auto ad = f();
std::vector<instruction_ref> refs = ins->inputs(); auto output = insert_allocation(ins, ins->get_shape());
refs.push_back(output); return prog->replace_instruction(ins, T{std::move(ad)}, ins->inputs().at(0), output);
return prog->replace_instruction(ins, hip_concat{op}, refs); });
} }
instruction_ref apply_batch_norm_inference(instruction_ref ins) void add_batch_norm_inference_op()
{ {
auto&& op = any_cast<op::batch_norm_inference>(ins->get_operator()); apply_map.emplace("batch_norm_inference", [=](instruction_ref ins) {
auto output = insert_allocation(ins, ins->get_shape()); auto&& op = any_cast<op::batch_norm_inference>(ins->get_operator());
shape old_shape = ins->inputs().at(1)->get_shape(); auto output = insert_allocation(ins, ins->get_shape());
std::vector<int64_t> new_shape{1, static_cast<int64_t>(old_shape.elements()), 1, 1}; shape old_shape = ins->inputs().at(1)->get_shape();
auto reshape_op = op::reshape{new_shape}; std::vector<int64_t> new_shape{1, static_cast<int64_t>(old_shape.elements()), 1, 1};
std::vector<instruction_ref> reshapes; auto reshape_op = op::reshape{new_shape};
std::transform(ins->inputs().begin() + 1, std::vector<instruction_ref> reshapes;
ins->inputs().end(), std::transform(ins->inputs().begin() + 1,
std::back_inserter(reshapes), ins->inputs().end(),
[&](auto i) { return prog->insert_instruction(ins, reshape_op, i); }); std::back_inserter(reshapes),
return prog->replace_instruction(ins, [&](auto i) { return prog->insert_instruction(ins, reshape_op, i); });
miopen_batch_norm_inference{op}, return prog->replace_instruction(ins,
ins->inputs().at(0), miopen_batch_norm_inference{op},
reshapes[0], ins->inputs().at(0),
reshapes[1], reshapes[0],
reshapes[2], reshapes[1],
reshapes[3], reshapes[2],
output); reshapes[3],
output);
});
} }
}; };
......
#include <migraphx/gpu/sinh.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
// The result of sinh has the same shape as its input. Exactly two shapes are
// expected: the input tensor and the preallocated output buffer.
shape hip_sinh::compute_shape(const std::vector<shape>& inputs) const
{
    check_shapes{inputs, *this}.has(2);
    return inputs.front();
}
// Runs the elementwise sinh device kernel: args[0] is the input and args[1]
// the preallocated output buffer, which is returned to the caller.
argument hip_sinh::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
    auto stream = ctx.get_stream().get();
    device::sinh(stream, args[1], args[0]);
    return args[1];
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#include <migraphx/gpu/tan.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
// The result of tan has the same shape as its input. Exactly two shapes are
// expected: the input tensor and the preallocated output buffer.
shape hip_tan::compute_shape(const std::vector<shape>& inputs) const
{
    check_shapes{inputs, *this}.has(2);
    return inputs.front();
}
// Runs the elementwise tan device kernel: args[0] is the input and args[1]
// the preallocated output buffer, which is returned to the caller.
argument hip_tan::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
    auto stream = ctx.get_stream().get();
    device::tan(stream, args[1], args[0]);
    return args[1];
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
...@@ -455,6 +455,50 @@ TEST_CASE(tan_test) ...@@ -455,6 +455,50 @@ TEST_CASE(tan_test)
EXPECT(migraphx::verify_range(results_vector, gold)); EXPECT(migraphx::verify_range(results_vector, gold));
} }
// Checks elementwise asin on the CPU target against reference values.
TEST_CASE(asin_test)
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {3}};
    std::vector<float> data{-0.5f, 0.0f, 0.9f};
    auto l = p.add_literal(migraphx::literal{s, data});
    p.add_instruction(migraphx::op::asin{}, l);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results_vector(3);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    // asin(-0.5), asin(0), asin(0.9). The last literal previously lacked the
    // 'f' suffix, silently narrowing a double into this float vector.
    std::vector<float> gold = {-0.5235987756f, 0.0f, 1.1197695150f};
    EXPECT(migraphx::verify_range(results_vector, gold));
}
// Checks elementwise acos on the CPU target against reference values.
TEST_CASE(acos_test)
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::double_type, {3}};
    std::vector<float> data{-0.8f, 0.0f, 1.0f};
    auto lit = p.add_literal(migraphx::literal{s, data});
    p.add_instruction(migraphx::op::acos{}, lit);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results(3);
    result.visit([&](auto output) { results.assign(output.begin(), output.end()); });
    // acos(-0.8), acos(0), acos(1)
    std::vector<float> gold = {2.4980915448f, 1.5707963268f, 0.0f};
    EXPECT(migraphx::verify_range(results, gold));
}
// Checks elementwise atan on the CPU target against reference values.
TEST_CASE(atan_test)
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::double_type, {3}};
    auto lit = p.add_literal(migraphx::literal{s, {-1, 0, 1}});
    p.add_instruction(migraphx::op::atan{}, lit);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results(3);
    result.visit([&](auto output) { results.assign(output.begin(), output.end()); });
    // atan(-1) = -pi/4, atan(0) = 0, atan(1) = pi/4
    std::vector<float> gold = {-0.7853981634f, 0.0f, 0.7853981634f};
    EXPECT(migraphx::verify_range(results, gold));
}
TEST_CASE(add_test) TEST_CASE(add_test)
{ {
migraphx::program p; migraphx::program p;
...@@ -1137,6 +1181,34 @@ TEST_CASE(sigmoid_test) ...@@ -1137,6 +1181,34 @@ TEST_CASE(sigmoid_test)
EXPECT(migraphx::verify_range(results_vector, gold)); EXPECT(migraphx::verify_range(results_vector, gold));
} }
// Checks elementwise sinh on the CPU target against the C library reference.
TEST_CASE(sinh_test)
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {2, 2}};
    auto lit = p.add_literal(migraphx::literal{s, {-1.0, 2.0, -3.0, 4.0}});
    p.add_instruction(migraphx::op::sinh{}, lit);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results(4);
    result.visit([&](auto output) { results.assign(output.begin(), output.end()); });
    std::vector<float> gold{sinhf(-1), sinhf(2), sinhf(-3), sinhf(4)};
    EXPECT(migraphx::verify_range(results, gold));
}
// Checks elementwise cosh on the CPU target against the C library reference.
TEST_CASE(cosh_test)
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {2, 2}};
    auto lit = p.add_literal(migraphx::literal{s, {-1.0, 2.0, -3.0, 4.0}});
    p.add_instruction(migraphx::op::cosh{}, lit);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results(4);
    result.visit([&](auto output) { results.assign(output.begin(), output.end()); });
    std::vector<float> gold{coshf(-1), coshf(2), coshf(-3), coshf(4)};
    EXPECT(migraphx::verify_range(results, gold));
}
TEST_CASE(tanh_test) TEST_CASE(tanh_test)
{ {
migraphx::program p; migraphx::program p;
......
...@@ -215,6 +215,101 @@ struct test_sin ...@@ -215,6 +215,101 @@ struct test_sin
} }
}; };
// GPU-vs-CPU verification program: elementwise cos on a 1-D double tensor.
struct test_cos
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::double_type, {8}});
        p.add_instruction(migraphx::op::cos{}, x);
        return p;
    }
};
// GPU-vs-CPU verification program: elementwise tan on a 1-D float tensor.
struct test_tan
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {16}});
        p.add_instruction(migraphx::op::tan{}, x);
        return p;
    }
};
// GPU-vs-CPU verification program: elementwise sinh on a 1-D double tensor.
struct test_sinh
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::double_type, {16}});
        p.add_instruction(migraphx::op::sinh{}, x);
        return p;
    }
};
// GPU-vs-CPU verification program: elementwise cosh on a 1-D double tensor.
struct test_cosh
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::double_type, {16}});
        p.add_instruction(migraphx::op::cosh{}, x);
        return p;
    }
};
// GPU-vs-CPU verification program: elementwise tanh on a 4-D float tensor.
struct test_tanh
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {4, 3, 3, 3}};
        auto x = p.add_parameter("x", s);
        p.add_instruction(migraphx::op::tanh{}, x);
        return p;
    }
};
// GPU-vs-CPU verification program: elementwise asin on a 1-D double tensor.
struct test_asin
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::double_type, {16}});
        p.add_instruction(migraphx::op::asin{}, x);
        return p;
    }
};
// GPU-vs-CPU verification program: elementwise acos on a 1-D double tensor.
struct test_acos
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::double_type, {16}});
        p.add_instruction(migraphx::op::acos{}, x);
        return p;
    }
};
// GPU-vs-CPU verification program: elementwise atan on a 1-D double tensor.
struct test_atan
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::double_type, {16}});
        p.add_instruction(migraphx::op::atan{}, x);
        return p;
    }
};
struct test_scale struct test_scale
{ {
migraphx::program create_program() const migraphx::program create_program() const
...@@ -467,17 +562,6 @@ struct test_sigmoid ...@@ -467,17 +562,6 @@ struct test_sigmoid
} }
}; };
// NOTE(review): duplicate definition of test_tanh — an identical struct is
// defined earlier in this file, so keeping both violates the one-definition
// rule. This appears to be the pre-merge copy that the merge removes; it
// should be deleted once the merge residue is cleaned up — TODO confirm.
struct test_tanh
{
migraphx::program create_program() const
{
migraphx::program p;
auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
p.add_instruction(migraphx::op::tanh{}, x);
return p;
}
};
struct test_abs struct test_abs
{ {
migraphx::program create_program() const migraphx::program create_program() const
...@@ -901,6 +985,14 @@ int main() ...@@ -901,6 +985,14 @@ int main()
verify_program<test_add_half>(); verify_program<test_add_half>();
verify_program<test_mul>(); verify_program<test_mul>();
verify_program<test_sin>(); verify_program<test_sin>();
verify_program<test_cos>();
verify_program<test_tan>();
verify_program<test_sinh>();
verify_program<test_cosh>();
verify_program<test_tanh>();
verify_program<test_asin>();
verify_program<test_acos>();
verify_program<test_atan>();
verify_program<test_scale>(); verify_program<test_scale>();
verify_program<test_triadd>(); verify_program<test_triadd>();
verify_program<test_triadd2>(); verify_program<test_triadd2>();
...@@ -919,7 +1011,6 @@ int main() ...@@ -919,7 +1011,6 @@ int main()
verify_program<test_add_relu>(); verify_program<test_add_relu>();
verify_program<test_leaky_relu>(); verify_program<test_leaky_relu>();
verify_program<test_sigmoid>(); verify_program<test_sigmoid>();
verify_program<test_tanh>();
verify_program<test_elu>(); verify_program<test_elu>();
verify_program<test_conv_pooling>(); verify_program<test_conv_pooling>();
verify_program<test_global_avg_pooling>(); verify_program<test_global_avg_pooling>();
......
 acos-example:;
xy"Acos test_acosZ
x


b
y


B
\ No newline at end of file
 asin-example:;
xy"Asin test_asinZ
x


b
y


B
\ No newline at end of file
 atan-example:;
xy"Atan test_atanZ
x


b
y


B
\ No newline at end of file
 cos-example:9
xy"Costest_cosZ
x


b
y


B
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment