Unverified commit 436b459e, authored by Paul Fultz II and committed by GitHub

Merge pull request #135 from ROCmSoftwarePlatform/combineOperators

Combine operators
parents f0fac3e0 62a9aa8e
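This change collapses the per-operator boilerplate in the GPU target. A new header, oper.hpp, introduces CRTP helpers: oper<Derived> derives the operator name from the type name, and unary_device / binary_device supply compute_shape, compute, and output_alias for pointwise device kernels, so operators such as sin, tan, sinh, cosh, mul, max, and min reduce to one-line struct definitions. In lowering, the long if/else dispatch over instruction names is replaced by an apply_map table populated through add_generic_op, add_extend_op, add_miopen_simple_op, and add_miopen_extend_op registrars. New MIOpen activation descriptors (sigmoid, tanh, abs, elu) are added, along with CPU tests, GPU verify tests, and ONNX fixtures for the new operators.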
#ifndef MIGRAPHX_GUARD_RTGLIB_MAX_HPP
#define MIGRAPHX_GUARD_RTGLIB_MAX_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/gpu/oper.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/max.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct hip_max : binary_device<hip_max, device::max>
{
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_MIN_HPP
#define MIGRAPHX_GUARD_RTGLIB_MIN_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/gpu/oper.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/min.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct hip_min : binary_device<hip_min, device::min>
{
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
@@ -91,6 +91,29 @@ inline activation_descriptor make_relu()
return ad;
}
inline activation_descriptor make_sigmoid()
{
auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
miopenSetActivationDescriptor(ad.get(), miopenActivationLOGISTIC, 0, 0, 0);
return ad;
}
inline activation_descriptor make_tanh()
{
auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
// MIOpen's TANH mode computes beta * tanh(alpha * x); the onnx tanh
// operator applies no additional scaling, so alpha and beta are set to 1
miopenSetActivationDescriptor(ad.get(), miopenActivationTANH, 1, 1, 0);
return ad;
}
inline activation_descriptor make_abs()
{
auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
miopenSetActivationDescriptor(ad.get(), miopenActivationABS, 0, 0, 0);
return ad;
}
inline activation_descriptor make_leaky_relu(double alpha)
{
auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
@@ -98,6 +121,13 @@ inline activation_descriptor make_leaky_relu(double alpha)
return ad;
}
inline activation_descriptor make_elu(double alpha)
{
auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
// MIOpen's ELU mode computes x for x > 0 and alpha * (exp(x) - 1) otherwise
miopenSetActivationDescriptor(ad.get(), miopenActivationELU, alpha, 0, 0);
return ad;
}
inline fusion_plan_descriptor make_fusion_plan(const shape& input)
{
auto t = make_tensor(input);
......
@@ -2,6 +2,7 @@
#define MIGRAPHX_GUARD_RTGLIB_MUL_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/gpu/oper.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
@@ -22,12 +23,8 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
-struct hip_mul
-{
-    std::string name() const { return "gpu::mul"; }
-    shape compute_shape(const std::vector<shape>& inputs) const;
-    argument compute(context&, const shape&, const std::vector<argument>& args) const;
-    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
-};
+struct hip_mul : binary_device<hip_mul, device::mul>
+{
+};
} // namespace gpu
......
#ifndef MIGRAPHX_GUARD_RTGLIB_UNARY_HPP
#define MIGRAPHX_GUARD_RTGLIB_UNARY_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/type_name.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/config.hpp>
#include <utility>
#include <iostream>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
template <class Derived>
struct oper
{
std::string name() const
{
const std::string& name = get_type_name<Derived>();
// search the namespace gpu (::gpu::)
auto pos_ns = name.find("::gpu::");
if(pos_ns != std::string::npos)
{
auto pos_name = name.find("hip_", pos_ns + std::string("::gpu::").length());
if(pos_name != std::string::npos)
{
return std::string("gpu::") + name.substr(pos_name + 4);
}
else
{
return name.substr(pos_ns + 2);
}
}
return "unknown";
}
};
template <class Derived, void (*F)(hipStream_t, const argument&, const argument&)>
struct unary_device : oper<Derived>
{
shape compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(2);
return inputs.at(0);
}
argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
F(ctx.get_stream().get(), args[1], args[0]);
return args[1];
}
int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};
template <class Derived, void (*F)(hipStream_t, const argument&, const argument&, const argument&)>
struct binary_device : oper<Derived>
{
shape compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(3);
return inputs.at(0);
}
argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
F(ctx.get_stream().get(), args[2], args[1], args[0]);
return args[2];
}
int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
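To make the pattern concrete, here is a minimal sketch of what defining a new pointwise operator now takes, modeled on the hip_tan and hip_sinh headers in this diff; hip_erf and device::erf are hypothetical stand-ins, not part of this change:

// Hypothetical: assume a device kernel with the signature
//   void erf(hipStream_t stream, const argument& result, const argument& arg);
// exists in namespace migraphx::gpu::device. The entire operator is then:
struct hip_erf : unary_device<hip_erf, device::erf>
{
};
// oper<Derived> derives the name from the type: "migraphx::gpu::hip_erf"
// becomes "gpu::erf". unary_device supplies compute_shape (expects two
// shapes: the input plus the preallocated output), compute (launches the
// kernel on the context's stream), and output_alias (the last argument
// is the output buffer, which is returned).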
#ifndef MIGRAPHX_GUARD_RTGLIB_SIGMOID_HPP
#define MIGRAPHX_GUARD_RTGLIB_SIGMOID_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct miopen_sigmoid
{
shared<activation_descriptor> ad;
std::string name() const { return "gpu::sigmoid"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
@@ -2,6 +2,7 @@
#define MIGRAPHX_GUARD_RTGLIB_SIN_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/gpu/oper.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
@@ -22,12 +23,8 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
-struct hip_sin
-{
-    std::string name() const { return "gpu::sin"; }
-    shape compute_shape(const std::vector<shape>& inputs) const;
-    argument compute(context&, const shape&, const std::vector<argument>& args) const;
-    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
-};
+struct hip_sin : unary_device<hip_sin, device::sin>
+{
+};
} // namespace gpu
......
-#include <migraphx/gpu/mul.hpp>
-#include <migraphx/operators.hpp>
-namespace migraphx {
-inline namespace MIGRAPHX_INLINE_NS {
-namespace gpu {
-shape hip_mul::compute_shape(const std::vector<shape>& inputs) const
-{
-    // check_shapes{inputs, *this}.has(3).standard();
-    check_shapes{inputs, *this}.has(3);
-    return inputs.at(0);
-}
-argument hip_mul::compute(context& ctx, const shape&, const std::vector<argument>& args) const
-{
-    device::mul(ctx.get_stream().get(), args[2], args[0], args[1]);
-    return args[2];
-}
-} // namespace gpu
-} // namespace MIGRAPHX_INLINE_NS
-} // namespace migraphx
......
#ifndef MIGRAPHX_GUARD_RTGLIB_SINH_HPP
#define MIGRAPHX_GUARD_RTGLIB_SINH_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/gpu/oper.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/sinh.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/config.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct hip_sinh : unary_device<hip_sinh, device::sinh>
{
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
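Note that with hip_mul now deriving from binary_device, the hand-written mul.cpp translation unit above is dead weight: name, compute_shape, compute, and output_alias all come from the template, which is why the file is removed rather than updated.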
#ifndef MIGRAPHX_GUARD_RTGLIB_TAN_HPP
#define MIGRAPHX_GUARD_RTGLIB_TAN_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/gpu/oper.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/tan.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/config.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct hip_tan : unary_device<hip_tan, device::tan>
{
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_TANH_HPP
#define MIGRAPHX_GUARD_RTGLIB_TANH_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct miopen_tanh
{
shared<activation_descriptor> ad;
std::string name() const { return "gpu::tanh"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
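sigmoid and tanh (like relu and abs) keep hand-written miopen_* wrapper structs rather than using unary_device, since they are lowered through miopenActivationForward with an activation descriptor instead of a HIP device kernel, and the descriptor member does not fit the template. Their implementations appear in sigmoid.cpp and tanh.cpp further down.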
@@ -16,16 +16,32 @@
#include <migraphx/gpu/convolution.hpp>
#include <migraphx/gpu/contiguous.hpp>
#include <migraphx/gpu/relu.hpp>
#include <migraphx/gpu/sigmoid.hpp>
#include <migraphx/gpu/abs.hpp>
#include <migraphx/gpu/leaky_relu.hpp>
#include <migraphx/gpu/elu.hpp>
#include <migraphx/gpu/softmax.hpp>
#include <migraphx/gpu/add.hpp>
#include <migraphx/gpu/exp.hpp>
#include <migraphx/gpu/log.hpp>
#include <migraphx/gpu/sin.hpp>
#include <migraphx/gpu/cos.hpp>
#include <migraphx/gpu/tan.hpp>
#include <migraphx/gpu/sinh.hpp>
#include <migraphx/gpu/cosh.hpp>
#include <migraphx/gpu/tanh.hpp>
#include <migraphx/gpu/asin.hpp>
#include <migraphx/gpu/acos.hpp>
#include <migraphx/gpu/atan.hpp>
#include <migraphx/gpu/mul.hpp>
#include <migraphx/gpu/max.hpp>
#include <migraphx/gpu/min.hpp>
#include <migraphx/gpu/batchnorm.hpp>
#include <migraphx/gpu/pooling.hpp>
#include <migraphx/gpu/gemm.hpp>
#include <migraphx/gpu/concat.hpp>
#include <utility>
#include <functional>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
@@ -35,6 +51,7 @@ struct miopen_apply
{
program* prog = nullptr;
context ctx{};
std::unordered_map<std::string, std::function<instruction_ref(instruction_ref)>> apply_map{};
void check_shape(shape x, instruction_ref i)
{
@@ -43,58 +60,50 @@
(void)i;
}
+    void init()
+    {
+        add_miopen_simple_op<miopen_relu>("relu", make_relu);
+        add_miopen_simple_op<miopen_sigmoid>("sigmoid", make_sigmoid);
+        add_miopen_simple_op<miopen_abs>("abs", make_abs);
+        add_miopen_simple_op<miopen_tanh>("tanh", make_tanh);
+        add_miopen_extend_op<miopen_leaky_relu, op::leaky_relu>("leaky_relu", make_leaky_relu);
+        add_miopen_extend_op<miopen_elu, op::elu>("elu", make_elu);
+        add_generic_op<hip_add>("add");
+        add_generic_op<hip_exp>("exp");
+        add_generic_op<hip_log>("log");
+        add_generic_op<hip_sin>("sin");
+        add_generic_op<hip_cos>("cos");
+        add_generic_op<hip_tan>("tan");
+        add_generic_op<hip_sinh>("sinh");
+        add_generic_op<hip_cosh>("cosh");
+        add_generic_op<hip_asin>("asin");
+        add_generic_op<hip_acos>("acos");
+        add_generic_op<hip_atan>("atan");
+        add_generic_op<hip_mul>("mul");
+        add_generic_op<hip_max>("max");
+        add_generic_op<hip_min>("min");
+        add_extend_op<miopen_gemm, op::dot>("dot");
+        add_extend_op<miopen_contiguous, op::contiguous>("contiguous");
+        add_extend_op<hip_concat, op::concat>("concat");
+        add_extend_op<miopen_softmax, op::softmax>("softmax");
+        add_convolution_op();
+        add_pooling_op();
+        add_batch_norm_inference_op();
+    }
    void apply()
    {
+        init();
        for(auto it = prog->begin(); it != prog->end(); it++)
        {
            auto s = it->get_shape();
-            if(it->name() == "convolution")
-            {
-                check_shape(s, apply_convolution(it));
-            }
-            else if(it->name() == "relu")
-            {
-                check_shape(s, apply_relu(it));
-            }
-            else if(it->name() == "leaky_relu")
-            {
-                check_shape(s, apply_leaky_relu(it));
-            }
-            else if(it->name() == "pooling")
-            {
-                check_shape(s, apply_pooling(it));
-            }
-            else if(it->name() == "add")
-            {
-                check_shape(s, apply_add(it));
-            }
-            else if(it->name() == "sin")
-            {
-                check_shape(s, apply_sin(it));
-            }
-            else if(it->name() == "mul")
-            {
-                check_shape(s, apply_mul(it));
-            }
-            else if(it->name() == "dot")
-            {
-                check_shape(s, apply_gemm(it));
-            }
-            else if(it->name() == "contiguous")
-            {
-                check_shape(s, apply_contiguous(it));
-            }
-            else if(it->name() == "concat")
-            {
-                check_shape(s, apply_concat(it));
-            }
-            else if(it->name() == "batch_norm_inference")
-            {
-                check_shape(s, apply_batch_norm_inference(it));
-            }
-            else if(it->name() == "softmax")
-            {
-                check_shape(s, apply_softmax(it));
-            }
+            if(apply_map.count(it->name()) > 0)
+            {
+                check_shape(s, apply_map.at(it->name())(it));
+            }
        }
    }
@@ -113,120 +122,103 @@
}
}
-    instruction_ref apply_convolution(instruction_ref ins)
-    {
-        auto&& op      = any_cast<op::convolution>(ins->get_operator());
-        auto conv      = miopen_convolution{op, make_conv(op)};
-        auto ws        = conv.compile(ctx, ins->get_shape(), ins->inputs());
-        auto workspace = insert_allocation(ins, ws, "workspace");
-        auto output    = insert_allocation(ins, ins->get_shape());
-        return prog->replace_instruction(
-            ins, conv, ins->inputs().at(0), ins->inputs().at(1), workspace, output);
-    }
+    void add_convolution_op()
+    {
+        apply_map.emplace("convolution", [=](instruction_ref ins) {
+            auto&& op = any_cast<op::convolution>(ins->get_operator());
+            auto conv = miopen_convolution{op, make_conv(op)};
+            auto ws   = conv.compile(ctx, ins->get_shape(), ins->inputs());
+            auto workspace = insert_allocation(ins, ws, "workspace");
+            auto output    = insert_allocation(ins, ins->get_shape());
+            return prog->replace_instruction(
+                ins, conv, ins->inputs().at(0), ins->inputs().at(1), workspace, output);
+        });
+    }
-    instruction_ref apply_pooling(instruction_ref ins)
-    {
-        auto&& op   = any_cast<op::pooling>(ins->get_operator());
-        auto pd     = make_pooling(op);
-        auto output = insert_allocation(ins, ins->get_shape());
-        return prog->replace_instruction(
-            ins, miopen_pooling{op, std::move(pd)}, ins->inputs().at(0), output);
-    }
+    void add_pooling_op()
+    {
+        apply_map.emplace("pooling", [=](instruction_ref ins) {
+            auto&& op   = any_cast<op::pooling>(ins->get_operator());
+            auto pd     = make_pooling(op);
+            auto output = insert_allocation(ins, ins->get_shape());
+            return prog->replace_instruction(
+                ins, miopen_pooling{op, std::move(pd)}, ins->inputs().at(0), output);
+        });
+    }
-    instruction_ref apply_relu(instruction_ref ins)
-    {
-        auto ad     = make_relu();
-        auto output = insert_allocation(ins, ins->get_shape());
-        return prog->replace_instruction(
-            ins, miopen_relu{std::move(ad)}, ins->inputs().at(0), output);
-    }
-    instruction_ref apply_leaky_relu(instruction_ref ins)
-    {
-        auto&& op   = any_cast<op::leaky_relu>(ins->get_operator());
-        auto ad     = make_leaky_relu(op.alpha);
-        auto output = insert_allocation(ins, ins->get_shape());
-        return prog->replace_instruction(
-            ins, miopen_leaky_relu{std::move(ad)}, ins->inputs().at(0), output);
-    }
-    instruction_ref apply_softmax(instruction_ref ins)
-    {
-        auto&& op   = any_cast<op::softmax>(ins->get_operator());
-        auto output = insert_allocation(ins, ins->get_shape());
-        return prog->replace_instruction(ins, miopen_softmax{op}, ins->inputs().at(0), output);
-    }
-    instruction_ref apply_add(instruction_ref ins)
-    {
-        auto output = insert_allocation(ins, ins->get_shape());
-        return prog->replace_instruction(
-            ins, hip_add{}, ins->inputs().at(0), ins->inputs().at(1), output);
-    }
-    instruction_ref apply_sin(instruction_ref ins)
-    {
-        auto output = insert_allocation(ins, ins->get_shape());
-        return prog->replace_instruction(ins, hip_sin{}, ins->inputs().at(0), output);
-    }
-    instruction_ref apply_mul(instruction_ref ins)
-    {
-        auto output = insert_allocation(ins, ins->get_shape());
-        return prog->replace_instruction(
-            ins, hip_mul{}, ins->inputs().at(0), ins->inputs().at(1), output);
-    }
-    instruction_ref apply_gemm(instruction_ref ins)
-    {
-        auto&& op   = any_cast<op::dot>(ins->get_operator());
-        auto output = insert_allocation(ins, ins->get_shape());
-        return prog->replace_instruction(
-            ins, miopen_gemm{op}, ins->inputs().at(0), ins->inputs().at(1), output);
-    }
-    instruction_ref apply_contiguous(instruction_ref ins)
-    {
-        auto&& op   = any_cast<op::contiguous>(ins->get_operator());
-        auto output = insert_allocation(ins, ins->get_shape());
-        return prog->replace_instruction(ins, miopen_contiguous{op}, ins->inputs().at(0), output);
-    }
-    instruction_ref apply_concat(instruction_ref ins)
-    {
-        auto&& op   = any_cast<op::concat>(ins->get_operator());
-        auto output = insert_allocation(ins, ins->get_shape());
-        std::vector<instruction_ref> refs = ins->inputs();
-        refs.push_back(output);
-        return prog->replace_instruction(ins, hip_concat{op}, refs);
-    }
+    template <class T>
+    void add_generic_op(std::string name)
+    {
+        apply_map.emplace(name, [=](instruction_ref ins) {
+            auto output = insert_allocation(ins, ins->get_shape());
+            std::vector<instruction_ref> refs = ins->inputs();
+            refs.push_back(output);
+            return prog->replace_instruction(ins, T{}, refs);
+        });
+    }
+    template <class T, class Op>
+    void add_extend_op(std::string name)
+    {
+        apply_map.emplace(name, [=](instruction_ref ins) {
+            auto&& op   = any_cast<Op>(ins->get_operator());
+            auto output = insert_allocation(ins, ins->get_shape());
+            std::vector<instruction_ref> refs = ins->inputs();
+            refs.push_back(output);
+            return prog->replace_instruction(ins, T{op}, refs);
+        });
+    }
+    template <class T, class Op, class F>
+    void add_miopen_extend_op(std::string name, F f)
+    {
+        apply_map.emplace(name, [=](instruction_ref ins) {
+            auto&& op   = any_cast<Op>(ins->get_operator());
+            auto ad     = f(op.alpha);
+            auto output = insert_allocation(ins, ins->get_shape());
+            return prog->replace_instruction(ins, T{std::move(ad)}, ins->inputs().at(0), output);
+        });
+    }
+    template <class T, class F>
+    void add_miopen_simple_op(std::string name, F f)
+    {
+        apply_map.emplace(name, [=](instruction_ref ins) {
+            auto ad     = f();
+            auto output = insert_allocation(ins, ins->get_shape());
+            return prog->replace_instruction(ins, T{std::move(ad)}, ins->inputs().at(0), output);
+        });
+    }
-    instruction_ref apply_batch_norm_inference(instruction_ref ins)
-    {
-        auto&& op       = any_cast<op::batch_norm_inference>(ins->get_operator());
-        auto output     = insert_allocation(ins, ins->get_shape());
-        shape old_shape = ins->inputs().at(1)->get_shape();
-        std::vector<int64_t> new_shape{1, static_cast<int64_t>(old_shape.elements()), 1, 1};
-        auto reshape_op = op::reshape{new_shape};
-        std::vector<instruction_ref> reshapes;
-        std::transform(ins->inputs().begin() + 1,
-                       ins->inputs().end(),
-                       std::back_inserter(reshapes),
-                       [&](auto i) { return prog->insert_instruction(ins, reshape_op, i); });
-        return prog->replace_instruction(ins,
-                                         miopen_batch_norm_inference{op},
-                                         ins->inputs().at(0),
-                                         reshapes[0],
-                                         reshapes[1],
-                                         reshapes[2],
-                                         reshapes[3],
-                                         output);
-    }
+    void add_batch_norm_inference_op()
+    {
+        apply_map.emplace("batch_norm_inference", [=](instruction_ref ins) {
+            auto&& op       = any_cast<op::batch_norm_inference>(ins->get_operator());
+            auto output     = insert_allocation(ins, ins->get_shape());
+            shape old_shape = ins->inputs().at(1)->get_shape();
+            std::vector<int64_t> new_shape{1, static_cast<int64_t>(old_shape.elements()), 1, 1};
+            auto reshape_op = op::reshape{new_shape};
+            std::vector<instruction_ref> reshapes;
+            std::transform(ins->inputs().begin() + 1,
+                           ins->inputs().end(),
+                           std::back_inserter(reshapes),
+                           [&](auto i) { return prog->insert_instruction(ins, reshape_op, i); });
+            return prog->replace_instruction(ins,
+                                             miopen_batch_norm_inference{op},
+                                             ins->inputs().at(0),
+                                             reshapes[0],
+                                             reshapes[1],
+                                             reshapes[2],
+                                             reshapes[3],
+                                             output);
+        });
+    }
};
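One design note on the registrars above: the lambdas capture [=] inside a member function, which copies the this pointer, so each callback can still reach prog, ctx, and insert_allocation when apply() invokes it later through apply_map.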
......
#include <migraphx/gpu/sigmoid.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
shape miopen_sigmoid::compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(2).not_broadcasted();
return inputs.at(1);
}
argument miopen_sigmoid::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
float alpha = 1, beta = 0;
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
miopenActivationForward(ctx.get_stream().get_miopen(),
ad.get(),
&alpha,
x_desc.get(),
args[0].implicit(),
&beta,
y_desc.get(),
args[1].implicit());
return args[1];
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#include <migraphx/gpu/tanh.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
shape miopen_tanh::compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(2).not_broadcasted();
return inputs.at(1);
}
argument miopen_tanh::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
float alpha = 1, beta = 0;
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
miopenActivationForward(ctx.get_stream().get_miopen(),
ad.get(),
&alpha,
x_desc.get(),
args[0].implicit(),
&beta,
y_desc.get(),
args[1].implicit());
return args[1];
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
@@ -7,6 +7,10 @@
#include <migraphx/verify.hpp>
#include "test.hpp"
float sigmoid(float x) { return 1 / (1 + expf(-x)); }
float elu(float a, float x) { return x > 0 ? x : a * std::expm1(x); }
TEST_CASE(slice_test)
{
{
@@ -409,6 +413,20 @@ TEST_CASE(exp_test)
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(log_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {3}};
auto l = p.add_literal(migraphx::literal{s, {1, 2, 3}});
p.add_instruction(migraphx::op::log{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.0f, 0.6931471806f, 1.0986122887f};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(sin_test)
{
migraphx::program p;
@@ -451,6 +469,50 @@ TEST_CASE(tan_test)
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(asin_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {3}};
std::vector<float> data{-0.5f, 0.0f, 0.9f};
auto l = p.add_literal(migraphx::literal{s, data});
p.add_instruction(migraphx::op::asin{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-0.5235987756f, 0.0f, 1.1197695150f};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(acos_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::double_type, {3}};
std::vector<float> data{-0.8f, 0.0f, 1.0f};
auto l = p.add_literal(migraphx::literal{s, data});
p.add_instruction(migraphx::op::acos{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {2.4980915448f, 1.5707963268f, 0.0f};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(atan_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::double_type, {3}};
auto l = p.add_literal(migraphx::literal{s, {-1, 0, 1}});
p.add_instruction(migraphx::op::atan{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-0.7853981634f, 0.0f, 0.7853981634f};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(add_test)
{
migraphx::program p;
@@ -1105,4 +1167,123 @@ TEST_CASE(identity_test)
EXPECT(std::equal(data.begin(), data.end(), results_vector.begin()));
}
TEST_CASE(abs_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l = p.add_literal(migraphx::literal{s, {-1, 2, -3, 4}});
p.add_instruction(migraphx::op::abs{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1, 2, 3, 4};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(sigmoid_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l = p.add_literal(migraphx::literal{s, {-1, 2, -3, 4}});
p.add_instruction(migraphx::op::sigmoid{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{sigmoid(-1), sigmoid(2), sigmoid(-3), sigmoid(4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(sinh_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l = p.add_literal(migraphx::literal{s, {-1.0, 2.0, -3.0, 4.0}});
p.add_instruction(migraphx::op::sinh{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{sinhf(-1), sinhf(2), sinhf(-3), sinhf(4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(cosh_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l = p.add_literal(migraphx::literal{s, {-1.0, 2.0, -3.0, 4.0}});
p.add_instruction(migraphx::op::cosh{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{coshf(-1), coshf(2), coshf(-3), coshf(4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(tanh_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l = p.add_literal(migraphx::literal{s, {-1.0, 2.0, -3.0, 4.0}});
p.add_instruction(migraphx::op::tanh{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{tanhf(-1), tanhf(2), tanhf(-3), tanhf(4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(elu_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l = p.add_literal(migraphx::literal{s, {-1.0, 2.0, -3.0, 4.0}});
float alpha = 0.5;
p.add_instruction(migraphx::op::elu{alpha}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{elu(alpha, -1), elu(alpha, 2), elu(alpha, -3), elu(alpha, 4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(max_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {3}};
auto l0 = p.add_literal(migraphx::literal{s, {1, 4, 3}});
auto l1 = p.add_literal(migraphx::literal{s, {2, 8, 6}});
auto l2 = p.add_literal(migraphx::literal{s, {7, 5, 9}});
auto curr_max = p.add_instruction(migraphx::op::max{}, l0, l1);
p.add_instruction(migraphx::op::max{}, curr_max, l2);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{7, 8, 9};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(min_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {3}};
auto l0 = p.add_literal(migraphx::literal{s, {1, 4, 3}});
auto l1 = p.add_literal(migraphx::literal{s, {2, 8, 6}});
auto l2 = p.add_literal(migraphx::literal{s, {7, 5, 9}});
auto curr_min = p.add_instruction(migraphx::op::min{}, l0, l1);
p.add_instruction(migraphx::op::min{}, curr_min, l2);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1, 4, 3};
EXPECT(migraphx::verify_range(results_vector, gold));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -203,6 +203,32 @@ struct test_mul
}
};
struct test_exp
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {6}};
std::vector<float> data{0.1f, 0.2f, 1.f, 2.f, 0.6f, 10.f};
auto x = p.add_literal(s, data);
p.add_instruction(migraphx::op::exp{}, x);
return p;
}
};
struct test_log
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {6}};
std::vector<float> data{0.1f, 0.2f, 1.f, 2.f, 0.6f, 100.f};
auto x = p.add_literal(s, data);
p.add_instruction(migraphx::op::log{}, x);
return p;
}
};
struct test_sin
{
migraphx::program create_program() const
@@ -215,6 +241,101 @@ struct test_sin
}
};
struct test_cos
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::double_type, {8}};
auto x = p.add_parameter("x", s);
p.add_instruction(migraphx::op::cos{}, x);
return p;
}
};
struct test_tan
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {16}};
auto x = p.add_parameter("x", s);
p.add_instruction(migraphx::op::tan{}, x);
return p;
}
};
struct test_sinh
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::double_type, {16}};
auto x = p.add_parameter("x", s);
p.add_instruction(migraphx::op::sinh{}, x);
return p;
}
};
struct test_cosh
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::double_type, {16}};
auto x = p.add_parameter("x", s);
p.add_instruction(migraphx::op::cosh{}, x);
return p;
}
};
struct test_tanh
{
migraphx::program create_program() const
{
migraphx::program p;
auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
p.add_instruction(migraphx::op::tanh{}, x);
return p;
}
};
struct test_asin
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::double_type, {16}};
auto x = p.add_parameter("x", s);
p.add_instruction(migraphx::op::asin{}, x);
return p;
}
};
struct test_acos
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::double_type, {16}};
auto x = p.add_parameter("x", s);
p.add_instruction(migraphx::op::acos{}, x);
return p;
}
};
struct test_atan
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::double_type, {16}};
auto x = p.add_parameter("x", s);
p.add_instruction(migraphx::op::atan{}, x);
return p;
}
};
struct test_scale
{
migraphx::program create_program() const
@@ -456,6 +577,28 @@ struct test_add_relu
}
};
struct test_sigmoid
{
migraphx::program create_program() const
{
migraphx::program p;
auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
p.add_instruction(migraphx::op::sigmoid{}, x);
return p;
}
};
struct test_abs
{
migraphx::program create_program() const
{
migraphx::program p;
auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
p.add_instruction(migraphx::op::abs{}, x);
return p;
}
};
struct test_leaky_relu
{
migraphx::program create_program() const
@@ -467,6 +610,17 @@ struct test_leaky_relu
}
};
struct test_elu
{
migraphx::program create_program() const
{
migraphx::program p;
auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
p.add_instruction(migraphx::op::elu{1.0}, x);
return p;
}
};
struct test_conv_pooling
{
migraphx::program create_program() const
@@ -849,13 +1003,24 @@ struct test_conv_bn_relu_pooling2
int main()
{
verify_program<test_abs>();
verify_program<test_concat>();
verify_program<test_concat2>();
verify_program<test_concat_relu>();
verify_program<test_add>();
verify_program<test_add_half>();
verify_program<test_mul>();
verify_program<test_exp>();
verify_program<test_log>();
verify_program<test_sin>();
verify_program<test_cos>();
verify_program<test_tan>();
verify_program<test_sinh>();
verify_program<test_cosh>();
verify_program<test_tanh>();
verify_program<test_asin>();
verify_program<test_acos>();
verify_program<test_atan>();
verify_program<test_scale>();
verify_program<test_triadd>();
verify_program<test_triadd2>();
@@ -873,6 +1038,8 @@ int main()
verify_program<test_conv_relu_half>();
verify_program<test_add_relu>();
verify_program<test_leaky_relu>();
verify_program<test_sigmoid>();
verify_program<test_elu>();
verify_program<test_conv_pooling>();
verify_program<test_global_avg_pooling>();
verify_program<test_global_max_pooling>();
......
(binary ONNX test fixtures: acos-example, asin-example, atan-example, cos-example, and cosh-example; each is a single-operator graph with input x and output y)