Commit 01557ea0 authored by Shucai Xiao

merge changes from the develop branches.

parents ec1ab58b 767ca0cc
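Most of the hunks below add a static reflect member to the GPU operator structs so that generic utilities can walk an operator's named fields: wrappers either forward to the contained op via migraphx::reflect(self.op, f) or pack the fields directly with f(field, "name"). The following is a minimal, self-contained sketch of that pattern, assuming a simplified stand-in for pack and a hypothetical clip-like op; it illustrates the shape of the code, not the actual migraphx machinery.

#include <iostream>
#include <string>
#include <tuple>

// Simplified stand-in for migraphx::pack: just collect the visited fields.
template <class... Ts>
auto pack(Ts&&... xs)
{
    return std::make_tuple(std::forward<Ts>(xs)...);
}

// Hypothetical op with the same reflect shape as the structs in this commit.
struct clip_op
{
    float max_val = 6.0f;
    float min_val = 0.0f;

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.max_val, "max_val"), f(self.min_val, "min_val"));
    }
};

int main()
{
    clip_op op;
    // A visitor that prints "name=value" for every reflected field.
    auto printer = [](auto& field, const std::string& name) {
        std::cout << name << "=" << field << "\n";
        return 0; // pack() only needs something to collect
    };
    clip_op::reflect(op, printer);
}

Running this prints max_val=6 and min_val=0; the same field enumeration is what the added reflect members make possible for printing, hashing, or serializing the GPU ops.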
@@ -13,6 +13,13 @@ struct context;
struct miopen_contiguous
{
    op::contiguous op;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::reflect(self.op, f);
    }
    std::string name() const { return "gpu::contiguous"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument compute(context&, shape output_shape, const std::vector<argument>& args) const;
...
@@ -16,6 +16,12 @@ struct hip_convert : unary_device<hip_convert, device::convert>
{
    op::convert op;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::reflect(self.op, f);
    }
    hip_convert(op::convert oper) : op(oper) {}
    shape compute_shape(std::vector<shape> inputs) const
...
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_CLIP_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_CLIP_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void clip(hipStream_t stream, const argument& result, const argument& arg1, float max, float min);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
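The new device header above only declares the entry point; the kernel itself lives in the matching source file. As a rough sketch of what an element-wise clip launch can look like in plain HIP, using raw float buffers instead of migraphx::argument and a made-up launch configuration, assuming clip(x) = min(max(x, min_val), max_val) and mirroring the declared parameter order (max before min):

#include <hip/hip_runtime.h>

// Hypothetical standalone kernel: clamp every element of `in` into [min_val, max_val].
__global__ void clip_kernel(float* out, const float* in, float max_val, float min_val, size_t n)
{
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n)
        out[i] = fminf(fmaxf(in[i], min_val), max_val);
}

// Sketch of a host wrapper with the same max/min ordering as gpu::device::clip.
void clip(hipStream_t stream, float* out, const float* in, float max, float min, size_t n)
{
    const unsigned block = 256;
    const unsigned grid  = (n + block - 1) / block;
    hipLaunchKernelGGL(clip_kernel, dim3(grid), dim3(block), 0, stream, out, in, max, min, n);
}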
@@ -13,6 +13,13 @@ struct context;
struct miopen_elu
{
    shared<activation_descriptor> ad;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return gpu::reflect(self.ad.get(), f);
    }
    std::string name() const { return "gpu::elu"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument
...
@@ -14,6 +14,13 @@ struct context;
struct hip_gather
{
    op::gather op;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::reflect(self.op, f);
    }
    std::string name() const { return "gpu::gather"; }
    shape compute_shape(std::vector<shape> inputs) const;
    argument
...
@@ -13,6 +13,13 @@ struct context;
struct miopen_gemm
{
    op::dot op;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::reflect(self.op, f);
    }
    std::string name() const { return "gpu::gemm"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument
...
@@ -28,6 +28,13 @@ struct hip_allocate
{
    shape s;
    std::string tag{};
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.s, "shape"), f(self.tag, "tag"));
    }
    std::string name() const { return "hip::allocate"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
@@ -43,6 +50,13 @@ struct hip_allocate
struct hip_sync
{
    std::string tag{};
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.tag, "tag"));
    }
    std::string name() const { return "hip::sync"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
...
@@ -13,6 +13,13 @@ struct context;
struct miopen_leaky_relu
{
    shared<activation_descriptor> ad;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return gpu::reflect(self.ad.get(), f);
    }
    std::string name() const { return "gpu::leaky_relu"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument
...
@@ -25,6 +25,13 @@ namespace gpu {
struct hip_logsoftmax
{
    op::logsoftmax op;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::reflect(self.op, f);
    }
    std::string name() const { return "gpu::logsoftmax"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument
...
@@ -13,6 +13,13 @@ struct context;
struct miopen_lrn
{
    shared<lrn_descriptor> ldesc;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return gpu::reflect(self.ldesc.get(), f);
    }
    std::string name() const { return "gpu::lrn"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument
...
@@ -162,6 +162,38 @@ inline fused_operator_args make_fused_args()
    return make_obj<fused_operator_args>(&miopenCreateOperatorArgs);
}

template <class F>
auto reflect(miopenActivationDescriptor_t ad, F f)
{
    assert(ad != nullptr);
    miopenActivationMode_t mode = miopenActivationPASTHRU;
    double alpha = 0.0;
    double beta  = 0.0;
    double gamma = 0.0;
    miopenGetActivationDescriptor(ad, &mode, &alpha, &beta, &gamma);
    return pack(f(std::move(mode), "mode"),   // NOLINT
                f(std::move(alpha), "alpha"), // NOLINT
                f(std::move(beta), "beta"),   // NOLINT
                f(std::move(gamma), "gamma")); // NOLINT
}

template <class F>
auto reflect(miopenLRNDescriptor_t lrnd, F f)
{
    assert(lrnd != nullptr);
    miopenLRNMode_t mode = miopenLRNWithinChannel;
    unsigned int n = 0;
    double alpha   = 0.0;
    double beta    = 0.0;
    double k       = 0.0;
    miopenGetLRNDescriptor(lrnd, &mode, &n, &alpha, &beta, &k);
    return pack(f(std::move(mode), "mode"),   // NOLINT
                f(std::move(n), "n"),         // NOLINT
                f(std::move(alpha), "alpha"), // NOLINT
                f(std::move(beta), "beta"),   // NOLINT
                f(std::move(k), "k"));        // NOLINT
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
...
@@ -14,6 +14,12 @@ struct hip_pad
{
    op::pad op;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::reflect(self.op, f);
    }
    std::string name() const { return "gpu::pad"; }
    shape compute_shape(std::vector<shape> inputs) const;
    argument
...
@@ -16,6 +16,12 @@ struct miopen_pooling
    op::pooling op;
    shared<pooling_descriptor> pd;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::reflect(self.op, f);
    }
    std::string name() const { return "gpu::pooling"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument
...
@@ -13,6 +13,13 @@ struct context;
struct miopen_relu
{
    shared<activation_descriptor> ad;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return gpu::reflect(self.ad.get(), f);
    }
    std::string name() const { return "gpu::relu"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument
...
@@ -13,6 +13,13 @@ struct context;
struct miopen_sigmoid
{
    shared<activation_descriptor> ad;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return gpu::reflect(self.ad.get(), f);
    }
    std::string name() const { return "gpu::sigmoid"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument
...
@@ -13,6 +13,13 @@ struct context;
struct miopen_softmax
{
    op::softmax op;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::reflect(self.op, f);
    }
    std::string name() const { return "gpu::softmax"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument
...
@@ -13,6 +13,13 @@ struct context;
struct miopen_tanh
{
    shared<activation_descriptor> ad;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return gpu::reflect(self.ad.get(), f);
    }
    std::string name() const { return "gpu::tanh"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument
...
@@ -46,6 +46,7 @@
#include <migraphx/gpu/gather.hpp>
#include <migraphx/gpu/lrn.hpp>
#include <migraphx/gpu/convert.hpp>
#include <migraphx/gpu/clip.hpp>
#include <utility>
#include <functional>
#include <algorithm>
@@ -103,6 +104,7 @@ struct miopen_apply
        add_extend_op<hip_gather, op::gather>("gather");
        add_extend_op<hip_pad, op::pad>("pad");
        add_extend_op<hip_convert, op::convert>("convert");
        add_extend_op<hip_clip, op::clip>("clip");
        add_lrn_op();
        add_convolution_op();
...
@@ -14,6 +14,13 @@ struct hip_load_literal
{
    shape s;
    std::size_t n = 0;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.s, "shape"), f(self.n, "id"));
    }
    std::string name() const { return "hip::load_literal"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
...
@@ -1586,4 +1586,21 @@ TEST_CASE(fp32_fp16_test)
    test_case({"add"});
}

TEST_CASE(clip_test)
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {3}};
    auto l = p.add_literal(migraphx::literal{s, {-1.0, 0.0, 10.0}});
    migraphx::op::clip op;
    op.max_val = 6.0;
    op.min_val = 0.0;
    p.add_instruction(op, l);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results_vector(3);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {0.0, 0.0, 6.0};
    EXPECT(migraphx::verify_range(results_vector, gold));
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
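For reference, the gold vector in clip_test follows directly from clamping each input into [min_val, max_val] = [0, 6]: -1 maps to 0, 0 stays 0, and 10 maps to 6. A small host-side sketch (hypothetical, just to show the arithmetic) using std::clamp:

#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
    const float min_val = 0.0f;
    const float max_val = 6.0f;
    std::vector<float> in = {-1.0f, 0.0f, 10.0f};
    for(float x : in)
        std::cout << std::clamp(x, min_val, max_val) << " "; // prints: 0 0 6
}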