Commit 5f4e8561 authored by Shucai Xiao's avatar Shucai Xiao
Browse files

Change the mul_add GPU implementation to use half2 vector operations for the fp16 data type

parent 9e610129
...@@ -8,30 +8,44 @@ inline namespace MIGRAPHX_INLINE_NS { ...@@ -8,30 +8,44 @@ inline namespace MIGRAPHX_INLINE_NS {
namespace gpu { namespace gpu {
namespace device { namespace device {
//__global__ void mul_add_kernel(void* a, void* x, void* b, void* r, int n)
//{
// int id = blockDim.x * blockIdx.x + threadIdx.x;
// __half* ha = reinterpret_cast<__half*>(a);
// __half* hb = reinterpret_cast<__half*>(b);
// __half* hx = reinterpret_cast<__half*>(x);
// __half* hr = reinterpret_cast<__half*>(r);
// if (id < n)
// {
// hr[id] = __float2half(__half2float(ha[id]) * __half2float(hx[id]) + __half2float(hb[id]));
// }
//}
// Fused multiply-add for fp16 data, vectorized as __half2:
//   r[i] = a[i] * x[i] + b[i]   (two half elements per thread)
//
// n is the number of __half2 PAIRS (total half elements / 2), NOT the raw
// element count — the bounds guard below is against pair indices.
//
// Preconditions (NOTE(review), confirm at call sites):
//   - the buffers hold an even number of __half values; an odd trailing
//     element is never touched by this kernel
//   - a, x, b, r are 4-byte aligned so the __half2 reinterpret_casts are valid
__global__ void mul_add_kernel(void* a, void* x, void* b, void* r, int n)
{
    int id      = blockDim.x * blockIdx.x + threadIdx.x;
    __half2* ha = reinterpret_cast<__half2*>(a);
    __half2* hb = reinterpret_cast<__half2*>(b);
    __half2* hx = reinterpret_cast<__half2*>(x);
    __half2* hr = reinterpret_cast<__half2*>(r);
    if(id < n)
    {
        // Packed fp16 multiply then add: processes two elements at once.
        hr[id] = __hadd2(__hmul2(ha[id], hx[id]), hb[id]);
    }
}
// Host launcher for the element-wise fused multiply-add:
//   result = arg1 * arg2 + arg3
// For fp16 inputs the kernel operates on __half2 pairs, so the launch is
// sized over elements()/2 thread indices.
void mul_add(hipStream_t stream,
             const argument& result,
             const argument& arg1,
             const argument& arg2,
             const argument& arg3)
{
    auto type = result.get_shape().type();
    if(type == shape::half_type)
    {
        // Two fp16 values are processed per thread, so size the grid over
        // element pairs.
        // NOTE(review): integer division drops a trailing element when the
        // element count is odd — confirm fp16 shapes here are always
        // even-sized, or add a scalar tail pass.
        auto elem_num  = result.get_shape().elements() / 2;
        int block_size = 1024;
        int block_num  = (elem_num + block_size - 1) / block_size;
        // Bug fix: launch on the caller-provided stream instead of the
        // default stream, so this kernel is ordered with the rest of the
        // work enqueued on `stream`.
        mul_add_kernel<<<block_num, block_size, 0, stream>>>(
            arg1.data(), arg2.data(), arg3.data(), result.data(), elem_num);
    }
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment