Commit 84b36bc1 authored by Chao Liu

successful CUDA run

parent 6a45afba
#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include "nvToolsExt.h"
#include "tensor.hpp"
#include "constant_tensor_descriptor.cuh"
......@@ -30,7 +31,10 @@ struct GeneratorTensor
template <class... Is>
T operator()(Is... is)
{
#if 0
#if 1
// cast before dividing: integer division by RAND_MAX would always yield 0
return static_cast<T>(std::rand()) / static_cast<T>(RAND_MAX);
#elif 0
std::initializer_list<std::size_t> ls = {static_cast<std::size_t>(is)...};
return std::accumulate(ls.begin(), ls.end(), std::size_t(0));
#else
......@@ -111,53 +115,6 @@ void host_convolution(const Tensor<T>& in, const Tensor<T>& wei, Tensor<T>& out)
f_par(std::thread::hardware_concurrency());
}
#if 0
template <class T>
void device_convolution(
const Tensor<T>& in, const Tensor<T>& wei, Tensor<T>& out)
{
DeviceTensorDescriptor<4> in_desc_device(in.mDesc);
DeviceTensorDescriptor<4> wei_desc_device(wei.mDesc);
DeviceTensorDescriptor<4> out_desc_device(out.mDesc);
printf("__func__: in_desc_device: {%u %u %u %u}, {%u %u %u %u}\n",
in_desc_device.GetLength(0),
in_desc_device.GetLength(1),
in_desc_device.GetLength(2),
in_desc_device.GetLength(3),
in_desc_device.GetStride(0),
in_desc_device.GetStride(1),
in_desc_device.GetStride(2),
in_desc_device.GetStride(3));
std::size_t data_sz = sizeof(T);
DeviceMem in_device_buf(data_sz * in.mDesc.GetElementSpace());
DeviceMem wei_device_buf(data_sz * wei.mDesc.GetElementSpace());
DeviceMem out_device_buf(data_sz * out.mDesc.GetElementSpace());
int num_thread = std::thread::hardware_concurrency();
out.GenerateTensorValue(GeneratorConstant<float>{0}, num_thread);
in_device_buf.ToDevice(in.mData.data());
wei_device_buf.ToDevice(wei.mData.data());
out_device_buf.ToDevice(out.mData.data());
dim3 block_dim(64, 1, 1);
dim3 grid_dim(1, 1, 1);
gridwise_convolution<T, 3, 3, 4, 4, 2, 2, 1, 1, 8, 8, 1>
<<<grid_dim, block_dim>>>(in_desc_device,
static_cast<T*>(in_device_buf.GetDeviceBuffer()),
wei_desc_device,
static_cast<T*>(wei_device_buf.GetDeviceBuffer()),
out_desc_device,
static_cast<T*>(out_device_buf.GetDeviceBuffer()));
checkCudaErrors(cudaGetLastError());
out_device_buf.FromDevice(out.mData.data());
}
#else
template <class T, class InDesc, class WeiDesc, class OutDesc>
void const_device_convolution(
InDesc, const Tensor<T>& in, WeiDesc, const Tensor<T>& wei, OutDesc, Tensor<T>& out)
......@@ -169,36 +126,45 @@ void const_device_convolution(
int num_thread = std::thread::hardware_concurrency();
#if 0
out.GenerateTensorValue(GeneratorConstant<float>{0}, num_thread);
#endif
in_device_buf.ToDevice(in.mData.data());
wei_device_buf.ToDevice(wei.mData.data());
out_device_buf.ToDevice(out.mData.data());
dim3 block_dim(64, 1, 1);
dim3 grid_dim(1, 1, 1);
constexpr auto I0 = Index<0>{};
constexpr auto I1 = Index<1>{};
constexpr auto I2 = Index<2>{};
constexpr auto I3 = Index<3>{};
constexpr auto in_desc = InDesc{};
constexpr auto wei_desc = WeiDesc{};
constexpr auto out_desc = OutDesc{};
constexpr unsigned NPerBlock = 1;
constexpr unsigned KPerBlock = 1;
constexpr auto in_desc = InDesc{};
constexpr auto wei_desc = WeiDesc{};
constexpr auto out_desc = OutDesc{};
constexpr unsigned NPerBlock = 1;
constexpr unsigned KPerBlock = 1;
constexpr unsigned CPerBlockLoop = 1;
constexpr unsigned OutTileSizeH = 2;
constexpr unsigned OutTileSizeW = 2;
constexpr unsigned YPerBlock = (out_desc.GetLength(I2) + OutTileSizeH - 1) / OutTileSizeH;
constexpr unsigned XPerBlock = (out_desc.GetLength(I3) + OutTileSizeW - 1) / OutTileSizeW;
constexpr unsigned OutTileSizeH = 2;
constexpr unsigned OutTileSizeW = 2;
constexpr unsigned YPerBlock = 16;
constexpr unsigned XPerBlock = 16;
constexpr unsigned NBlockCopyLen0 = 1;
constexpr unsigned NBlockCopyLen1 = 1;
constexpr unsigned NBlockCopyLen2 = 1;
constexpr unsigned NBlockCopyLen3 = 64;
constexpr unsigned nblock = (out_desc.GetLength(I0) / NPerBlock) *
(out_desc.GetLength(I1) / KPerBlock) *
(out_desc.GetLength(I2) / (OutTileSizeH * YPerBlock)) *
(out_desc.GetLength(I3) / (OutTileSizeW * XPerBlock));
dim3 block_dim(32);
dim3 grid_dim(nblock);
printf("__func__: nblock %u \n", nblock);
gridwise_convolution<T,
InDesc,
WeiDesc,
......@@ -224,11 +190,10 @@ void const_device_convolution(
checkCudaErrors(cudaGetLastError());
out_device_buf.FromDevice(out.mData.data());
}
#endif
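// Back-of-the-envelope grid size for the 64x256x34x34, K=56, 3x3 case selected in
// main() below, assuming a stride-1, no-padding convolution (so HO = WO = 34 - 3 + 1 = 32)
// and the block constants above (NPerBlock = KPerBlock = 1, OutTileSizeH = OutTileSizeW = 2,
// YPerBlock = XPerBlock = 16):
//   nblock = (64/1) * (56/1) * (32/(2*16)) * (32/(2*16)) = 64 * 56 * 1 * 1 = 3584
// i.e. 3584 thread blocks of 32 threads each.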
int main()
{
#if 1
#if 0
constexpr unsigned N = 1;
constexpr unsigned C = 1;
constexpr unsigned HI = 18;
......@@ -237,21 +202,21 @@ int main()
constexpr unsigned S = 3;
constexpr unsigned R = 3;
#elif 1
constexpr unsigned N = 1;
constexpr unsigned C = 1;
constexpr unsigned HI = 36;
constexpr unsigned WI = 36;
constexpr unsigned K = 1;
constexpr unsigned N = 64;
constexpr unsigned C = 256;
constexpr unsigned HI = 34;
constexpr unsigned WI = 34;
constexpr unsigned K = 56;
constexpr unsigned S = 3;
constexpr unsigned R = 3;
#elif 0
constexpr unsigned N = 1;
constexpr unsigned C = 1;
constexpr unsigned N = 2;
constexpr unsigned C = 3;
constexpr unsigned HI = 130;
constexpr unsigned WI = 130;
constexpr unsigned K = 1;
constexpr unsigned S = 3;
constexpr unsigned R = 3;
constexpr unsigned K = 5;
constexpr unsigned S = 3;
constexpr unsigned R = 3;
#elif 0
constexpr unsigned N = 3;
constexpr unsigned C = 16;
......@@ -278,28 +243,43 @@ int main()
int num_thread = std::thread::hardware_concurrency();
#if 0
in.GenerateTensorValue(GeneratorTensor<float>{}, num_thread);
wei.GenerateTensorValue(GeneratorTensor<float>{}, num_thread);
#endif
#if 0
host_convolution(in, wei, out_host);
#endif
#if 0
device_convolution(in, wei, out_device);
#else
const_device_convolution(in_desc, in, wei_desc, wei, out_desc, out_device);
#endif
std::cout << __func__ << ": done" << std::endl;
#if 0
LogRange(std::cout << __func__ << "in : ", in.mData, ",") << std::endl;
LogRange(std::cout << __func__ << "wei: ", wei.mData, ",") << std::endl;
LogRange(std::cout, out_host.mData, ",") << std::endl;
LogRange(std::cout, out_device.mData, ",") << std::endl;
#endif
float error = 0;
#if 0
float error = 0;
float max_diff = 0;
float host_value = 0, device_value = 0;
for(int i = 0; i < out_host.mData.size(); ++i)
{
error += std::abs(out_host.mData[i] - out_device.mData[i]);
float diff = std::abs(out_host.mData[i] - out_device.mData[i]);
if(max_diff < diff)
{
max_diff = diff;
host_value = out_host.mData[i];
device_value = out_device.mData[i];
}
}
std::cout << "error: " << error << std::endl;
std::cout << "max_diff: " << max_diff << ", " << host_value << ", " << device_value
<< std::endl;
#endif
}
......@@ -20,11 +20,11 @@ __device__ void blockwise_4d_tensor_op(
constexpr auto src_desc = SrcDesc{};
constexpr auto dst_desc = DstDesc{};
#if 1
#if 0
if(threadIdx.x == 0)
{
print_ConstantTensorDescriptor(src_desc);
print_ConstantTensorDescriptor(dst_desc);
print_ConstantTensorDescriptor(src_desc, "blockwise_4d_tensor_op: src_desc: ");
print_ConstantTensorDescriptor(dst_desc, "blockwise_4d_tensor_op: dst_desc: ");
}
#endif
......@@ -99,7 +99,7 @@ __device__ void threadwise_4d_tensor_op(
constexpr auto src_desc = SrcDesc{};
constexpr auto dst_desc = DstDesc{};
#if 1
#if 0
if(threadIdx.x == 0)
{
print_ConstantTensorDescriptor(src_desc);
......@@ -161,7 +161,7 @@ __device__ void threadwise_direct_convolution(InDesc,
constexpr auto wei_desc = WeiDesc{};
constexpr auto out_desc = OutDesc{};
#if 1
#if 0
if(threadIdx.x == 0)
{
print_ConstantTensorDescriptor(in_desc);
......@@ -262,7 +262,7 @@ __device__ void blockwise_convolution(InDesc,
constexpr unsigned InTileSizeH = OutTileSizeH + S - 1;
constexpr unsigned InTileSizeW = OutTileSizeW + R - 1;
#if 1
#if 0
if(threadIdx.x == 0)
{
print_ConstantTensorDescriptor(in_desc);
......@@ -417,40 +417,43 @@ __global__ void gridwise_convolution(InDesc,
constexpr unsigned S = wei_desc.GetLength(I2);
constexpr unsigned R = wei_desc.GetLength(I3);
#if 1
if(threadIdx.x == 0)
{
print_ConstantTensorDescriptor(in_desc);
print_ConstantTensorDescriptor(wei_desc);
print_ConstantTensorDescriptor(out_desc);
}
#endif
constexpr unsigned HoPerBlock = OutTileSizeH * YPerBlock;
constexpr unsigned WoPerBlock = OutTileSizeW * XPerBlock;
constexpr unsigned HiPerBlock = YPerBlock * OutTileSizeH + S - 1;
constexpr unsigned WiPerBlock = XPerBlock * OutTileSizeW + R - 1;
constexpr unsigned NBlockWork = (out_desc.GetLength(I0) + NPerBlock - 1) / NPerBlock;
constexpr unsigned KBlockWork = (out_desc.GetLength(I1) + KPerBlock - 1) / KPerBlock;
constexpr unsigned YBlockWork = (out_desc.GetLength(I2) + YPerBlock - 1) / YPerBlock;
constexpr unsigned XBlockWork = (out_desc.GetLength(I3) + XPerBlock - 1) / XPerBlock;
constexpr unsigned YBlockWork = (out_desc.GetLength(I2) + HoPerBlock - 1) / HoPerBlock;
constexpr unsigned XBlockWork = (out_desc.GetLength(I3) + WoPerBlock - 1) / WoPerBlock;
const unsigned block_id =
blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * (gridDim.y * gridDim.x);
constexpr auto in_block_glb_desc = make_ConstantTensorDescriptor(
Sequence<NPerBlock, CPerBlockLoop, HiPerBlock, WiPerBlock>{}, in_desc.GetStrides());
constexpr auto wei_block_glb_desc = make_ConstantTensorDescriptor(
Sequence<KPerBlock, CPerBlockLoop, S, R>{}, wei_desc.GetStrides());
constexpr auto in_block_desc =
make_ConstantTensorDescriptor(Sequence<NPerBlock,
CPerBlockLoop,
YPerBlock * OutTileSizeH + S - 1,
XPerBlock * OutTileSizeW + R - 1>{});
constexpr auto out_block_glb_desc = make_ConstantTensorDescriptor(
Sequence<NPerBlock, KPerBlock, HoPerBlock, WoPerBlock>{}, out_desc.GetStrides());
constexpr auto wei_block_desc =
make_ConstantTensorDescriptor(Sequence<KPerBlock, CPerBlockLoop, S, R>{});
constexpr auto in_block_lds_desc =
make_ConstantTensorDescriptor(in_block_glb_desc.GetLengths());
constexpr auto wei_block_lds_desc =
make_ConstantTensorDescriptor(wei_block_glb_desc.GetLengths());
constexpr auto out_block_lds_desc =
make_ConstantTensorDescriptor(out_block_glb_desc.GetLengths());
constexpr auto out_block_desc = make_ConstantTensorDescriptor(
Sequence<NPerBlock, KPerBlock, YPerBlock * OutTileSizeH, XPerBlock * OutTileSizeW>{});
constexpr unsigned in_block_size = in_block_lds_desc.GetElementSize();
constexpr unsigned wei_block_size = wei_block_lds_desc.GetElementSize();
constexpr unsigned out_block_size = out_block_lds_desc.GetElementSize();
__shared__ TFloat p_in_block[NPerBlock * CPerBlockLoop * (YPerBlock * OutTileSizeH + S - 1) *
(XPerBlock * OutTileSizeW + R - 1)];
__shared__ TFloat p_wei_block[KPerBlock * CPerBlockLoop * S * R];
__shared__ TFloat p_out_block[NPerBlock * KPerBlock * (YPerBlock * OutTileSizeH) *
(XPerBlock * OutTileSizeW)];
__shared__ TFloat p_in_block[in_block_size];
__shared__ TFloat p_wei_block[wei_block_size];
__shared__ TFloat p_out_block[out_block_size];
const unsigned block_id =
blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * (gridDim.y * gridDim.x);
unsigned itmp = block_id;
unsigned n_block_work_id = itmp / (KBlockWork * YBlockWork * XBlockWork);
......@@ -471,6 +474,34 @@ __global__ void gridwise_convolution(InDesc,
unsigned hi_block_work_begin = ho_block_work_begin; // minus padding
unsigned wi_block_work_begin = wo_block_work_begin; // minus padding
#if 0
if(threadIdx.x == 0)
{
print_ConstantTensorDescriptor( in_desc, "gridwise_convolution: in_desc: ");
print_ConstantTensorDescriptor(wei_desc, "gridwise_convolution: wei_desc: ");
print_ConstantTensorDescriptor(out_desc, "gridwise_convolution: out_desc: ");
print_ConstantTensorDescriptor( in_block_glb_desc, "gridwise_convolution: in_block_glb_desc: ");
print_ConstantTensorDescriptor(wei_block_glb_desc, "gridwise_convolution: wei_block_glb_desc: ");
print_ConstantTensorDescriptor(out_block_glb_desc, "gridwise_convolution: out_block_glb_desc: ");
print_ConstantTensorDescriptor( in_block_lds_desc, "gridwise_convolution: in_block_lds_desc: ");
print_ConstantTensorDescriptor(wei_block_lds_desc, "gridwise_convolution: wei_block_lds_desc: ");
print_ConstantTensorDescriptor(out_block_lds_desc, "gridwise_convolution: out_block_lds_desc: ");
printf("NBlockWork %u, KBlockWork %u, YBlockWork %u, XBlockWork %u \t"
"block_id %u, n_block_work_id %u, k_block_work_id %u, y_block_work_id %u, "
"x_block_work_id %u\n",
NBlockWork,
KBlockWork,
YBlockWork,
XBlockWork,
block_id,
n_block_work_id,
k_block_work_id,
y_block_work_id,
x_block_work_id);
}
#endif
for(unsigned c_block_work_begin = 0; c_block_work_begin < in_desc.GetLength(I1);
c_block_work_begin += CPerBlockLoop)
{
......@@ -478,81 +509,94 @@ __global__ void gridwise_convolution(InDesc,
// copy input tensor to LDS
blockwise_4d_tensor_op<TFloat,
decltype(in_desc),
decltype(in_block_desc),
decltype(in_block_glb_desc),
decltype(in_block_lds_desc),
NBlockCopyLen0,
NBlockCopyLen1,
NBlockCopyLen2,
NBlockCopyLen3,
decltype(f_copy)>(in_desc,
p_in + in_desc.Get1dIndex(n_block_work_begin,
c_block_work_begin,
hi_block_work_begin,
wi_block_work_begin),
in_block_desc,
p_in_block,
f_copy);
decltype(f_copy)>(
in_block_glb_desc,
p_in + in_block_glb_desc.Get1dIndex(n_block_work_begin,
c_block_work_begin,
hi_block_work_begin,
wi_block_work_begin),
in_block_lds_desc,
p_in_block,
f_copy);
#if 1
// copy weight tensor to LDS
blockwise_4d_tensor_op<TFloat,
decltype(wei_desc),
decltype(wei_block_desc),
decltype(wei_block_glb_desc),
decltype(wei_block_lds_desc),
NBlockCopyLen0,
NBlockCopyLen1,
NBlockCopyLen2,
NBlockCopyLen3,
decltype(f_copy)>(
wei_desc,
p_wei + wei_desc.Get1dIndex(k_block_work_begin, c_block_work_begin, 0, 0),
wei_block_desc,
wei_block_glb_desc,
p_wei + wei_block_glb_desc.Get1dIndex(k_block_work_begin, c_block_work_begin, 0, 0),
wei_block_lds_desc,
p_wei_block,
f_copy);
// copy output tensor to LDS
blockwise_4d_tensor_op<TFloat,
decltype(out_desc),
decltype(out_block_desc),
decltype(out_block_glb_desc),
decltype(out_block_lds_desc),
NBlockCopyLen0,
NBlockCopyLen1,
NBlockCopyLen2,
NBlockCopyLen3,
decltype(f_copy)>(out_desc,
p_out + out_desc.Get1dIndex(n_block_work_begin,
k_block_work_begin,
ho_block_work_begin,
wo_block_work_begin),
out_block_desc,
p_out_block,
f_copy);
decltype(f_copy)>(
out_block_glb_desc,
p_out + out_block_glb_desc.Get1dIndex(n_block_work_begin,
k_block_work_begin,
ho_block_work_begin,
wo_block_work_begin),
out_block_lds_desc,
p_out_block,
f_copy);
#if 0
__syncthreads();
#endif
// blockwise convolution
blockwise_convolution<TFloat,
decltype(in_block_desc),
decltype(wei_block_desc),
decltype(out_block_desc),
decltype(in_block_lds_desc),
decltype(wei_block_lds_desc),
decltype(out_block_lds_desc),
OutTileSizeH,
OutTileSizeW>(
in_block_desc, p_in_block, wei_block_desc, p_wei_block, out_block_desc, p_out_block);
OutTileSizeW>(in_block_lds_desc,
p_in_block,
wei_block_lds_desc,
p_wei_block,
out_block_lds_desc,
p_out_block);
#if 0
__syncthreads();
#endif
// accum output tensor from LDS to device mem
blockwise_4d_tensor_op<TFloat,
decltype(out_block_desc),
decltype(out_desc),
decltype(out_block_lds_desc),
decltype(out_block_glb_desc),
NBlockCopyLen0,
NBlockCopyLen1,
NBlockCopyLen2,
NBlockCopyLen3,
decltype(f_copy)>(out_block_desc,
p_out_block,
out_desc,
p_out + out_desc.Get1dIndex(n_block_work_begin,
k_block_work_begin,
ho_block_work_begin,
wo_block_work_begin),
f_copy);
decltype(f_copy)>(
out_block_lds_desc,
p_out_block,
out_block_glb_desc,
p_out + out_block_glb_desc.Get1dIndex(n_block_work_begin,
k_block_work_begin,
ho_block_work_begin,
wo_block_work_begin),
f_copy);
#endif
}
}
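// A minimal sketch of the mixed-radix block_id decomposition used in the kernel above,
// assuming the k/y/x ids are peeled off the remainder with successive div/mod. With
// hypothetical work counts NBlockWork = 2, KBlockWork = 3, YBlockWork = 4, XBlockWork = 5
// and block_id = 77:
//   n_block_work_id = 77 / (3*4*5) = 1,  remainder 17
//   k_block_work_id = 17 / (4*5)   = 0,  remainder 17
//   y_block_work_id = 17 / 5       = 3,  remainder 2
//   x_block_work_id = 2
// Check: ((1*3 + 0)*4 + 3)*5 + 2 = 77.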
......@@ -24,20 +24,6 @@ struct Sequence
}
};
#if 0
template<class F, class T, T... Is>
void for_each(F f, std::integer_sequence<T, Is...>)
{
f(Is)...;
}
template<class F, class T, T N>
void for_n_time(F f, Constant<T, N>)
{
for_each(f, std::make_integer_sequence<T, N>{});
}
#endif
template <class Lengths, class Strides>
struct ConstantTensorDescriptor
{
......@@ -67,17 +53,33 @@ struct ConstantTensorDescriptor
return Strides{}.Get(Index<I>{});
}
#if 0
template <class... Is>
__host__ __device__ unsigned Get1dIndex(Is... is) const
// this is ugly, only for 4d
__host__ __device__ constexpr unsigned GetElementSize() const
{
static_assert(nDim == 4, "nDim is not 4");
constexpr auto I0 = Index<0>{};
constexpr auto I1 = Index<1>{};
constexpr auto I2 = Index<2>{};
constexpr auto I3 = Index<3>{};
return GetLength(I0) * GetLength(I1) * GetLength(I2) * GetLength(I3);
}
// this is ugly, only for 4d
__host__ __device__ constexpr unsigned GetElementSpace() const
{
static_assert(nDim == sizeof...(Is), "nDim not consistent");
const unsigned iss[nDim] = {static_cast<unsigned>(is)...};
unsigned idx = 0;
for_n_time([&](auto iDim) { idx += iss[iDim] * GetStride<iDim>(); }, NDimConstant{});
return idx;
static_assert(nDim == 4, "nDim is not 4");
constexpr auto I0 = Index<0>{};
constexpr auto I1 = Index<1>{};
constexpr auto I2 = Index<2>{};
constexpr auto I3 = Index<3>{};
return (GetLength(I0) - 1) * GetStride(I0) + (GetLength(I1) - 1) * GetStride(I1) +
(GetLength(I2) - 1) * GetStride(I2) + (GetLength(I3) - 1) * GetStride(I3) + 1;
}
#elif 1
// this is ugly, only for 4d
__host__ __device__ unsigned Get1dIndex(unsigned n, unsigned c, unsigned h, unsigned w) const
{
......@@ -86,10 +88,9 @@ struct ConstantTensorDescriptor
constexpr auto I2 = Index<2>{};
constexpr auto I3 = Index<3>{};
static_assert(nDim == 4, "nDim not consistent");
static_assert(nDim == 4, "nDim is not 4");
return n * GetStride(I0) + c * GetStride(I1) + h * GetStride(I2) + w * GetStride(I3);
}
#endif
};
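// Quick check of GetElementSize() vs GetElementSpace() above, using a hypothetical
// 2x3x4x4 descriptor:
//   packed strides {48, 16, 4, 1}:
//     GetElementSize()  = 2*3*4*4 = 96
//     GetElementSpace() = 1*48 + 2*16 + 3*4 + 3*1 + 1 = 96   (equal when packed)
//   padded W stride {60, 20, 5, 1}:
//     GetElementSize()  = 96                                  (logical element count)
//     GetElementSpace() = 1*60 + 2*20 + 3*5 + 3*1 + 1 = 119  (buffer span incl. padding)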
// this is ugly, only for 4d
......@@ -145,7 +146,7 @@ __host__ __device__ constexpr auto get_output_4d_tensor_descriptor(InDesc, WeiDe
// this is ugly, only for 4d
template <class TDesc>
__host__ __device__ void print_ConstantTensorDescriptor(TDesc)
__host__ __device__ void print_ConstantTensorDescriptor(TDesc, const char* s)
{
constexpr auto desc = TDesc{};
......@@ -156,7 +157,8 @@ __host__ __device__ void print_ConstantTensorDescriptor(TDesc)
static_assert(desc.GetDimension() == 4, "dim is not 4");
printf("dim %u, lengths {%u %u %u %u}, strides {%u %u %u %u}\n",
printf("%s dim %u, lengths {%u %u %u %u}, strides {%u %u %u %u}\n",
s,
desc.GetDimension(),
desc.GetLength(I0),
desc.GetLength(I1),
......