#include <iostream>
#include <numeric>
#include <functional>
#include <initializer_list>
#include <vector>
#include <thread>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include "nvToolsExt.h"
#include "tensor.hpp"
#include "device_tensor.cuh"
#include "direct_convolution.cuh"

// Generator that fills every element of a tensor with one constant value.
// Used below to zero-initialize the device-side output before the launch.
template <class T>
struct GeneratorConstant
{
    T value = 0;

    template <class... Is>
    T operator()(Is...)
    {
        return value;
    }
};

// Generator that maps a multi-dimensional index to a deterministic,
// index-dependent value, so host and device convolutions can be compared
// on non-trivial (non-constant) data.
template <class T>
struct GeneratorTensor
{
    template <class... Is>
    T operator()(Is... is)
    {
#if 0
        // Simple variant: value = sum of the coordinates.
        std::initializer_list<std::size_t> ls = {static_cast<std::size_t>(is)...};
        return std::accumulate(ls.begin(), ls.end(), std::size_t(0));
#else
        // Linearize the multi-index as if each dimension had length 100.
        // Strides are a reverse product-scan of the lengths (packed layout:
        // 1, 100, 10000, ...). NOTE(review): the default additive
        // std::partial_sum would yield 1, 100, 200, ... and make distinct
        // indices collide; a product scan is what packed strides require.
        assert(sizeof...(Is) > 0);

        std::initializer_list<std::size_t> ids = {static_cast<std::size_t>(is)...};

        std::vector<std::size_t> lens(sizeof...(Is), 100);
        std::vector<std::size_t> strides(sizeof...(Is), 1);

        std::partial_sum(lens.rbegin(),
                         lens.rbegin() + (sizeof...(Is) - 1),
                         strides.rbegin() + 1,
                         std::multiplies<std::size_t>());

        // +1 keeps the generated value non-zero at index (0,...,0).
        return std::inner_product(ids.begin(), ids.end(), strides.begin(), std::size_t(0)) + 1;
#endif
    }
};

// Reference convolution on the host.
// Layouts (as indexed below): in = NCHW, wei = KCYX, out = NKHW.
// No padding, stride 1: hi = ho + y, wi = wo + x, so the caller must size
// the input as (H_out + Y - 1, W_out + X - 1). Accumulates in double.
// num_thread: parallelism handed to make_ParallelTensorFunctor.
template <class T>
void host_convolution(const Tensor<T>& in,
                      const Tensor<T>& wei,
                      Tensor<T>& out,
                      std::size_t num_thread)
{
    auto f = [&](auto n, auto k, auto ho, auto wo) {
        double v = 0;
        for(int c = 0; c < wei.mDesc.GetLengths()[1]; ++c)
        {
            for(int y = 0; y < wei.mDesc.GetLengths()[2]; ++y)
            {
                int hi = ho + y;
                for(int x = 0; x < wei.mDesc.GetLengths()[3]; ++x)
                {
                    int wi = wo + x;
                    v += in(n, c, hi, wi) * wei(k, c, y, x);
                }
            }
        }
        out(n, k, ho, wo) = v;
    };

    auto f_par = make_ParallelTensorFunctor(f,
                                            out.mDesc.GetLengths()[0],
                                            out.mDesc.GetLengths()[1],
                                            out.mDesc.GetLengths()[2],
                                            out.mDesc.GetLengths()[3]);
    f_par(num_thread);
}

// Runs the convolution on the GPU: copies in/wei to device memory,
// zero-fills out, launches gridwise_convolution, and copies the result back
// into out.mData. Launch config is a single 64-thread block — the kernel is
// expected to cover the whole (small) problem from one block.
template <class T>
void device_convolution(const Tensor<T>& in, const Tensor<T>& wei, Tensor<T>& out)
{
    DeviceTensorDescriptor<4> in_desc_device(in.mDesc);
    DeviceTensorDescriptor<4> wei_desc_device(wei.mDesc);
    DeviceTensorDescriptor<4> out_desc_device(out.mDesc);

    // Debug dump of the input descriptor: lengths then strides.
    // Fixed: the original format string printed the literal text "__func__";
    // use %s so the actual function name is printed.
    printf("%s: in_desc_device: {%u %u %u %u}, {%u %u %u %u}\n",
           __func__,
           in_desc_device.GetLength(0),
           in_desc_device.GetLength(1),
           in_desc_device.GetLength(2),
           in_desc_device.GetLength(3),
           in_desc_device.GetStride(0),
           in_desc_device.GetStride(1),
           in_desc_device.GetStride(2),
           in_desc_device.GetStride(3));

    std::size_t data_sz = sizeof(T);

    DeviceMem in_device_buf(data_sz * in.mDesc.GetElementSpace());
    DeviceMem wei_device_buf(data_sz * wei.mDesc.GetElementSpace());
    DeviceMem out_device_buf(data_sz * out.mDesc.GetElementSpace());

    int num_thread = std::thread::hardware_concurrency();

    // Zero the host-side output so the device result starts from a known state.
    out.GenerateTensorValue(GeneratorConstant<T>{0}, num_thread);

    in_device_buf.ToDevice(in.mData.data());
    wei_device_buf.ToDevice(wei.mData.data());
    out_device_buf.ToDevice(out.mData.data());

    dim3 block_dim(64, 1, 1);
    dim3 grid_dim(1, 1, 1);

    gridwise_convolution<<<grid_dim, block_dim>>>(
        in_desc_device,
        static_cast<T*>(in_device_buf.GetDeviceBuffer()),
        wei_desc_device,
        static_cast<T*>(wei_device_buf.GetDeviceBuffer()),
        out_desc_device,
        static_cast<T*>(out_device_buf.GetDeviceBuffer()));

    // Catch launch-configuration errors immediately.
    checkCudaErrors(cudaGetLastError());

    // NOTE(review): assumes FromDevice performs a blocking copy (it must
    // synchronize with the kernel above for the result to be valid) — confirm
    // in DeviceMem's implementation.
    out_device_buf.FromDevice(out.mData.data());
}

// Driver: build a small NCHW test case, run host and device convolutions on
// identical inputs, and report the accumulated absolute difference.
int main()
{
#if 0
    Tensor<float> in({3, 16, 130, 130});
    Tensor<float> wei({4, 16, 3, 3});
    Tensor<float> out_host({3, 4, 128, 128});
#elif 0
    Tensor<float> in({1, 1, 130, 130});
    Tensor<float> wei({1, 1, 3, 3});
    Tensor<float> out_host({1, 1, 128, 128});
#elif 1
    Tensor<float> in({1, 1, 18, 18});
    Tensor<float> wei({1, 1, 3, 3});
    Tensor<float> out_host({1, 1, 16, 16});
#else
    Tensor<float> in({1, 1, 4, 4});
    Tensor<float> wei({1, 1, 3, 3});
    Tensor<float> out_host({1, 1, 2, 2});
#endif

    Tensor<float> out_device = out_host;

    int num_thread = std::thread::hardware_concurrency();
    std::cout << __func__ << ": num_thread " << num_thread << std::endl;

    // Deterministic, index-dependent data on both operands.
    in.GenerateTensorValue(GeneratorTensor<float>{}, num_thread);
    wei.GenerateTensorValue(GeneratorTensor<float>{}, num_thread);

    host_convolution(in, wei, out_host, num_thread);
    device_convolution(in, wei, out_device);

    std::cout << __func__ << ": done" << std::endl;

    LogRange(std::cout << __func__ << "in : ", in.mData, ",") << std::endl;
    LogRange(std::cout << __func__ << "wei: ", wei.mData, ",") << std::endl;
    LogRange(std::cout, out_host.mData, ",") << std::endl;
    LogRange(std::cout, out_device.mData, ",") << std::endl;

    // L1 distance between host reference and device result. Accumulate in
    // double to avoid losing small per-element differences to float rounding.
    double error = 0;
    for(std::size_t i = 0; i < out_host.mData.size(); ++i)
    {
        error += std::abs(out_host.mData[i] - out_device.mData[i]);
    }
    std::cout << "error: " << error << std::endl;
}