// Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <ATen/ATen.h>
#include <limits>
#include <spconv/maxpool.h>
#include <spconv/mp_helper.h>
#include <tensorview/helper_kernel.cu.h>
#include <tensorview/helper_launch.h>
#include <tensorview/tensorview.h>
#include <type_traits>
#include <utility>

namespace spconv {
template <typename T, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolFwdBlockKernel(T *outFeatures, const T *inFeatures,
                                      const Index *indicesIn,
                                      const Index *indicesOut, int numHot,
                                      int numPlanes) {
  T in, out;
  int ILPStrideY[NumILP];
  Index idxo, idxi;
#pragma unroll
  for (int ilp = 0; ilp < NumILP; ilp++)
    ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
  outFeatures += blockIdx.y * NumTLP;
  inFeatures += blockIdx.y * NumTLP;
  for (int ix = blockIdx.x * blockDim.x; ix < numHot;
       ix += blockDim.x * gridDim.x) {
#pragma unroll
    for (int ilp = 0; ilp < NumILP; ++ilp) {
      idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
      idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
      in = inFeatures[idxi];
      out = outFeatures[idxo];
      if (in > out) {
        outFeatures[idxo] = in;
      }
    }
  }
}

template <typename T, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolFwdGenericBlockKernel(T *outFeatures,
                                             const T *inFeatures,
                                             const Index *indicesIn,
                                             const Index *indicesOut,
                                             int numHot, int numPlanes) {
  // see http://www.nvidia.com/content/GTC-2010/pdfs/2238_GTC2010.pdf.
  int ILPStrideX[NumILP];
  Index RI[NumILP];
  Index RO[NumILP];
  T in, out;
#pragma unroll
  for (int ilp = 0; ilp < NumILP; ilp++)
    ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
  for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
    for (int ilp = 0; ilp < NumILP; ilp++) {
      RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
      RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
    }
    for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
      for (int ilp = 0; ilp < NumILP; ++ilp) {
        in = inFeatures[RI[ilp] + iy];
        out = outFeatures[RO[ilp] + iy];
        if (in > out) {
          outFeatures[RO[ilp] + iy] = in;
        }
      }
    }
  }
}

template <typename T, typename Index, int NumTLP, int NumILP,
          typename VecType>
__global__ void maxPoolFwdVecBlockKernel(T *outFeatures, const T *inFeatures,
                                         const Index *indicesIn,
                                         const Index *indicesOut, int numHot,
                                         int numPlanes) {
  // see http://www.nvidia.com/content/GTC-2010/pdfs/2238_GTC2010.pdf.
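  // Vectorized variant: each thread moves vecloadFactor features per int2 or
  // int4 transaction, so the caller reinterprets the feature arrays as
  // VecType and passes numPlanes already divided by vecloadFactor.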
  int ILPStrideY[NumILP];
  constexpr int vecloadFactor = sizeof(VecType) / sizeof(T);
  T bufi[vecloadFactor];
  T bufo[vecloadFactor];
  Index idxi, idxo;
#pragma unroll
  for (int ilp = 0; ilp < NumILP; ilp++)
    ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
  outFeatures += blockIdx.y * NumTLP;
  inFeatures += blockIdx.y * NumTLP;
  for (int ix = blockIdx.x * blockDim.x * vecloadFactor; ix < numHot;
       ix += blockDim.x * gridDim.x * vecloadFactor) {
#pragma unroll
    for (int ilp = 0; ilp < NumILP; ++ilp) {
      idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
      idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
      reinterpret_cast<VecType *>(bufo)[0] =
          reinterpret_cast<const VecType *>(outFeatures)[idxo];
      reinterpret_cast<VecType *>(bufi)[0] =
          reinterpret_cast<const VecType *>(inFeatures)[idxi];
#pragma unroll
      for (int i = 0; i < vecloadFactor; i++) {
        if (bufi[i] > bufo[i]) {
          bufo[i] = bufi[i];
        }
      }
      reinterpret_cast<VecType *>(outFeatures)[idxo] =
          reinterpret_cast<VecType *>(bufo)[0];
    }
  }
}

template <typename T, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolFwdGenericKernel(T *outFeatures, const T *inFeatures,
                                        const Index *indicesIn,
                                        const Index *indicesOut, int numHot,
                                        int numPlanes) {
  // see http://www.nvidia.com/content/GTC-2010/pdfs/2238_GTC2010.pdf.
  int ILPStrideX[NumILP];
  Index RI[NumILP];
  Index RO[NumILP];
  T in, out;
#pragma unroll
  for (int ilp = 0; ilp < NumILP; ilp++)
    ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
  for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
    for (int ilp = 0; ilp < NumILP; ilp++) {
      if (ix + ILPStrideX[ilp] < numHot) {
        RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
        RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
      }
    }
    for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
      for (int ilp = 0; ilp < NumILP; ++ilp) {
        if (ix + ILPStrideX[ilp] < numHot) {
          in = inFeatures[RI[ilp] + iy];
          out = outFeatures[RO[ilp] + iy];
          if (in > out) {
            outFeatures[RO[ilp] + iy] = in;
          }
        }
      }
    }
  }
}

template <typename T, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolBwdBlockKernel(const T *outFeatures,
                                      const T *inFeatures, const T *dout,
                                      T *din, const Index *indicesIn,
                                      const Index *indicesOut, int numHot,
                                      int numPlanes) {
  // see http://www.nvidia.com/content/GTC-2010/pdfs/2238_GTC2010.pdf.
  T in, out;
  Index idxo, idxi;
  int ILPStrideY[NumILP];
#pragma unroll
  for (int ilp = 0; ilp < NumILP; ilp++)
    ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
  outFeatures += blockIdx.y * NumTLP;
  inFeatures += blockIdx.y * NumTLP;
  dout += blockIdx.y * NumTLP;
  din += blockIdx.y * NumTLP;
  for (int ix = blockIdx.x * blockDim.x; ix < numHot;
       ix += blockDim.x * gridDim.x) {
#pragma unroll
    for (int ilp = 0; ilp < NumILP; ++ilp) {
      idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
      idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
      in = inFeatures[idxi];
      out = outFeatures[idxo];
      if (in == out) {
        din[idxi] += dout[idxo];
      }
    }
  }
}

template <typename T, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolBwdGenericBlockKernel(const T *outFeatures,
                                             const T *inFeatures,
                                             const T *dout, T *din,
                                             const Index *indicesIn,
                                             const Index *indicesOut,
                                             int numHot, int numPlanes) {
  // see http://www.nvidia.com/content/GTC-2010/pdfs/2238_GTC2010.pdf.
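  // Backward rule: a gradient element flows to an input feature only where
  // that feature equals the pooled output (in == out), so on ties every
  // input that attained the maximum receives the gradient.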
  int ILPStrideX[NumILP];
  Index RI[NumILP];
  Index RO[NumILP];
  T in, out;
#pragma unroll
  for (int ilp = 0; ilp < NumILP; ilp++)
    ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
  for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
    for (int ilp = 0; ilp < NumILP; ilp++) {
      RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
      RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
    }
    for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
      for (int ilp = 0; ilp < NumILP; ++ilp) {
        in = inFeatures[RI[ilp] + iy];
        out = outFeatures[RO[ilp] + iy];
        if (in == out) {
          din[RI[ilp] + iy] += dout[RO[ilp] + iy];
        }
      }
    }
  }
}

template <typename T, typename Index, int NumTLP, int NumILP,
          typename VecType>
__global__ void maxPoolBwdVecBlockKernel(const T *outFeatures,
                                         const T *inFeatures, const T *dout,
                                         T *din, const Index *indicesIn,
                                         const Index *indicesOut, int numHot,
                                         int numPlanes) {
  // see http://www.nvidia.com/content/GTC-2010/pdfs/2238_GTC2010.pdf.
  int ILPStrideY[NumILP];
  constexpr int vecloadFactor = sizeof(VecType) / sizeof(T);
  T bufi[vecloadFactor];
  T bufo[vecloadFactor];
  T bufdi[vecloadFactor];
  T bufdo[vecloadFactor];
  Index idxi, idxo;
#pragma unroll
  for (int ilp = 0; ilp < NumILP; ilp++)
    ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
  outFeatures += blockIdx.y * NumTLP;
  inFeatures += blockIdx.y * NumTLP;
  // The gradient pointers must be shifted to the same plane tile as the
  // feature pointers; otherwise blocks with blockIdx.y > 0 would read and
  // write gradients of the wrong planes.
  dout += blockIdx.y * NumTLP;
  din += blockIdx.y * NumTLP;
  for (int ix = blockIdx.x * blockDim.x * vecloadFactor; ix < numHot;
       ix += blockDim.x * gridDim.x * vecloadFactor) {
#pragma unroll
    for (int ilp = 0; ilp < NumILP; ++ilp) {
      idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
      idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
      reinterpret_cast<VecType *>(bufo)[0] =
          reinterpret_cast<const VecType *>(outFeatures)[idxo];
      reinterpret_cast<VecType *>(bufi)[0] =
          reinterpret_cast<const VecType *>(inFeatures)[idxi];
      reinterpret_cast<VecType *>(bufdo)[0] =
          reinterpret_cast<const VecType *>(dout)[idxo];
      reinterpret_cast<VecType *>(bufdi)[0] =
          reinterpret_cast<VecType *>(din)[idxi];
#pragma unroll
      for (int i = 0; i < vecloadFactor; i++) {
        if (bufi[i] == bufo[i]) {
          bufdi[i] += bufdo[i];
        }
      }
      reinterpret_cast<VecType *>(din)[idxi] =
          reinterpret_cast<VecType *>(bufdi)[0];
    }
  }
}

template <typename T, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolBwdGenericKernel(const T *outFeatures,
                                        const T *inFeatures, const T *dout,
                                        T *din, const Index *indicesIn,
                                        const Index *indicesOut, int numHot,
                                        int numPlanes) {
  // see http://www.nvidia.com/content/GTC-2010/pdfs/2238_GTC2010.pdf.
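  // Tail variant: numHot need not be a multiple of the tile size here, so
  // every access is guarded by an explicit ix + ILPStrideX[ilp] < numHot
  // bounds check.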
  int ILPStrideX[NumILP];
  Index RI[NumILP];
  Index RO[NumILP];
  T in, out;
#pragma unroll
  for (int ilp = 0; ilp < NumILP; ilp++)
    ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
  for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
    for (int ilp = 0; ilp < NumILP; ilp++) {
      if (ix + ILPStrideX[ilp] < numHot) {
        RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
        RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
      }
    }
    for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
      for (int ilp = 0; ilp < NumILP; ++ilp) {
        if (ix + ILPStrideX[ilp] < numHot) {
          in = inFeatures[RI[ilp] + iy];
          out = outFeatures[RO[ilp] + iy];
          if (in == out) {
            din[RI[ilp] + iy] += dout[RO[ilp] + iy];
          }
        }
      }
    }
  }
}

namespace functor {
template <typename T, typename Index>
struct SparseMaxPoolForwardFunctor<tv::GPU, T, Index> {
  using vecload_type_t =
      std::conditional_t<std::is_same<T, at::Half>::value, int2, int4>;
  using kernel_block_t = mp_list_c<int, 64, 32, 16>;
  void operator()(const tv::GPU &d, tv::TensorView<T> outFeatures,
                  tv::TensorView<const T> inFeatures,
                  tv::TensorView<const Index> indices, int size) {
    if (size <= 0)
      return;
    int numPlanes = inFeatures.dim(1);
    bool notFound = true;
    constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T);
    mp_for_each<kernel_block_t>([=, &outFeatures, &inFeatures, &indices,
                                 &notFound](auto NumTLP) {
      constexpr int NumILP = NumTLP / 4;
      int numHotBlock = (size / NumTLP) * NumTLP;
      if (notFound) {
        if (numPlanes % NumTLP == 0) {
          if (numHotBlock >= NumTLP) {
            // Launch geometry: one x-block per NumTLP points and one y-block
            // per NumTLP planes; each thread owns one VecType lane of
            // vecloadFactor features.
            maxPoolFwdVecBlockKernel<T, Index, int(NumTLP), NumILP,
                                     vecload_type_t>
                <<<dim3(numHotBlock / NumTLP, numPlanes / NumTLP),
                   dim3(NumTLP / vecloadFactor, NumTLP / NumILP), 0,
                   d.getStream()>>>(outFeatures.data(), inFeatures.data(),
                                    indices.subview(0).data(),
                                    indices.subview(1).data(), numHotBlock,
                                    numPlanes / vecloadFactor);
            TV_CHECK_CUDA_ERR();
          }
          if (size > numHotBlock) {
            maxPoolFwdGenericKernel<T, Index, int(NumTLP), NumILP>
                <<<dim3(1, numPlanes / NumTLP),
                   dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
                    outFeatures.data(), inFeatures.data(),
                    indices.subview(0).data() + numHotBlock,
                    indices.subview(1).data() + numHotBlock,
                    size - numHotBlock, numPlanes);
            TV_CHECK_CUDA_ERR();
          }
          notFound = false;
        }
      }
    });
    if (notFound) {
      constexpr int NumTLP = 64;
      constexpr int NumILP = NumTLP / 4;
      int numHotBlock = (size / NumTLP) * NumTLP;
      if (numHotBlock >= NumTLP) {
        maxPoolFwdGenericBlockKernel<T, Index, NumTLP, NumILP>
            <<<dim3(numHotBlock / NumTLP,
                    tv::launch::DivUp(numPlanes, NumTLP)),
               dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
                outFeatures.data(), inFeatures.data(),
                indices.subview(0).data(), indices.subview(1).data(),
                numHotBlock, numPlanes);
        TV_CHECK_CUDA_ERR();
      }
      if (size > numHotBlock) {
        maxPoolFwdGenericKernel<T, Index, NumTLP, NumILP>
            <<<dim3(1, tv::launch::DivUp(numPlanes, NumTLP)),
               dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
                outFeatures.data(), inFeatures.data(),
                indices.subview(0).data() + numHotBlock,
                indices.subview(1).data() + numHotBlock, size - numHotBlock,
                numPlanes);
        TV_CHECK_CUDA_ERR();
      }
    }
  }
};

template <typename T, typename Index>
struct SparseMaxPoolBackwardFunctor<tv::GPU, T, Index> {
  using vecload_type_t =
      std::conditional_t<std::is_same<T, at::Half>::value, int2, int4>;
  using kernel_block_t = mp_list_c<int, 64, 32, 16>;
  void operator()(const tv::GPU &d, tv::TensorView<const T> outFeatures,
                  tv::TensorView<const T> inFeatures,
                  tv::TensorView<const T> dout, tv::TensorView<T> din,
                  tv::TensorView<const Index> indices, int size) {
    if (size <= 0)
      return;
    int numPlanes = inFeatures.dim(1);
    bool notFound = true;
    constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T);
    mp_for_each<kernel_block_t>([=, &outFeatures, &inFeatures, &dout, &din,
                                 &indices, &notFound](auto NumTLP) {
      constexpr int NumILP = NumTLP / 4;
      int numHotBlock = (size / NumTLP) * NumTLP;
      if (notFound) {
        if (numPlanes % NumTLP == 0) {
          if (numHotBlock >= NumTLP) {
            maxPoolBwdVecBlockKernel<T, Index, int(NumTLP), NumILP,
                                     vecload_type_t>
                <<<dim3(numHotBlock / NumTLP, numPlanes / NumTLP),
                   dim3(NumTLP / vecloadFactor, NumTLP / NumILP), 0,
                   d.getStream()>>>(outFeatures.data(), inFeatures.data(),
                                    dout.data(), din.data(),
                                    indices.subview(0).data(),
                                    indices.subview(1).data(), numHotBlock,
                                    numPlanes / vecloadFactor);
            TV_CHECK_CUDA_ERR();
          }
          if (size > numHotBlock) {
            maxPoolBwdGenericKernel<T, Index, int(NumTLP), NumILP>
                <<<dim3(1, numPlanes / NumTLP),
                   dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
                    outFeatures.data(), inFeatures.data(), dout.data(),
                    din.data(), indices.subview(0).data() + numHotBlock,
                    indices.subview(1).data() + numHotBlock,
                    size - numHotBlock, numPlanes);
            TV_CHECK_CUDA_ERR();
          }
          notFound = false;
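          // A tile size that divides numPlanes has been handled; the
          // notFound guard above skips the remaining, smaller candidates in
          // kernel_block_t.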
        }
      }
    });
    if (notFound) {
      constexpr int NumTLP = 64;
      constexpr int NumILP = NumTLP / 4;
      int numHotBlock = (size / NumTLP) * NumTLP;
      if (numHotBlock >= NumTLP) {
        maxPoolBwdGenericBlockKernel<T, Index, NumTLP, NumILP>
            <<<dim3(numHotBlock / NumTLP,
                    tv::launch::DivUp(numPlanes, NumTLP)),
               dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
                outFeatures.data(), inFeatures.data(), dout.data(),
                din.data(), indices.subview(0).data(),
                indices.subview(1).data(), numHotBlock, numPlanes);
        TV_CHECK_CUDA_ERR();
      }
      if (size > numHotBlock) {
        maxPoolBwdGenericKernel<T, Index, NumTLP, NumILP>
            <<<dim3(1, tv::launch::DivUp(numPlanes, NumTLP)),
               dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
                outFeatures.data(), inFeatures.data(), dout.data(),
                din.data(), indices.subview(0).data() + numHotBlock,
                indices.subview(1).data() + numHotBlock, size - numHotBlock,
                numPlanes);
        TV_CHECK_CUDA_ERR();
      }
    }
  }
};

} // namespace functor

#define DECLARE_GPU_SPECS_T_INDEX(T, Index)                                   \
  template struct functor::SparseMaxPoolForwardFunctor<tv::GPU, T, Index>;    \
  template struct functor::SparseMaxPoolBackwardFunctor<tv::GPU, T, Index>;

#define DECLARE_GPU_SPECS(T) DECLARE_GPU_SPECS_T_INDEX(T, int);

DECLARE_GPU_SPECS(float);
DECLARE_GPU_SPECS(double);
DECLARE_GPU_SPECS(at::Half);

#undef DECLARE_GPU_SPECS
#undef DECLARE_GPU_SPECS_T_INDEX
} // namespace spconv