/*************************************************************************
 * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
 * Modifications Copyright (c) 2019-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * See LICENSE.txt for license information
 ************************************************************************/

#include "devcomm.h"
#include "collectives.h"
#include "primitives.h"

namespace {
  template<typename T, typename RedOp, typename Proto>
#if defined(USE_INDIRECT_FUNCTION_CALL) && !defined(__gfx940__) && !defined(__gfx941__) && !defined(__gfx942__)
  __device__ void runRing(ncclWorkElem *args) {
#else
  __device__ __attribute__((noinline)) void runRing(ncclWorkElem *args) {
#endif
    const int tid = threadIdx.x;
    const int nthreads = args->nWarps*WARP_SIZE;
    const int bid = args->bid;
    const int nChannels = args->nChannels;
    ncclRing *ring = &ncclShmem.channel.ring;
    int const *ringRanks = ring->userRanks;
    const ssize_t chunkSize = int(Proto::calcBytePerStep()/sizeof(T) * (Proto::Id == NCCL_PROTO_SIMPLE ? REDUCESCATTER_CHUNKSTEPS : 1));
    // We should not need the final /2 but it makes performance much, much smoother. Might be a bug somewhere.
    const ssize_t minChunkSizeLL128 = int(nthreads*(Proto::calcBytePerGrain()/sizeof(T))/2);
    const int nranks = ncclShmem.comm.nRanks;
    const ssize_t loopSize = nChannels*chunkSize;
    const ssize_t size = args->count;

#if defined (ENABLE_TIMELINE)
    int elems = 0, totalElems = 0;
    uint64_t clkStamp = 0ULL;
    struct ncclDevComm* comm = &ncclShmem.comm;
    uint64_t entryStamp = __builtin_amdgcn_s_memrealtime();
    Timeline::CollectGpuPrimEvent(comm->gpuEventContext, TIMELINE_EVENT_REDUCE_SCATTER_ENTRY, 0, entryStamp, comm->cpuTimestamp);
#endif

    // Ring primitives: one peer to receive from (prev) and one to send to (next).
    Primitives<T, RedOp, FanSymmetric<1>, 0, Proto, 0>
      prims(tid, nthreads, &ring->prev, &ring->next, args->sendbuff, args->recvbuff,
            args->redOpArg, 0, args->connIndex, args->connIndex);
#ifdef HYGON_SDMA_FEATURE
    prims.ringIx = ring->index;
    INIT_PRIMS_SDMA(prims, args);
#endif

    for (ssize_t gridOffset = 0; gridOffset < size; gridOffset += loopSize) {
      ssize_t realChunkSize;
      if (Proto::Id == NCCL_PROTO_SIMPLE) {
        realChunkSize = min(chunkSize, divUp(size-gridOffset, nChannels));
        realChunkSize = roundUp(realChunkSize, nthreads*sizeof(uint64_t)/sizeof(T));
      }
      else if (Proto::Id == NCCL_PROTO_LL)
        realChunkSize = size-gridOffset < loopSize ? args->lastChunkSize : chunkSize;
      else if (Proto::Id == NCCL_PROTO_LL128)
        realChunkSize = min(divUp(size-gridOffset, nChannels*minChunkSizeLL128)*minChunkSizeLL128, chunkSize);
      realChunkSize = int(realChunkSize);

      ssize_t chunkOffset = gridOffset + bid*int(realChunkSize);
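      // Ring reduce-scatter walk-through (illustrative; e.g. nranks == 4, seen
      // from rank r, with ring order ringRanks[] = {r, r+1, r+2, r+3}).
      // sendbuff holds nranks blocks of `size` elements, one per owner rank,
      // so the block destined for rankDest starts at rankDest*size:
      //   step 0:       send our partial of the chunk owned by ringRanks[nranks-1].
      //   steps 1..k-2: receive a partial sum from the previous rank, reduce it
      //                 with our local block for that chunk, forward the result.
      //   step k-1:     receive the accumulated partial of our own chunk
      //                 (ringRanks[0]), reduce in our local contribution, and
      //                 write the final value to recvbuff (postOp applied).
      // Each chunk thus travels once around the ring, picking up every rank's
      // contribution, and finishes on the rank that owns it.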
      /////////////// begin ReduceScatter steps ///////////////
      ssize_t offset;
      int nelem = min(realChunkSize, size-chunkOffset);
      int rankDest;

      // step 0: push data to next GPU
      rankDest = ringRanks[nranks-1];
      offset = chunkOffset + rankDest * size;
#if defined (ENABLE_TIMELINE)
      elems = max(0, nelem);
      clkStamp = __builtin_amdgcn_s_memrealtime();
      Timeline::CollectGpuPrimEvent(comm->gpuEventContext, TIMELINE_EVENT_PRIM_SEND_ENTRY, elems*sizeof(T), clkStamp, comm->cpuTimestamp);
#endif
      prims.send(offset, nelem);
#if defined (ENABLE_TIMELINE)
      totalElems += elems;
      Timeline::CollectGpuPrimEvent(comm->gpuEventContext, TIMELINE_EVENT_PRIM_SEND_EXIT, elems*sizeof(T), __builtin_amdgcn_s_memrealtime() - clkStamp, comm->cpuTimestamp);
#endif

      // k-2 steps: reduce and copy to next GPU
      for (int j=2; j<nranks; ++j) {
        rankDest = ringRanks[nranks-j];
        offset = chunkOffset + rankDest * size;
#if defined (ENABLE_TIMELINE)
        elems = max(0, nelem);
        clkStamp = __builtin_amdgcn_s_memrealtime();
        Timeline::CollectGpuPrimEvent(comm->gpuEventContext, TIMELINE_EVENT_PRIM_RECV_REDUCE_SEND_ENTRY, elems*sizeof(T), clkStamp, comm->cpuTimestamp);
#endif
        prims.recvReduceSend(offset, nelem);
#if defined (ENABLE_TIMELINE)
        totalElems += elems;
        Timeline::CollectGpuPrimEvent(comm->gpuEventContext, TIMELINE_EVENT_PRIM_RECV_REDUCE_SEND_EXIT, elems*sizeof(T), __builtin_amdgcn_s_memrealtime() - clkStamp, comm->cpuTimestamp);
#endif
      }

      // step k-1: reduce this buffer and data, which will produce the final result
      rankDest = ringRanks[0];
      offset = chunkOffset + rankDest * size;
#if defined (ENABLE_TIMELINE)
      elems = max(0, nelem);
      clkStamp = __builtin_amdgcn_s_memrealtime();
      Timeline::CollectGpuPrimEvent(comm->gpuEventContext, TIMELINE_EVENT_PRIM_RECV_REDUCE_COPY_ENTRY, elems*sizeof(T), clkStamp, comm->cpuTimestamp);
#endif
      prims.recvReduceCopy(offset, chunkOffset, nelem, /*postOp=*/true);
#if defined (ENABLE_TIMELINE)
      totalElems += elems;
      Timeline::CollectGpuPrimEvent(comm->gpuEventContext, TIMELINE_EVENT_PRIM_RECV_REDUCE_COPY_EXIT, elems*sizeof(T), __builtin_amdgcn_s_memrealtime() - clkStamp, comm->cpuTimestamp);
#endif
    }
#if defined (ENABLE_TIMELINE)
    Timeline::CollectGpuPrimEvent(comm->gpuEventContext, TIMELINE_EVENT_REDUCE_SCATTER_EXIT, totalElems*sizeof(T), __builtin_amdgcn_s_memrealtime() - entryStamp, comm->cpuTimestamp);
#endif
  }
}

template<typename T, typename RedOp>
struct RunWorkElement<ncclFuncReduceScatter, T, RedOp, NCCL_ALGO_RING, NCCL_PROTO_SIMPLE> {
  __device__ __forceinline__ void run(ncclWorkElem *args) {
    using Proto = ProtoSimple<REDUCESCATTER_CHUNKSTEPS/REDUCESCATTER_SLICESTEPS, REDUCESCATTER_SLICESTEPS>;
    runRing<T, RedOp, Proto>(args);
  }
};

template<typename T, typename RedOp>
struct RunWorkElement<ncclFuncReduceScatter, T, RedOp, NCCL_ALGO_RING, NCCL_PROTO_LL> {
  __device__ __forceinline__ void run(ncclWorkElem *args) {
    runRing<T, RedOp, ProtoLL>(args);
  }
};

template<typename T, typename RedOp>
struct RunWorkElement<ncclFuncReduceScatter, T, RedOp, NCCL_ALGO_RING, NCCL_PROTO_LL128> {
  __device__ __forceinline__ void run(ncclWorkElem *args) {
    runRing<T, RedOp, ProtoLL128>(args);
  }
};
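// NVLS (NVLink SHARP) reduce-scatter: the thread block is split into two
// cooperating groups. A scatter group pushes this rank's contribution for
// every chunk up to the NVLS head(s) via nvls->up; the switch fabric reduces
// the matching contributions from all ranks in multicast memory, and a reduce
// group pulls this rank's already-reduced chunk down via nvls->down into
// recvbuff. The thread-count split below (128+WARP_SIZE scatter, 384 reduce)
// follows upstream NCCL's tuning for this path.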
template<typename T, typename RedOp>
struct RunWorkElement<ncclFuncReduceScatter, T, RedOp, NCCL_ALGO_NVLS, NCCL_PROTO_SIMPLE> {
  __device__ __forceinline__ void run(ncclWorkElem *args) {
    const int tid = threadIdx.x;
    const int bid = args->bid;
    const int nChannels = args->nChannels;
    struct ncclNvls* nvls = &ncclShmem.channel.nvls;
    const ssize_t chunkSize = int(args->lastChunkSize);
    const ssize_t size = args->count;
    const ssize_t loopSize = nChannels*chunkSize;

    const int nThreadsScatter = 128 + WARP_SIZE;
    const int nThreadsReduce = 384;
    const int tidEndScatter = nThreadsScatter;
    const int tidEndReduce = tidEndScatter + nThreadsReduce;

    using Proto = ProtoSimple<1, 1>;

    if (tid < tidEndScatter) {
      // Scatter
      Primitives<T, RedOp, FanAsymmetric<0, NCCL_MAX_NVLS_ARITY>, /*Direct=*/0, Proto, 0>
        prims(tid, nThreadsScatter, NULL, nvls->up, args->sendbuff, NULL,
              args->redOpArg, 0*Proto::MaxGroupWidth, 0, 0);
      for (ssize_t gridOffset = 0; gridOffset < size; gridOffset += loopSize) {
        ssize_t offset = gridOffset + bid*chunkSize;
        int nelem = min(chunkSize, size-offset);
        prims.scatter(offset, nvls->nHeads*size, nelem, size, -1, 0);
      }
    } else if (tid < tidEndReduce) {
      // Reduce through NVLS
      Primitives<T, RedOp, FanAsymmetric<1, 0>, /*Direct=*/0, Proto, 0>
        prims(tid-tidEndScatter, nThreadsReduce, &nvls->down, NULL, NULL, args->recvbuff,
              args->redOpArg, 3*Proto::MaxGroupWidth, 1, 1);
      for (ssize_t gridOffset = 0; gridOffset < size; gridOffset += loopSize) {
        ssize_t offset = gridOffset + bid*chunkSize;
        int nelem = min(chunkSize, size-offset);
        prims.recv(offset, nelem);
      }
    }
  }
};