/*************************************************************************
 * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
 * Modifications Copyright (c) 2019-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * See LICENSE.txt for license information
 ************************************************************************/

#ifndef SCCL_DEVICE_H_
#define SCCL_DEVICE_H_

#include "check.h"
#include "sccl_bfloat16.h"
#include "align.h"
#if defined(ENABLE_NPKIT)
#include "npkit/npkit_struct.h"
#endif
#if defined(ENABLE_TIMELINE)
#include "timeline/timeline.h"
#endif
#include <stdint.h> // fixed-width integer types (uint32_t, uint64_t, ...) used throughout

#ifdef HCU_SDMA_FEATURE
#include "hsa/hsa_ext_amd.h"
#include "hsa_extra.h"
// #define HCU_PRINT_DEBUG
#endif

namespace sccl {

#define PRINT_ERR(...)
#define PRINT_INFO(...)
#define PRINT_INFOM(...)
#define PRINT_INFOT(tid, ...)
#define PRINT_DEBUG(...)

#if defined(ENABLE_NPKIT) && defined(HCU_SDMA_FEATURE)
#define NPKIT_SET_GPU_EVENT(event, size, cost) \
  NpKit::CollectGpuEvent(event, size, cost, NPKIT_GET_GPU_TIMESTAMP(), scclShmem.comm.npKitEventCollectContexts + npKitCtxIdx);
#define NPKIT_SET_GPU_EVENT_TM(event, size, cost, tm) \
  NpKit::CollectGpuEvent(event, size, cost, tm, scclShmem.comm.npKitEventCollectContexts + npKitCtxIdx);
#else
#define NPKIT_SET_GPU_EVENT(event, size, cost)
#define NPKIT_SET_GPU_EVENT_TM(event, size, cost, tm)
#endif

#ifdef HCU_SDMA_FEATURE
#define INIT_PRIMS_SDMA(prims, args)                                                                        \
  {                                                                                                         \
    prims.rank = scclShmem.comm.rank;                                                                       \
    prims.useSdmaConfig = args->useSdma;                                                                    \
    prims.useSdmaCopy = args->useSdma && prims.sdmaQueueCtx;                                                \
    prims.preFnOps = args->preFnOps;                                                                        \
    prims.sdmaMinCopySize = args->useSdma && prims.sdmaQueueCtx ? prims.sdmaQueueCtx->minCopySize : 0;      \
    prims.sdmaCountEnable = args->useSdma && prims.sdmaQueueCtx ? prims.sdmaQueueCtx->copyCountEnable : 0;  \
    prims.sdmaCopyCount = 0;                                                                                \
    prims.allCopyCount = 0;                                                                                 \
  }
#endif
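/*
 * Illustrative usage sketch for INIT_PRIMS_SDMA (assumed caller shape; 'Primitives'
 * stands in for the kernel-side primitives class and is not declared in this header):
 * after the primitives object has been constructed for a work element, the macro
 * copies the element's useSdma/preFnOps settings and derives the SDMA copy thresholds
 * from the queue context attached to the connection.
 *
 *   struct scclWorkElem* args = ...;   // current work element
 *   Primitives prims(...);             // constructor fills prims.sdmaQueueCtx
 *   INIT_PRIMS_SDMA(prims, args);      // sets rank, useSdmaCopy, sdmaMinCopySize,
 *                                      // sdmaCountEnable and resets the copy counters
 */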
#define SCCL_NUM_FUNCTIONS 5 // SendRecv and AllToAllPivot not included for now
typedef enum {
  scclFuncBroadcast,
  scclFuncReduce,
  scclFuncAllGather,
  scclFuncReduceScatter,
  scclFuncAllReduce,
  scclFuncSendRecv,
  scclFuncSend,
  scclFuncRecv,
  scclFuncAllToAllPivot,
  scclNumFuncs
} scclFunc_t;
extern const char* scclFuncStr[SCCL_NUM_FUNCTIONS + 2];

#define SCCL_NUM_ALGORITHMS 6 // Tree/Ring/CollNet*
#define SCCL_ALGO_TREE 0           // tree algorithm
#define SCCL_ALGO_RING 1           // ring algorithm
#define SCCL_ALGO_COLLNET_DIRECT 2 // direct network (CollNet) algorithm
#define SCCL_ALGO_COLLNET_CHAIN 3  // chained network (CollNet) algorithm
#define SCCL_ALGO_NVLS 4           // NVLink (NVLS) algorithm
#define SCCL_ALGO_NVLS_TREE 5      // NVLink tree (NVLS tree) algorithm
extern const char* scclAlgoStr[SCCL_NUM_ALGORITHMS];

#define SCCL_NUM_PROTOCOLS 3 // Simple/LL/LL128
#define SCCL_PROTO_LL 0
#define SCCL_PROTO_LL128 1
#define SCCL_PROTO_SIMPLE 2
extern const char* scclProtoStr[SCCL_NUM_PROTOCOLS];

#define SCCL_MAX_OPS 2048
#define SCCL_STEPS 8

union scclLLFifoLine {
  /* Flags have to be *after* data, because otherwise, an incomplete receive
     from the network may receive the flag but not the data.
     Note this is assuming that either we receive contiguous chunks of data
     (sockets) or data is written with an atomicity of 8 bytes (IB/RDMA). */
  struct {
    uint32_t data1;
    uint32_t flag1;
    uint32_t data2;
    uint32_t flag2;
  };
  uint64_t v[2];
  int4 i4;
};

#define WARP_SIZE warpSize
#define MAXCHANNELS 32
#define SCCL_MAX_NTHREADS 256
#define SCCL_SIMPLE_MAX_NTHREADS SCCL_MAX_NTHREADS
#define SCCL_LL_MAX_NTHREADS SCCL_MAX_NTHREADS
#define SCCL_LL_LINES_PER_THREAD 8
#ifdef TEST_LL_CLEANUP
#define SCCL_LL_CLEAN_MASK 0x078 // Set to 0x100 to disable cleanup
#define SCCL_LL_FLAG_MAX 0x100
#define SCCL_LL_FLAG(a) ((uint32_t)((a) % SCCL_LL_FLAG_MAX))
#else
#define SCCL_LL_CLEAN_MASK 0x7ffffff8
#define SCCL_LL_FLAG(a) ((uint32_t)(a))
#endif
// Make sure the clean mask will last for at least SCCL_STEPS
static_assert(SCCL_LL_CLEAN_MASK % SCCL_STEPS == 0, "Invalid SCCL_LL_CLEAN_MASK value");
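// Minimal sketch (simplified, not the primitives actually used by the kernels) of how
// the LL line format above is intended to be used: every 8-byte half of the 16-byte line
// pairs 4 bytes of payload with a 4-byte step flag, so a single 8-byte store publishes
// data and flag together and the receiver only needs to poll the flags.
__device__ inline void scclLLSketchStore(union scclLLFifoLine* line, uint64_t val, uint32_t flag) {
  union scclLLFifoLine v;
  v.data1 = (uint32_t)val;
  v.flag1 = flag;
  v.data2 = (uint32_t)(val >> 32);
  v.flag2 = flag;
  ((volatile uint64_t*)line->v)[0] = v.v[0]; // 8-byte stores keep each data+flag pair atomic
  ((volatile uint64_t*)line->v)[1] = v.v[1];
}
__device__ inline uint64_t scclLLSketchLoad(union scclLLFifoLine* line, uint32_t flag) {
  uint64_t v0, v1;
  do { // spin until both halves carry this step's flag
    v0 = ((volatile uint64_t*)line->v)[0];
    v1 = ((volatile uint64_t*)line->v)[1];
  } while ((uint32_t)(v0 >> 32) != flag || (uint32_t)(v1 >> 32) != flag);
  return (uint32_t)v0 | ((uint64_t)(uint32_t)v1 << 32);
}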
#define SCCL_LL128_LINESIZE 64
#define SCCL_LL128_LINEELEMS (SCCL_LL128_LINESIZE / sizeof(uint64_t))
#define SCCL_LL128_DATAELEMS (SCCL_LL128_LINEELEMS - 1)

#define SCCL_LL128_MAX_NTHREADS 256
#define SCCL_LL128_ELEMS_PER_THREAD 28

#define SCCL_LL128_SHMEM_ELEMS_PER_THREAD 4
#define SCCL_LL128_SHMEM_SIZE (SCCL_LL128_SHMEM_ELEMS_PER_THREAD * SCCL_LL128_MAX_NTHREADS)

#define SCCL_DIRECT_WRITE 0x01
#define SCCL_DIRECT_READ 0x02
#define SCCL_DIRECT_NIC 0x04
#define SCCL_IPC_WRITE 0x08
#define SCCL_IPC_READ 0x10
#define SCCL_NVLS_MIN_POLL 0x20

#ifdef HCU_SDMA_FEATURE
#define SDMA_CTX_VALID_MAGIC 0xD65A
#endif

struct scclConnInfo {
  // Regular comm mechanism
  char* buffs[SCCL_NUM_PROTOCOLS]; // Local for recv, remote for send
  uint64_t* tail;                  // Local for recv, remote for send
  uint64_t* head;                  // Local for send, remote for recv

  int flags;                  // Direct communication / other flags
  int shared;                 // Buffers are shared
  void** ptrExchange;         // Pointer exchange for direct communication
  uint64_t* redOpArgExchange; // PreOp scaler exchange for direct pull case

  int* sizesFifo; // Sizes fifo from GPU to proxy
  int* offsFifo;  // Buffer fifo from proxy to GPU

  uint64_t step; // Keep where we are
  uint64_t llLastCleaning;
  // GPU's HDP_MEM_FLUSH_ADDR: HDP Memory Coherency Flush Control. This register
  // allows software to explicitly initiate a flush read to HDP memory. See more
  // descriptions in primitives.h.
  uint32_t* next_hdp_reg; // Next GPU in ring (for p2p transport use only)
  uint32_t* curr_hdp_reg; // Current GPU's HDP register
#ifdef HCU_SDMA_FEATURE
  struct sdmaQueueContext* sdmaQueueCtx;
  uint32_t sdmaCtxValidMagic;
#endif
};

struct scclProxyConnector {
  int tpRank;
  int tpLocalRank;
  int sameProcess;
  struct scclProxyConnection* connection;
};

struct scclConnector {
  int connected;
  struct scclProxyConnector proxyConn;
  struct scclTransportComm* transportComm;
  void* transportResources;
  struct scclConnInfo conn;
};

struct scclRing {
  // Shortcuts for userRanks[1] and userRanks[n-1]
  int prev;
  int next;

  // Maps an internal sccl index to user-specified rank order. This is necessary
  // since we need to know how the user expects data to be ordered across
  // devices. Ordered from current device.
  int* userRanks;

  int index; // This rank's index in the ring
};

// The root of each tree only has one node down (+1 intra-node).
#define SCCL_MAX_TREE_ARITY_TOP 2
// Nodes inside the binary tree can have up to two nodes down (+1 intra-node).
#define SCCL_MAX_TREE_ARITY 3
struct scclTree {
  int depth;
  int up;
  int down[SCCL_MAX_TREE_ARITY];
};

#define SCCL_MAX_DIRECT_ARITY 7
struct scclDirect {
  int depth;
  int out;
  int nHeads;   // Number of parallel N<->1<->net operations we'll do in parallel; size of up/down
  int headRank; // Index in 0..nHeads-1 I am the head rank of. -1 if I'm not a head rank (no local NIC)
  int shift;    // Shuffling of send/recv for scatter/gather operations, basically localRank%nHeads
  int up[SCCL_MAX_DIRECT_ARITY];
  int down[SCCL_MAX_DIRECT_ARITY];
};

#define SCCL_CONN_IDX_P2P_NET 2

#define SCCL_MAX_NVLS_ARITY 8
#define SCCL_MAX_NVLS_TREE_ARITY 3
struct scclNvls {
  int out;
  int nHeads;   // Number of parallel N<->1<->net operations we'll do in parallel; size of up/down
  int headRank; // Index in 0..nHeads-1 I am the head rank of. -1 if I'm not a head rank (no local NIC)
  int up[SCCL_MAX_NVLS_ARITY];
  int down;
  int treeUp;
  int treeDown[SCCL_MAX_NVLS_TREE_ARITY];
  int node;
  int nNodes;
};

#define SCCL_MAX_CONNS 3
struct scclChannelPeer {
  struct scclConnector send[SCCL_MAX_CONNS];
  struct scclConnector recv[SCCL_MAX_CONNS];
  int refCount;
};

struct scclDevComm;

#pragma pack(push) /* push current alignment to stack */
#pragma pack(8)    /* set alignment to 8 bytes boundary */

/* scclWork is to be a power of two, currently 4x64 bytes, */
/* to make sure reads to host from the CUDA kernel are aligned. */
/* Make sure to adjust padding at the end of scclWorkElem. */
#define SCCL_WORK_SIZE 256

enum scclWorkType : uint8_t { scclWorkTypeUnused = 0, scclWorkTypeColl = 1, scclWorkTypeP2p = 2, scclWorkTypeRegColl = 3 };
enum scclWorkP2PType : uint8_t { scclWorkP2pTypeUnused = 0, scclWorkP2pTypeSend, scclWorkP2pTypeRecv };

struct scclWorkHeader {
  union {
    int32_t workNext;  // when isLast=0: Offset from kernel argument workHead
    uint32_t doneAcks; // when isLast=1: Monotonic (mod 1<<32) ack value to send back.
  };
  uint16_t funcIndex;
  uint8_t isLast : 1; // last work for this kernel
  uint8_t inFifo : 1; // is this work in the fifo
  enum scclWorkType type;
};

struct scclWorkElem {
  union {
    uint8_t flagBits;
    struct {
      uint8_t isUsed : 1, redOpArgIsPtr : 1, regUsed : 1, nWarps : 5;
    };
  };
  uint8_t direct;
  uint8_t bid;
  uint8_t nChannels;
  struct {
    uint32_t root : 28;
    uint32_t preFnOps : 1;
    uint32_t useSdma : 1;
    uint32_t connIndex : 2;
  };
  const void* sendbuff;
  void* recvbuff;

  size_t count;
  union {
    size_t lastChunkSize;
    // Pivot A2A kernel computes chunk size itself.
    // Instead, it needs the number of bidirectional rings.
    size_t pivotA2ANumBiRings;
  };
  uint64_t redOpArg;
  uint64_t opCount;
};

static_assert((SCCL_WORK_SIZE - alignUp(sizeof(scclWorkHeader), alignof(scclWorkElem))) / sizeof(scclWorkElem) == 4,
              "Sanity check: SCCL_MAX_WORK_ELEMENTS == 4");
#define SCCL_MAX_WORK_ELEMENTS 1

struct scclWorkElemP2p {
  struct {
    int32_t peer : 26;
    uint32_t preFnOps : 1;
    uint32_t useSdma : 1;
    uint32_t connIndex : 2;
    int32_t proto : 2;
  };
  union {
    uint16_t flagBits;
    struct {
      enum scclWorkP2PType p2pType : 4;
      uint16_t nWarps : 4;
      uint16_t warpStart : 4;
      uint16_t ngroups : 4;
    };
  };
  uint16_t opCount;
  // Important not to use any fields with greater than 4-byte alignment since
  // we need sizeof(scclWorkElemP2p)==28, but that would be padded up to 32 if
  // there were 8-byte fields.

  // void* buff;
  uint32_t buffHi32, buffLo32; // buff = buffHi32<<32 | buffLo32;
  // size_t count;
  uint32_t countHi32, countLo32; // count = countHi32<<32 | countLo32;
  int chunkSize;
};

static_assert(((SCCL_WORK_SIZE - alignUp(sizeof(scclWorkHeader), alignof(scclWorkElemP2p))) / sizeof(scclWorkElemP2p)) == 8,
              "Sanity check: SCCL_MAX_WORK_ELEMENTS_P2P == 8");
#define SCCL_MAX_WORK_ELEMENTS_P2P 2
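// Illustrative helpers (hypothetical names, not referenced elsewhere in this header):
// consumers of scclWorkElemP2p reassemble the 64-bit buffer pointer and element count
// from the 32-bit halves above, which exist only to keep the struct at 4-byte alignment.
__host__ __device__ inline void* scclP2pElemBuff(const struct scclWorkElemP2p& e) {
  return reinterpret_cast<void*>((uint64_t(e.buffHi32) << 32) | e.buffLo32);
}
__host__ __device__ inline size_t scclP2pElemCount(const struct scclWorkElemP2p& e) {
  return (size_t(e.countHi32) << 32) | e.countLo32;
}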
struct scclWorkElemReg {
  struct scclWorkElem elem;
  void* dnInputs[SCCL_MAX_DIRECT_ARITY + 1];
  void* dnOutputs[SCCL_MAX_DIRECT_ARITY + 1];
  void* upOutputs[SCCL_MAX_DIRECT_ARITY + 1];
};

#define SCCL_MAX_WORK_ELEMENTS_REG ((SCCL_WORK_SIZE - alignUp(sizeof(scclWorkHeader), alignof(scclWorkElemReg))) / sizeof(scclWorkElemReg))
static_assert(SCCL_MAX_WORK_ELEMENTS_REG == 1, "Sanity check: SCCL_MAX_WORK_ELEMENTS_REG == 1");

// Number of named barriers supported by CUDA
#define SCCL_MAX_GROUPS (SCCL_MAX_NTHREADS / WARP_SIZE)

struct scclWork {
  struct scclWorkHeader header;
  union {
    char pad[SCCL_WORK_SIZE - sizeof(struct scclWorkHeader)];
    struct scclWorkElem elems[SCCL_MAX_WORK_ELEMENTS];
    struct scclWorkElemP2p p2pElems[SCCL_MAX_WORK_ELEMENTS_P2P];
    struct scclWorkElemReg regElems[SCCL_MAX_WORK_ELEMENTS_REG];
  };
};
static_assert(sizeof(struct scclWork) == SCCL_WORK_SIZE, "Sanity check: sizeof(struct scclWork) == SCCL_WORK_SIZE");
static_assert(sizeof(struct scclWork) % 16 == 0, "Sanity check: sizeof(struct scclWork)%16 == 0");

struct scclDevChannelPeer {
  // Stripped version of scclChannelPeer where we only keep the scclConnInfo
  // instead of the full scclConnector.
  struct scclConnInfo send[SCCL_MAX_CONNS];
  struct scclConnInfo recv[SCCL_MAX_CONNS];
};

#pragma pack(pop) /* restore original alignment from stack */

#ifdef ENABLE_PROFILING
#define PROFILE_NUM_ITEMS 31
#define PROFILE_NUM_LAUNCHES 1024
struct scclProf {
  uint32_t count;
  uint32_t seq; // only entry from first launch is used
  struct {
    uint64_t line : 16;
    uint64_t timeStamp : 48;
  } elem[PROFILE_NUM_ITEMS];
};
static_assert(sizeof(struct scclProf) == 256, "scclProf must have size of 256");
#endif

#ifdef ENABLE_COLLTRACE
typedef enum {
  scclCollTraceNotReady = 0,
  scclCollTraceKernelLaunchType = 1,
  scclCollTraceKernelEndType = 2,
  scclCollTraceCollLaunchType = 3,
  scclCollTraceAbortType = 4,
  scclCollTraceDataType = 5,
  scclCollTraceCollElemType = (1 << 4),
  scclCollTraceP2pElemType = (1 << 5),
} scclCollTraceDataType_t;

struct scclCollTrace {
  uint8_t type;
  uint8_t bid;
  int16_t funcIndex;
  uint32_t data_0;
  uint64_t timeStamp;
  union {
    uint64_t opCount;
    uint32_t p2pOpCount[2];
  };
  union {
    uint64_t data_1;
    struct {
      uint8_t nWarps;
      uint8_t bid;
      uint8_t nChannels;
    } coll;
    struct {
      int16_t peer;
      uint8_t ngroups : 4;
      uint8_t connIndex : 4;
      uint8_t warpStart : 4;
      uint8_t nWarps : 4;
    } p2p[2];
  };
};
static_assert(sizeof(struct scclCollTrace) == 8 * sizeof(int), "scclCollTrace must have a pow2 size");

union scclCollTraceTail {
  uint32_t tail;
  char padding[4096];
};

#define COLLTRACE_NUM_ITEMS 8192
#endif

#ifdef HCU_SDMA_FEATURE
struct sdmaQueueContext {
  hsa_sdma_info_t* sdmaInfo;
  uint64_t pkgIndex;
  uint32_t queueId;
  uint32_t sumSdmaCopyCount;
  uint32_t sumAllCopyCount;
  uint32_t queueLock;
  uint32_t minCopySize;
  uint32_t copyCountEnable;
  uint32_t sdmaQueueDepth;
  uint32_t sdmaPkgLen;
  uint32_t sdmaQueueLen;
};
#endif

struct alignas(16) scclDevChannel {
  struct scclDevChannelPeer** peers;
  struct scclRing ring;
  struct scclTree tree;
  struct scclTree collnetChain;
  struct scclDirect collnetDirect;
  struct scclTree binTree;
  struct scclNvls nvls;
  uint32_t* workFifoDone; // Location of done counter, device writes index+1 of last work processed
};

struct scclDevComm {
  int rank;
  int nRanks;
  int buffSizes[SCCL_NUM_PROTOCOLS];

  // Operation list for aggregation
  int workFifoDepth;
  struct scclWork* workFifoHeap; // may be cudaHost or GDR memory

  // Flag to ask SCCL kernels to abort
  volatile uint32_t* abortFlag;

  // Channels, device side
  struct scclDevChannel* channels /*[MAXCHANNELS]*/;

#if defined(ENABLE_NPKIT)
  NpKitEventCollectContext* npKitEventCollectContexts;
#endif

#ifdef ENABLE_COLLTRACE
  struct scclCollTrace* collTrace;
  union scclCollTraceTail* collTraceTail;
  pthread_t collTraceThread;
#endif

#ifdef ENABLE_PROFILING
  struct scclProf* devProf;
#endif

#if defined(ENABLE_TIMELINE)
  TimelineGpuEventContext* gpuEventContext;
#endif

#if defined(ENABLE_NPKIT) || defined(ENABLE_TIMELINE)
  uint64_t* cpuTimestamp;
#endif
};

struct alignas(16) scclDevCommAndChannels {
  struct scclDevComm comm;
  struct scclDevChannel channels[MAXCHANNELS];
};

#ifdef __CUDA_ARCH__
#define SCCL_CUDA_ARCH __CUDA_ARCH__
#else
#define SCCL_CUDA_ARCH 0
#endif

template <typename T>
__host__ __device__ constexpr T min_constexpr(T a) {
  return a;
}
template <typename T, typename... Ts>
__host__ __device__ constexpr T min_constexpr(T a, T b, Ts... c) {
  return min_constexpr((a < b ? a : b), c...);
}

template <typename T>
__host__ __device__ constexpr T max_constexpr(T a) {
  return a;
}
template <typename T, typename... Ts>
__host__ __device__ constexpr T max_constexpr(T a, T b, Ts... c) {
  return max_constexpr((a > b ? a : b), c...);
}

// Calculate the unroll factor given:
// * bytePerPack: number of bytes accessed per instruction
// * insns: max permissible unroll value
// * bytes: desired number of in-flight bytes per iteration ( = unroll*bytePerPack)
__host__ __device__ constexpr int scclCalcUnroll(int bytePerPack, int insns, int bytes) {
  return min_constexpr(insns, (bytes + bytePerPack - 1) / bytePerPack);
}

// Note that all unroll value logic should depend on a given cudaArch argument
// and not __CUDA_ARCH__ since these need to be host-side executable where the
// arch value is strictly runtime only. By defaulting to SCCL_CUDA_ARCH, device
// side code can elide passing the arch for brevity.
__host__ __device__ constexpr int scclCollUnroll(int cudaArch = SCCL_CUDA_ARCH) {
  // Our collective unroll should move to the same bytes&insns model as NVLS.
  return cudaArch >= 800 ? 8 : 4;
}

__host__ __device__ constexpr int scclNvlsUnrollBytes(int cudaArch = SCCL_CUDA_ARCH) { return 4 * 16; }
__host__ __device__ constexpr int scclNvlsUnrollInsns(int cudaArch = SCCL_CUDA_ARCH) { return 16; }

__host__ __device__ constexpr int scclNvlsUnroll(int bytePerPack, int cudaArch = SCCL_CUDA_ARCH) {
  return scclCalcUnroll(bytePerPack, scclNvlsUnrollInsns(cudaArch), scclNvlsUnrollBytes(cudaArch));
}
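// Illustrative compile-time checks of the unroll arithmetic above (implied by
// scclNvlsUnrollBytes/Insns and scclCalcUnroll, not an additional contract):
// with a 64-byte in-flight budget and a 16-instruction cap, 16-byte packs unroll
// ceil(64/16) = 4 times and 8-byte packs unroll ceil(64/8) = 8 times.
static_assert(scclNvlsUnroll(16, 900) == 4, "16B packs: unroll limited by the 64B byte budget");
static_assert(scclNvlsUnroll(8, 900) == 8, "8B packs: unroll limited by the 64B byte budget");
static_assert(scclCollUnroll(900) == 8 && scclCollUnroll(700) == 4, "collective unroll by arch");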
// The amount of dynamic shmem per warp
__host__ __device__ constexpr int scclShmemScratchWarpSize(int cudaArch = SCCL_CUDA_ARCH) {
  return (max_constexpr<size_t>(
              /*LL    */ 0,
              /*LL128 */ (SCCL_LL128_SHMEM_ELEMS_PER_THREAD * WARP_SIZE) * sizeof(uint64_t),
              /*SIMPLE*/ (scclCollUnroll(cudaArch) * WARP_SIZE + 1) * 16,
              // NVLS needs an extra 16B to read unaligned data.
              /*NVLS  */ WARP_SIZE * (cudaArch >= 900 ? scclNvlsUnrollBytes(cudaArch) : 0) + 16) +
          15) &
         -16; // pad to 16 bytes
}

// The amount of dynamic shmem per block
__host__ __device__ constexpr int scclShmemDynamicSize(int cudaArch = SCCL_CUDA_ARCH) {
  return cudaArch < 700 ? 0 : scclShmemScratchWarpSize(cudaArch) * (SCCL_MAX_NTHREADS / WARP_SIZE);
}

} // namespace sccl

#endif
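/*
 * Launch-side note (illustrative; 'scclKernel' is a placeholder, not a symbol declared
 * in this header): because SCCL_CUDA_ARCH is 0 when the header is compiled for the host,
 * scclShmemDynamicSize() must be given the target architecture explicitly when it is
 * used to size the dynamic shared memory of a kernel launch, e.g.
 *
 *   int arch = ...; // derived from the device properties of the active GPU
 *   size_t shmem = sccl::scclShmemDynamicSize(arch);
 *   hipLaunchKernelGGL(scclKernel, grid, block, shmem, stream, kernelArgs);
 */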