/*************************************************************************
 * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
 * Modifications Copyright (c) 2019-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * See LICENSE.txt for license information
 ************************************************************************/

#ifndef SCCL_DEVICE_H_
#define SCCL_DEVICE_H_

#include "check.h"
#include "sccl_bfloat16.h"
#include "align.h"
#if defined(ENABLE_NPKIT)
#include "npkit/npkit_struct.h"
#endif
#if defined(ENABLE_TIMELINE)
#include "timeline/timeline.h"
#endif
#include <stdint.h>

#ifdef HCU_SDMA_FEATURE
#include "hsa/hsa_ext_amd.h"
#include "hsa_extra.h"
// #define HCU_PRINT_DEBUG
#endif

namespace sccl {

#define PRINT_ERR(...)
#define PRINT_INFO(...)
#define PRINT_INFOM(...)
#define PRINT_INFOT(tid, ...)
#define PRINT_DEBUG(...)

#if defined(ENABLE_NPKIT) && defined(HCU_SDMA_FEATURE)
#define NPKIT_SET_GPU_EVENT(event, size, cost) \
    NpKit::CollectGpuEvent(event, size, cost, NPKIT_GET_GPU_TIMESTAMP(), scclShmem.comm.npKitEventCollectContexts + npKitCtxIdx);
#define NPKIT_SET_GPU_EVENT_TM(event, size, cost, tm) NpKit::CollectGpuEvent(event, size, cost, tm, scclShmem.comm.npKitEventCollectContexts + npKitCtxIdx);
#else
#define NPKIT_SET_GPU_EVENT(event, size, cost)
#define NPKIT_SET_GPU_EVENT_TM(event, size, cost, tm)
#endif

#ifdef HCU_SDMA_FEATURE
#define INIT_PRIMS_SDMA(prims, args)                                                                           \
    {                                                                                                          \
        prims.rank            = scclShmem.comm.rank;                                                           \
        prims.useSdmaConfig   = args->useSdma;                                                                 \
        prims.useSdmaCopy     = args->useSdma && prims.sdmaQueueCtx;                                           \
        prims.preFnOps        = args->preFnOps;                                                                \
        prims.sdmaMinCopySize = args->useSdma && prims.sdmaQueueCtx ? prims.sdmaQueueCtx->minCopySize : 0;     \
        prims.sdmaCountEnable = args->useSdma && prims.sdmaQueueCtx ? prims.sdmaQueueCtx->copyCountEnable : 0; \
        prims.sdmaCopyCount   = 0;                                                                             \
        prims.allCopyCount    = 0;                                                                             \
    }
#endif
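// Illustrative use of INIT_PRIMS_SDMA from a collective kernel (a sketch only;
// `prims` stands for a primitives object holding an sdmaQueueCtx and `args`
// for a pointer to the current work element, both defined elsewhere):
//   INIT_PRIMS_SDMA(prims, args);
//   // prims.useSdmaCopy is now set only if the caller requested SDMA
//   // (args->useSdma) and an SDMA queue context is attached.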

#define SCCL_NUM_FUNCTIONS 5 // SendRecv, Send, Recv and AllToAllPivot not included for now
typedef enum {
    scclFuncBroadcast,
    scclFuncReduce,
    scclFuncAllGather,
    scclFuncReduceScatter,
    scclFuncAllReduce,
    scclFuncSendRecv,
    scclFuncSend,
    scclFuncRecv,
    scclFuncAllToAllPivot,
    scclNumFuncs
} scclFunc_t;
extern const char* scclFuncStr[SCCL_NUM_FUNCTIONS + 2];

#define SCCL_NUM_ALGORITHMS 6 // Tree/Ring/CollNet*
#define SCCL_ALGO_TREE 0           // tree algorithm
#define SCCL_ALGO_RING 1           // ring algorithm
#define SCCL_ALGO_COLLNET_DIRECT 2 // direct collective-network algorithm
#define SCCL_ALGO_COLLNET_CHAIN 3  // chained collective-network algorithm
#define SCCL_ALGO_NVLS 4           // NVLink (NVLS) algorithm
#define SCCL_ALGO_NVLS_TREE 5      // NVLink tree algorithm

extern const char* scclAlgoStr[SCCL_NUM_ALGORITHMS];

#define SCCL_NUM_PROTOCOLS 3 // Simple/LL/LL128
#define SCCL_PROTO_LL 0
#define SCCL_PROTO_LL128 1
#define SCCL_PROTO_SIMPLE 2
extern const char* scclProtoStr[SCCL_NUM_PROTOCOLS];

#define SCCL_MAX_OPS 2048
#define SCCL_STEPS 8

union scclLLFifoLine {
    /* Flags have to be *after* data, because otherwise, an incomplete receive
       from the network may receive the flag but not the data.
       Note this is assuming that either we receive contiguous chunks of data
       (sockets) or data is written with an atomicity of 8 bytes (IB/RDMA). */
    struct {
        uint32_t data1;
        uint32_t flag1;
        uint32_t data2;
        uint32_t flag2;
    };
    uint64_t v[2];
    int4 i4;
};
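// Illustrative sketch of how a sender preserves the data-before-flag guarantee
// described above on a little-endian target: each {data, flag} pair is packed
// into one uint64_t and written with a single 8-byte store, so a receiver that
// sees the flag can trust the matching data word (store64 is a hypothetical
// 64-bit store helper):
//   union scclLLFifoLine line;
//   line.v[0] = ((uint64_t)flag << 32) | data1; // {data1, flag1}
//   line.v[1] = ((uint64_t)flag << 32) | data2; // {data2, flag2}
//   store64(&slot->v[0], line.v[0]);
//   store64(&slot->v[1], line.v[1]);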

#define WARP_SIZE warpSize
#define MAXCHANNELS 32
#define SCCL_MAX_NTHREADS 256
#define SCCL_SIMPLE_MAX_NTHREADS SCCL_MAX_NTHREADS
#define SCCL_LL_MAX_NTHREADS SCCL_MAX_NTHREADS
#define SCCL_LL_LINES_PER_THREAD 8
#ifdef TEST_LL_CLEANUP
#define SCCL_LL_CLEAN_MASK 0x078 // Set to 0x100 to disable cleanup
#define SCCL_LL_FLAG_MAX 0x100
#define SCCL_LL_FLAG(a) ((uint32_t)((a) % SCCL_LL_FLAG_MAX))
#else
#define SCCL_LL_CLEAN_MASK 0x7ffffff8
#define SCCL_LL_FLAG(a) ((uint32_t)(a))
#endif
// Make sure the clean mask will last for at least SCCL_STEPS
static_assert(SCCL_LL_CLEAN_MASK % SCCL_STEPS == 0, "Invalid SCCL_LL_CLEAN_MASK value");

#define SCCL_LL128_LINESIZE 64
#define SCCL_LL128_LINEELEMS (SCCL_LL128_LINESIZE / sizeof(uint64_t))
#define SCCL_LL128_DATAELEMS (SCCL_LL128_LINEELEMS - 1)
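// With SCCL_LL128_LINESIZE = 64, each LL128 line holds 8 uint64_t elements,
// of which 7 carry data and the remaining one carries the protocol flag, so
// 7/8 of every line is payload.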

#define SCCL_LL128_MAX_NTHREADS 256
#define SCCL_LL128_ELEMS_PER_THREAD 28

#define SCCL_LL128_SHMEM_ELEMS_PER_THREAD 4
#define SCCL_LL128_SHMEM_SIZE (SCCL_LL128_SHMEM_ELEMS_PER_THREAD * SCCL_LL128_MAX_NTHREADS)

#define SCCL_DIRECT_WRITE 0x01
#define SCCL_DIRECT_READ 0x02
#define SCCL_DIRECT_NIC 0x04
#define SCCL_IPC_WRITE 0x08
#define SCCL_IPC_READ 0x10
#define SCCL_NVLS_MIN_POLL 0x20

#ifdef HCU_SDMA_FEATURE
#define SDMA_CTX_VALID_MAGIC 0xD65A
#endif

struct scclConnInfo {
    // Regular comm mechanism
    char* buffs[SCCL_NUM_PROTOCOLS]; // Local for recv, remote for send
    uint64_t* tail;                  // Local for recv, remote for send
    uint64_t* head;                  // Local for send, remote for recv

    int flags;                  // Direct communication / other flags
    int shared;                 // Buffers are shared
    void** ptrExchange;         // Pointer exchange for direct communication
    uint64_t* redOpArgExchange; // PreOp scaler exchange for direct pull case

    int* sizesFifo; // Sizes fifo from GPU to proxy
    int* offsFifo;  // Buffer fifo from proxy to GPU

    uint64_t step; // Keep where we are
    uint64_t llLastCleaning;

    // GPU's HDP_MEM_FLUSH_ADDR: HDP Memory Coherency Flush Control. This register
    // allows software to explicitly initiate a flush read to HDP memory. See more
    // descriptions in primitives.h.
    uint32_t* next_hdp_reg; // Next GPU in ring (for p2p transport use only)
    uint32_t* curr_hdp_reg; // Current GPU's HDP register

#ifdef HCU_SDMA_FEATURE
    struct sdmaQueueContext* sdmaQueueCtx;
    uint32_t sdmaCtxValidMagic;
#endif
};
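// Illustrative sketch of the head/tail handshake these fields implement
// (stepSize stands for the per-protocol slot size defined elsewhere): the
// sender fills buffs[proto] + (step % SCCL_STEPS) * stepSize, then publishes
// the slot by storing step + 1 through `tail` (its remote pointer); the
// receiver waits until *tail (its local pointer) moves past its own `step`,
// consumes the slot, and frees it by storing its advanced `step` through
// `head` so the sender can reuse the buffer.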

struct scclProxyConnector {
    int tpRank;
    int tpLocalRank;
    int sameProcess;
    struct scclProxyConnection* connection;
};

struct scclConnector {
    int connected;
    struct scclProxyConnector proxyConn;
    struct scclTransportComm* transportComm;
    void* transportResources;
    struct scclConnInfo conn;
};

struct scclRing {
    // Shortcuts for userRanks[1] and userRanks[n-1]
    int prev;
    int next;

    // Maps an internal sccl index to user-specified rank order. This is necessary
    // since we need to know how the user expects data to be ordered across
    // devices. Ordered from current device.
    int* userRanks;

    int index; // This rank's index in the ring
};
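// For example (illustrative): with 4 ranks and the ring 2 -> 0 -> 3 -> 1 -> 2
// viewed from rank 2, userRanks is {2, 0, 3, 1}, so next == userRanks[1] == 0
// and prev == userRanks[3] == 1.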

// The root of each tree only has one node down (+1 intra-node).
#define SCCL_MAX_TREE_ARITY_TOP 2
// Nodes inside the binary tree can have up to two nodes down (+1 intra-node).
#define SCCL_MAX_TREE_ARITY 3
struct scclTree {
    int depth;
    int up;
    int down[SCCL_MAX_TREE_ARITY];
};

#define SCCL_MAX_DIRECT_ARITY 7
struct scclDirect {
    int depth;
    int out;
    int nHeads;   // Number of N<->1<->net operations we'll do in parallel; size of up/down
    int headRank; // Index in 0..nHeads-1 I am the head rank of. -1 if I'm not a head rank (no local NIC)
    int shift;    // Shuffling of send/recv for scatter/gather operations, basically localRank%nHeads
    int up[SCCL_MAX_DIRECT_ARITY];
    int down[SCCL_MAX_DIRECT_ARITY];
};

#define SCCL_CONN_IDX_P2P_NET 2
#define SCCL_MAX_NVLS_ARITY 8
#define SCCL_MAX_NVLS_TREE_ARITY 3
struct scclNvls {
    int out;
    int nHeads;   // Number of N<->1<->net operations we'll do in parallel; size of up/down
    int headRank; // Index in 0..nHeads-1 I am the head rank of. -1 if I'm not a head rank (no local NIC)
    int up[SCCL_MAX_NVLS_ARITY];
    int down;
    int treeUp;
    int treeDown[SCCL_MAX_NVLS_TREE_ARITY];
    int node;
    int nNodes;
};

#define SCCL_MAX_CONNS 3
struct scclChannelPeer {
    struct scclConnector send[SCCL_MAX_CONNS];
    struct scclConnector recv[SCCL_MAX_CONNS];
    int refCount;
};

struct scclDevComm;

#pragma pack(push) /* push current alignment to stack */
#pragma pack(8)    /* set alignment to 8 bytes boundary */
/* scclWork is to be a power of two, currently 4x64 bytes, */
/* to make sure reads to host from the CUDA kernel are aligned. */
/* Make sure to adjust padding at the end of scclWorkElem. */
#define SCCL_WORK_SIZE 256

enum scclWorkType : uint8_t {
    scclWorkTypeUnused  = 0,
    scclWorkTypeColl    = 1,
    scclWorkTypeP2p     = 2,
    scclWorkTypeRegColl = 3
};
enum scclWorkP2PType : uint8_t {
    scclWorkP2pTypeUnused = 0,
    scclWorkP2pTypeSend,
    scclWorkP2pTypeRecv
};

struct scclWorkHeader {
    union {
        int32_t workNext;  // when isLast=0: Offset from kernel argument workHead
        uint32_t doneAcks; // when isLast=1: Monotonic (mod 1<<32) ack value to send back.
    };
    uint16_t funcIndex;
    uint8_t isLast : 1; // last work for this kernel
    uint8_t inFifo : 1; // is this work in the fifo
    enum scclWorkType type;
};
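// Illustrative consumer loop for this header (a sketch only; workHead is the
// kernel argument mentioned above, and processWork / nextWork stand in for
// dispatch and offset-following logic defined elsewhere):
//   struct scclWork* w = workHead;
//   while (true) {
//     processWork(w);                        // dispatch on w->header.funcIndex / type
//     if (w->header.isLast) {
//       // publish w->header.doneAcks so the host knows how many works it may reclaim
//       break;
//     }
//     w = nextWork(workHead, w->header.workNext);
//   }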

struct scclWorkElem {
    union {
        uint8_t flagBits;
        struct {
            uint8_t isUsed : 1, redOpArgIsPtr : 1, regUsed : 1, nWarps : 5;
        };
    };
    uint8_t direct;
    uint8_t bid;
    uint8_t nChannels;
    struct {
        uint32_t root : 28;
        uint32_t preFnOps : 1;
        uint32_t useSdma : 1;
        uint32_t connIndex : 2;
    };

    const void* sendbuff;
    void* recvbuff;

    size_t count;
    union {
        size_t lastChunkSize;
        // Pivot A2A kernel computes chunk size itself.
        // Instead, it needs the number of bidirectional rings.
        size_t pivotA2ANumBiRings;
    };
    uint64_t redOpArg;
    uint64_t opCount;
};

static_assert((SCCL_WORK_SIZE - alignUp(sizeof(scclWorkHeader), alignof(scclWorkElem))) / sizeof(scclWorkElem) == 4,
              "Sanity check: a scclWork has room for 4 scclWorkElem");
// Only SCCL_MAX_WORK_ELEMENTS of those slots are actually packed into one scclWork.
#define SCCL_MAX_WORK_ELEMENTS 1

struct scclWorkElemP2p {
    struct {
        int32_t peer : 26;
        uint32_t preFnOps : 1;
        uint32_t useSdma : 1;
        uint32_t connIndex : 2;
        int32_t proto : 2;
    };
    union {
        uint16_t flagBits;
        struct {
            enum scclWorkP2PType p2pType : 4;
            uint16_t nWarps : 4;
            uint16_t warpStart : 4;
            uint16_t ngroups : 4;
        };
    };
    uint16_t opCount;
    // Important not to use any fields with greater than 4-byte alignment since
    // we need sizeof(scclWorkElemP2p)==28, but that would be padded up to 32 if
    // there were 8-byte fields.
    // void* buff;
    uint32_t buffHi32, buffLo32; // buff = buffHi32<<32 | buffLo32;
    // size_t count;
    uint32_t countHi32, countLo32; // count = countHi32<<32 | countLo32;
    int chunkSize;
};
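// Illustrative reconstruction of the split 64-bit fields (a sketch matching
// the comments above, where `e` points to a scclWorkElemP2p):
//   void*  buff  = (void*)(((uint64_t)e->buffHi32  << 32) | e->buffLo32);
//   size_t count = (size_t)(((uint64_t)e->countHi32 << 32) | e->countLo32);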

static_assert(((SCCL_WORK_SIZE - alignUp(sizeof(scclWorkHeader), alignof(scclWorkElemP2p))) / sizeof(scclWorkElemP2p)) == 8,
              "Sanity check: a scclWork has room for 8 scclWorkElemP2p");
// Only SCCL_MAX_WORK_ELEMENTS_P2P of those slots are actually packed into one scclWork.
#define SCCL_MAX_WORK_ELEMENTS_P2P 2

struct scclWorkElemReg {
    struct scclWorkElem elem;
    void* dnInputs[SCCL_MAX_DIRECT_ARITY + 1];
    void* dnOutputs[SCCL_MAX_DIRECT_ARITY + 1];
    void* upOutputs[SCCL_MAX_DIRECT_ARITY + 1];
};

#define SCCL_MAX_WORK_ELEMENTS_REG ((SCCL_WORK_SIZE - alignUp(sizeof(scclWorkHeader), alignof(scclWorkElemReg))) / sizeof(scclWorkElemReg))
static_assert(SCCL_MAX_WORK_ELEMENTS_REG == 1, "Sanity check: SCCL_MAX_WORK_ELEMENTS_REG == 1");

// Number of named barriers supported by CUDA
#define SCCL_MAX_GROUPS (SCCL_MAX_NTHREADS / WARP_SIZE)

struct scclWork {
    struct scclWorkHeader header;
    union {
        char pad[SCCL_WORK_SIZE - sizeof(struct scclWorkHeader)];
        struct scclWorkElem elems[SCCL_MAX_WORK_ELEMENTS];
        struct scclWorkElemP2p p2pElems[SCCL_MAX_WORK_ELEMENTS_P2P];
        struct scclWorkElemReg regElems[SCCL_MAX_WORK_ELEMENTS_REG];
    };
};
static_assert(sizeof(struct scclWork) == SCCL_WORK_SIZE, "Sanity check: sizeof(struct scclWork) == SCCL_WORK_SIZE");
static_assert(sizeof(struct scclWork) % 16 == 0, "Sanity check: sizeof(struct scclWork)%16 == 0");

struct scclDevChannelPeer {
    // Stripped version of scclChannelPeer where we only keep the scclConnInfo
    // instead of the full scclConnector.
    struct scclConnInfo send[SCCL_MAX_CONNS];
    struct scclConnInfo recv[SCCL_MAX_CONNS];
};
#pragma pack(pop) /* restore original alignment from stack */

#ifdef ENABLE_PROFILING
#define PROFILE_NUM_ITEMS 31
#define PROFILE_NUM_LAUNCHES 1024

struct scclProf {
    uint32_t count;
    uint32_t seq; // only entry from first launch is used
    struct {
        uint64_t line : 16;
        uint64_t timeStamp : 48;
    } elem[PROFILE_NUM_ITEMS];
};
static_assert(sizeof(struct scclProf) == 256, "scclProf must have size of 256");
#endif

#ifdef ENABLE_COLLTRACE
typedef enum {
    scclCollTraceNotReady         = 0,
    scclCollTraceKernelLaunchType = 1,
    scclCollTraceKernelEndType    = 2,
    scclCollTraceCollLaunchType   = 3,
    scclCollTraceAbortType        = 4,
    scclCollTraceDataType         = 5,
    scclCollTraceCollElemType     = (1 << 4),
    scclCollTraceP2pElemType      = (1 << 5),
} scclCollTraceDataType_t;

struct scclCollTrace {
    uint8_t type;
    uint8_t bid;
    int16_t funcIndex;
    uint32_t data_0;
    uint64_t timeStamp;
    union {
        uint64_t opCount;
        uint32_t p2pOpCount[2];
    };
    union {
        uint64_t data_1;
        struct {
            uint8_t nWarps;
            uint8_t bid;
            uint8_t nChannels;
        } coll;
        struct {
            int16_t peer;
            uint8_t ngroups : 4;
            uint8_t connIndex : 4;
            uint8_t warpStart : 4;
            uint8_t nWarps : 4;
        } p2p[2];
    };
};
static_assert(sizeof(struct scclCollTrace) == 8 * sizeof(int), "scclCollTrace must have a pow2 size");

union scclCollTraceTail {
    uint32_t tail;
    char padding[4096];
};

#define COLLTRACE_NUM_ITEMS 8192
#endif

#ifdef HCU_SDMA_FEATURE
struct sdmaQueueContext {
    hsa_sdma_info_t* sdmaInfo;
    uint64_t pkgIndex;
    uint32_t queueId;
    uint32_t sumSdmaCopyCount;
    uint32_t sumAllCopyCount;
    uint32_t queueLock;
    uint32_t minCopySize;
    uint32_t copyCountEnable;
    uint32_t sdmaQueueDepth;
    uint32_t sdmaPkgLen;
    uint32_t sdmaQueueLen;
};
#endif

struct alignas(16) scclDevChannel {
    struct scclDevChannelPeer** peers;
    struct scclRing ring;
    struct scclTree tree;
    struct scclTree collnetChain;
    struct scclDirect collnetDirect;
    struct scclTree binTree;
    struct scclNvls nvls;
    uint32_t* workFifoDone; // Location of done counter, device writes index+1 of last work processed
};

struct scclDevComm {
    int rank;
    int nRanks;
    int buffSizes[SCCL_NUM_PROTOCOLS];

    // Operation list for aggregation
    int workFifoDepth;
    struct scclWork* workFifoHeap; // may be cudaHost or GDR memory

    // Flag to ask SCCL kernels to abort
    volatile uint32_t* abortFlag;

    // Channels, device side
    struct scclDevChannel* channels /*[MAXCHANNELS]*/;

#if defined(ENABLE_NPKIT)
    NpKitEventCollectContext* npKitEventCollectContexts;
#endif

#ifdef ENABLE_COLLTRACE
    struct scclCollTrace* collTrace;
    union scclCollTraceTail* collTraceTail;
    pthread_t collTraceThread;
#endif

#ifdef ENABLE_PROFILING
    struct scclProf* devProf;
#endif
#if defined(ENABLE_TIMELINE)
    TimelineGpuEventContext* gpuEventContext;
#endif
#if defined(ENABLE_NPKIT) || defined(ENABLE_TIMELINE)
    uint64_t* cpuTimestamp;
#endif
};

struct alignas(16) scclDevCommAndChannels {
    struct scclDevComm comm;
    struct scclDevChannel channels[MAXCHANNELS];
};

#ifdef __CUDA_ARCH__
#define SCCL_CUDA_ARCH __CUDA_ARCH__
#else
#define SCCL_CUDA_ARCH 0
#endif

template <typename T>
__host__ __device__ constexpr T min_constexpr(T a) {
    return a;
}
template <typename T, typename... Ts>
__host__ __device__ constexpr T min_constexpr(T a, T b, Ts... c) {
    return min_constexpr<T>((a < b ? a : b), c...);
}

template <typename T>
__host__ __device__ constexpr T max_constexpr(T a) {
    return a;
}
template <typename T, typename... Ts>
__host__ __device__ constexpr T max_constexpr(T a, T b, Ts... c) {
    return max_constexpr<T>((a > b ? a : b), c...);
}
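
// Compile-time examples of the variadic helpers above:
static_assert(min_constexpr(3, 1, 2) == 1, "min_constexpr returns the smallest argument");
static_assert(max_constexpr(3, 1, 2) == 3, "max_constexpr returns the largest argument");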

// Calculate the unroll factor given:
// * bytePerPack: number of bytes accessed per instruction
// * insns: max permissible unroll value
// * bytes: desired number of in-flight bytes per iteration ( = unroll*bytePerPack)
__host__ __device__ constexpr int scclCalcUnroll(int bytePerPack, int insns, int bytes) {
    return min_constexpr(insns, (bytes + bytePerPack - 1) / bytePerPack);
}
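
// Worked example: with 16-byte packs, an instruction cap of 16 and a 64-byte
// in-flight target, the unroll is min(16, ceil(64/16)) == 4.
static_assert(scclCalcUnroll(16, 16, 64) == 4, "Example: 64 in-flight bytes with 16B packs unroll 4x");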

// Note that all unroll value logic should depend on a given cudaArch argument
// and not __CUDA_ARCH__ since these need to be host-side executable where the
// arch value is strictly runtime only. By defaulting to SCCL_CUDA_ARCH, device
// side code can elide passing the arch for brevity.

__host__ __device__ constexpr int scclCollUnroll(int cudaArch = SCCL_CUDA_ARCH) {
    // Our collective unroll should move to the same bytes&insns model as NVLS.
    return cudaArch >= 800 ? 8 : 4;
}

__host__ __device__ constexpr int scclNvlsUnrollBytes(int cudaArch = SCCL_CUDA_ARCH) { return 4 * 16; }
__host__ __device__ constexpr int scclNvlsUnrollInsns(int cudaArch = SCCL_CUDA_ARCH) { return 16; }

__host__ __device__ constexpr int scclNvlsUnroll(int bytePerPack, int cudaArch = SCCL_CUDA_ARCH) {
    return scclCalcUnroll(bytePerPack, scclNvlsUnrollInsns(cudaArch), scclNvlsUnrollBytes(cudaArch));
}
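// With the constants above, 16-byte packs give an NVLS unroll of
// scclCalcUnroll(16, 16, 64) == 4, while 8-byte packs give min(16, 64/8) == 8.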

// The amount of dynamic shmem per warp
__host__ __device__ constexpr int scclShmemScratchWarpSize(int cudaArch = SCCL_CUDA_ARCH) {
    return (max_constexpr<int>(
                /*LL    */ 0,
                /*LL128 */ (SCCL_LL128_SHMEM_ELEMS_PER_THREAD * WARP_SIZE) * sizeof(uint64_t),
                /*SIMPLE*/ (scclCollUnroll(cudaArch) * WARP_SIZE + 1) * 16,
                // NVLS needs an extra 16B to read unaligned data.
                /*NVLS  */ WARP_SIZE * (cudaArch >= 900 ? scclNvlsUnrollBytes(cudaArch) : 0) + 16) +
            15) &
           -16; // pad to 16 bytes
}

// The amount of dynamic shmem per block
__host__ __device__ constexpr int scclShmemDynamicSize(int cudaArch = SCCL_CUDA_ARCH) {
    return cudaArch < 700 ? 0 : scclShmemScratchWarpSize(cudaArch) * (SCCL_MAX_NTHREADS / WARP_SIZE);
}

} // namespace sccl

#endif