/*************************************************************************
 * Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * See LICENSE for license information.
 ************************************************************************/

#ifndef TRANSFORMER_ENGINE_PYTORCH_CSRC_EXTENSIONS_H_
#define TRANSFORMER_ENGINE_PYTORCH_CSRC_EXTENSIONS_H_

#include <optional>

#include "common.h"

namespace transformer_engine::pytorch {

/***************************************************************************************************
 * Permutation
 **************************************************************************************************/

std::tuple<at::Tensor, at::Tensor, std::vector<at::Tensor>> moe_permute_fwd(
    at::Tensor input, const DType dtype, at::Tensor indices, int64_t num_out_tokens,
    std::vector<at::Tensor> workspace, int64_t max_expanded_token_num);

at::Tensor moe_permute_bwd(at::Tensor input, const DType dtype, at::Tensor row_id_map,
                           at::Tensor prob, int64_t num_tokens, int64_t topK);

at::Tensor moe_unpermute_fwd(at::Tensor input, const DType dtype, at::Tensor row_id_map,
                             at::Tensor prob, int64_t num_tokens, int64_t topK);

std::tuple<at::Tensor, at::Tensor> moe_unpermute_bwd(at::Tensor input_bwd, at::Tensor input_fwd,
                                                     const DType dtype, at::Tensor row_id_map,
                                                     at::Tensor prob);

/***************************************************************************************************
 * Attention
 **************************************************************************************************/

NVTE_Fused_Attn_Backend get_fused_attn_backend(const DType q_dtype, const DType kv_dtype,
                                               NVTE_QKV_Layout qkv_layout, NVTE_Bias_Type bias_type,
                                               NVTE_Mask_Type attn_mask_type, float p_dropout,
                                               size_t num_attn_heads, size_t num_gqa_groups,
                                               size_t max_seqlen_q, size_t max_seqlen_kv,
                                               size_t head_dim_qk, size_t head_dim_v,
                                               int64_t window_size_left, int64_t window_size_right);
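
// Usage sketch for backend selection (hypothetical model/sequence parameters; -1 disables
// sliding-window attention on that side):
//
//   NVTE_Fused_Attn_Backend backend = get_fused_attn_backend(
//       DType::kBFloat16, DType::kBFloat16, NVTE_QKV_Layout::NVTE_BSHD_BSHD_BSHD,
//       NVTE_Bias_Type::NVTE_NO_BIAS, NVTE_Mask_Type::NVTE_CAUSAL_MASK, /*p_dropout=*/0.0f,
//       /*num_attn_heads=*/16, /*num_gqa_groups=*/16, /*max_seqlen_q=*/2048,
//       /*max_seqlen_kv=*/2048, /*head_dim_qk=*/64, /*head_dim_v=*/64,
//       /*window_size_left=*/-1, /*window_size_right=*/-1);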

std::vector<py::object> fused_attn_fwd(
    size_t max_seqlen_q, size_t max_seqlen_kv, bool is_training, float attn_scale, float p_dropout,
    bool set_zero, NVTE_QKV_Layout qkv_layout, NVTE_Bias_Type bias_type,
    NVTE_Mask_Type attn_mask_type, const std::vector<int64_t> window_size,
    const at::Tensor cu_seqlens_q, const at::Tensor cu_seqlens_kv, const py::handle Q,
    const py::handle K, const py::handle V, const at::ScalarType fake_dtype,
    const std::optional<at::Tensor> cu_seqlens_q_padded,
    const std::optional<at::Tensor> cu_seqlens_kv_padded,
    const std::optional<at::Tensor> page_table_k, const std::optional<at::Tensor> page_table_v,
    py::handle s_quantizer, py::handle o_quantizer, const std::optional<at::Tensor> Bias,
    const std::optional<at::Generator> rng_gen, size_t rng_elts_per_thread);

std::vector<py::object> fused_attn_bwd(
    size_t max_seqlen_q, size_t max_seqlen_kv, float attn_scale, float p_dropout, bool set_zero,
    NVTE_QKV_Layout qkv_layout, NVTE_Bias_Type bias_type, NVTE_Mask_Type attn_mask_type,
    const std::vector<int64_t> window_size, bool deterministic, const at::Tensor cu_seqlens_q,
    const at::Tensor cu_seqlens_kv, const py::handle Q, const py::handle K, const py::handle V,
    const py::handle O, const py::handle dO, const at::ScalarType fake_dtype, const DType dqkv_type,
    const std::vector<at::Tensor> Aux_CTX_Tensors,
    const std::optional<at::Tensor> cu_seqlens_q_padded,
    const std::optional<at::Tensor> cu_seqlens_kv_padded, py::handle s_quantizer,
    py::handle dp_quantizer, py::handle dqkv_quantizer);

at::Tensor fa_prepare_fwd(at::Tensor qkvi);
at::Tensor fa_prepare_bwd(at::Tensor q, at::Tensor k, at::Tensor v);

at::Tensor convert_thd_to_bshd(at::Tensor tensor, at::Tensor cu_seqlens, int b, int max_seq_len);
at::Tensor convert_bshd_to_thd(at::Tensor tensor, at::Tensor cu_seqlens, int t);
void copy_to_kv_cache(at::Tensor new_k, at::Tensor new_v, at::Tensor k_cache, at::Tensor v_cache,
                      at::Tensor page_table, at::Tensor cu_new_lens, at::Tensor cu_cached_lens,
                      NVTE_QKV_Format kv_format, int b, int max_ctx_len, int max_seq_len,
                      int max_pages_per_seq, bool is_non_paged);
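
// Usage sketch for a non-paged cache update (hypothetical tensors and sizes):
//
//   copy_to_kv_cache(new_k, new_v, k_cache, v_cache, page_table, cu_new_lens, cu_cached_lens,
//                    NVTE_QKV_Format::NVTE_BSHD, batch_size, max_ctx_len, max_seq_len,
//                    max_pages_per_seq, /*is_non_paged=*/true);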

/***************************************************************************************************
 * GEMM
 **************************************************************************************************/

using MaybeTensor = std::optional<at::Tensor>;

std::vector<py::object> gemm(py::handle A, bool transa, py::handle B, bool transb, py::object D,
                             py::handle quantizer, std::optional<DType> out_dtype, MaybeTensor bias,
                             DType bias_type, bool gelu, MaybeTensor gelu_in, bool grad,
                             at::Tensor workspace, size_t workspaceSize, bool accumulate,
                             bool use_split_accumulator, CommOverlapCore *comm_overlap = nullptr,
                             std::optional<CommOverlapType> comm_type = std::nullopt,
                             MaybeTensor extra_output = std::nullopt, bool bulk_overlap = false);
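
// Usage sketch, a plain non-overlapped GEMM (hypothetical Python-side handles `A`, `B`, and
// `quantizer`; `workspace` must satisfy the cuBLAS workspace-size requirement):
//
//   std::vector<py::object> out = gemm(
//       A, /*transa=*/true, B, /*transb=*/false, py::none(), quantizer, DType::kBFloat16,
//       /*bias=*/std::nullopt, DType::kBFloat16, /*gelu=*/false, /*gelu_in=*/std::nullopt,
//       /*grad=*/false, workspace, workspace_size, /*accumulate=*/false,
//       /*use_split_accumulator=*/false);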

void te_atomic_gemm(at::Tensor A, at::Tensor A_scale_inverse, DType A_type,
                    std::vector<int64_t> A_scaling_mode, bool transa, at::Tensor B,
                    at::Tensor B_scale_inverse, DType B_type, std::vector<int64_t> B_scaling_mode,
                    bool transb, at::Tensor D, at::Tensor D_scale, DType D_type, at::Tensor D_amax,
                    at::Tensor bias, DType bias_type, at::Tensor pre_gelu_out, bool grad,
                    at::Tensor workspace, size_t workspaceSize, bool accumulate,
                    bool use_split_accumulator, int math_sm_count, int m_split, int n_split,
                    bool gemm_producer, at::Tensor counter);

std::optional<std::vector<at::Tensor>> te_general_grouped_gemm(
    std::vector<py::handle> A, bool transa, std::vector<py::handle> B, bool transb,
    std::optional<std::vector<at::Tensor>> D, DType D_type, std::vector<int64_t> m_splits,
    std::vector<at::Tensor> bias, DType bias_type, bool single_output,
    std::vector<at::Tensor> pre_gelu_out, bool grad, std::vector<at::Tensor> workspace,
    size_t workspaceSize, bool accumulate, bool use_split_accumulator, int math_sm_count);

#ifdef __HIP_PLATFORM_AMD__
void te_batchgemm(std::vector<at::Tensor> A, at::Tensor A_scale_inverse, int A_offset,
                  transformer_engine::DType A_type, bool transa, std::vector<at::Tensor> B,
                  at::Tensor B_scale_inverse, int B_offset, transformer_engine::DType B_type,
                  bool transb, std::vector<at::Tensor> D, int D_offset, at::Tensor D_scale,
                  transformer_engine::DType D_type, at::Tensor D_amax,
                  std::vector<at::Tensor> bias, transformer_engine::DType bias_type,
                  std::vector<at::Tensor> pre_gelu_out, bool grad,
                  std::vector<at::Tensor> workspace, size_t workspaceSize, bool accumulate,
                  bool use_split_accumulator, int math_sm_count);

std::vector<at::Tensor> te_batchgemm_ts(
    std::vector<at::Tensor> A, at::Tensor A_scale_inverse, int64_t A_offset, int64_t A_type,
    int64_t transa, std::vector<at::Tensor> B, at::Tensor B_scale_inverse, int64_t B_offset,
    int64_t B_type, int64_t transb, std::vector<at::Tensor> D, int64_t D_offset, at::Tensor D_scale,
    int64_t D_type, at::Tensor D_amax, std::vector<at::Tensor> bias, int64_t bias_type,
    std::vector<at::Tensor> pre_gelu_out, int64_t grad, std::vector<at::Tensor> workspace,
    int64_t workspaceSize, int64_t accumulate, int64_t use_split_accumulator);
#endif

/***************************************************************************************************
 * Transpose
 **************************************************************************************************/

std::vector<py::object> fused_multi_quantize(std::vector<at::Tensor> input_list,
                                             std::optional<std::vector<py::object>> output_list,
                                             std::vector<py::handle> quantizer_list, DType otype);

at::Tensor fp8_transpose(at::Tensor input, DType otype,
                         std::optional<at::Tensor> output = std::nullopt);
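
// Usage sketch (assuming `t` is a 2D FP8 tensor; an output tensor is allocated when none is
// supplied):
//
//   at::Tensor t_transposed = fp8_transpose(t, DType::kFloat8E4M3);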

/***************************************************************************************************
 * Activations
 **************************************************************************************************/

py::object gelu(const at::Tensor &input, py::handle quantizer);

py::object relu(const at::Tensor &input, py::handle quantizer);

py::object geglu(const at::Tensor &input, py::handle quantizer);

py::object qgeglu(const at::Tensor &input, py::handle quantizer);

py::object reglu(const at::Tensor &input, py::handle quantizer);

py::object swiglu(const at::Tensor &input, py::handle quantizer);

py::object qgelu(const at::Tensor &input, py::handle quantizer);

py::object srelu(const at::Tensor &input, py::handle quantizer);

py::object dgelu(const at::Tensor &grad, const at::Tensor &input, py::handle quantizer);

py::object drelu(const at::Tensor &grad, const at::Tensor &input, py::handle quantizer);

py::object dgeglu(const at::Tensor &grad, const at::Tensor &input, py::handle quantizer);

py::object dqgeglu(const at::Tensor &grad, const at::Tensor &input, py::handle quantizer);

py::object dreglu(const at::Tensor &grad, const at::Tensor &input, py::handle quantizer);

py::object dswiglu(const at::Tensor &grad, const at::Tensor &input, py::handle quantizer);

py::object dqgelu(const at::Tensor &grad, const at::Tensor &input, py::handle quantizer);

py::object dsrelu(const at::Tensor &grad, const at::Tensor &input, py::handle quantizer);
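
// Forward and backward activations pair up by name (swiglu/dswiglu, gelu/dgelu, ...).
// Usage sketch (hypothetical tensors; a py::none() quantizer requests unquantized output):
//
//   py::object y  = swiglu(x, py::none());
//   py::object dx = dswiglu(dy, x, py::none());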

/***************************************************************************************************
 * LayerNorm
 **************************************************************************************************/

std::vector<py::object> layernorm_bwd(const at::Tensor &dz, const at::Tensor &x,
                                      const at::Tensor &mu, const at::Tensor &rsigma,
                                      const at::Tensor &gamma, const int sm_margin,
                                      const bool zero_centered_gamma);

std::vector<py::object> layernorm_fwd(py::handle input, py::handle weight, MaybeTensor bias,
                                      float eps, py::object ln_out, py::handle quantizer,
                                      DType out_dtype, const int sm_margin,
                                      const bool zero_centered_gamma);
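
// Usage sketch (hypothetical tensors; the forward returns the output together with the saved
// mu/rsigma statistics, which the backward consumes):
//
//   auto fwd_outs = layernorm_fwd(x, gamma, beta, /*eps=*/1e-5f, py::none(), py::none(),
//                                 DType::kBFloat16, /*sm_margin=*/0,
//                                 /*zero_centered_gamma=*/false);
//   auto bwd_outs = layernorm_bwd(dz, x_tensor, mu, rsigma, gamma_tensor, /*sm_margin=*/0,
//                                 /*zero_centered_gamma=*/false);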

/***************************************************************************************************
 * RMSNorm
 **************************************************************************************************/

std::vector<py::object> rmsnorm_bwd(const at::Tensor &dz, const at::Tensor &x,
                                    const at::Tensor &rsigma, const at::Tensor &gamma,
                                    const int sm_margin, const bool zero_centered_gamma);

std::vector<py::object> rmsnorm_fwd(const py::handle &input, const py::handle &weight, float eps,
                                    py::object ln_out, py::handle quantizer, DType otype,
                                    const int sm_margin, const bool zero_centered_gamma);

/***************************************************************************************************
 * Cast
 **************************************************************************************************/

py::object quantize(const at::Tensor &tensor, py::handle quantizer, const py::object &output,
                    std::optional<at::Tensor> noop);

py::object dequantize(const py::handle &input, DType otype);
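
// Usage sketch, a quantize/dequantize round trip (hypothetical Python-side `quantizer`; the
// optional noop tensor can skip the cast at runtime, e.g. under CUDA graphs):
//
//   py::object q_out = quantize(x, quantizer, /*output=*/py::none(), /*noop=*/std::nullopt);
//   py::object hp    = dequantize(q_out, DType::kFloat32);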

/***************************************************************************************************
 * Bias gradient fusions
 **************************************************************************************************/

std::vector<py::object> bgrad_quantize(const at::Tensor &input, py::handle py_quantizer);

std::vector<py::object> dbias_dgelu(const at::Tensor &grad_output, const at::Tensor &act_input,
                                    py::handle quantizer);

std::vector<py::object> dbias_dsilu(const at::Tensor &grad_output, const at::Tensor &act_input,
                                    py::handle quantizer);

std::vector<py::object> dbias_drelu(const at::Tensor &grad_output, const at::Tensor &act_input,
                                    py::handle quantizer);

std::vector<py::object> dbias_dqgelu(const at::Tensor &grad_output, const at::Tensor &act_input,
                                     py::handle quantizer);

std::vector<py::object> dbias_dsrelu(const at::Tensor &grad_output, const at::Tensor &act_input,
                                     py::handle quantizer);
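
// Each fusion computes the bias gradient and the activation backward in a single pass.
// Usage sketch (hypothetical tensors; the returned vector holds the bias gradient and the,
// possibly quantized, activation gradient):
//
//   std::vector<py::object> outs = dbias_dgelu(grad_output, act_input, quantizer);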

/***************************************************************************************************
 * Softmax
 **************************************************************************************************/

at::Tensor scaled_softmax_forward(at::Tensor input, float scale_factor);

at::Tensor scaled_softmax_backward(at::Tensor output_grad_, at::Tensor softmax_results_,
                                   float scale_factor);

at::Tensor scaled_masked_softmax_forward(at::Tensor input, at::Tensor mask, float scale_factor);

at::Tensor scaled_masked_softmax_backward(at::Tensor output_grad_, at::Tensor softmax_results_,
                                          float scale_factor);

at::Tensor scaled_upper_triang_masked_softmax_forward(at::Tensor input, float scale_factor);

at::Tensor scaled_upper_triang_masked_softmax_backward(at::Tensor output_grads_,
                                                       at::Tensor softmax_results_,
                                                       float scale_factor);

at::Tensor scaled_aligned_causal_masked_softmax_forward(at::Tensor input, float scale_factor);

at::Tensor scaled_aligned_causal_masked_softmax_backward(at::Tensor output_grads_,
                                                         at::Tensor softmax_results_,
                                                         float scale_factor);
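
// Forward/backward pairs share the same scale factor. Usage sketch (hypothetical tensors,
// with the usual 1/sqrt(head_dim) scaling):
//
//   at::Tensor probs   = scaled_softmax_forward(logits, /*scale_factor=*/0.125f);
//   at::Tensor dlogits = scaled_softmax_backward(dprobs, probs, /*scale_factor=*/0.125f);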

/***************************************************************************************************
 * FP8 recipe
 **************************************************************************************************/

void compute_amax(const at::Tensor &tensor, at::Tensor &amax);

void compute_channel_colwise_amax(const at::Tensor &tensor, at::Tensor &amax,
                                  at::Tensor &fp8_scale);

void fused_amax_and_scale_update_after_reduction(const at::Tensor &amax_reduction_buffer,
                                                 std::vector<at::Tensor> amax_histories,
                                                 std::vector<at::Tensor> scales,
                                                 const std::string &amax_compute_algo,
                                                 DType fp8_dtype, float margin);

// Note that the start_offset is the logical offset along the tensor dimension.
// The offset in bytes is start_offset * sizeof(tensor.dtype).
void fp8_block_scaling_compute_partial_amax(const at::Tensor &tensor, at::Tensor amax, size_t h,
                                            size_t w, size_t start_offset, size_t block_len);

void fp8_block_scaling_partial_cast(const at::Tensor &inp, at::Tensor out, const at::Tensor &scale,
                                    size_t h, size_t w, size_t start_offset, size_t block_len,
                                    const DType out_dtype);
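
// Usage sketch for casting a slice of a tensor (hypothetical shapes; the amax pass and the
// cast pass must use matching offsets and block lengths):
//
//   fp8_block_scaling_compute_partial_amax(t, amax, h, w, start_offset, /*block_len=*/128);
//   fp8_block_scaling_partial_cast(t, out, scale, h, w, start_offset, /*block_len=*/128,
//                                  DType::kFloat8E4M3);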

/***************************************************************************************************
 * Rotary positional embedding
 **************************************************************************************************/

at::Tensor fused_rope_forward(const at::Tensor &input, const at::Tensor &freqs,
                              const std::optional<at::Tensor> start_positions,
                              const NVTE_QKV_Format qkv_format, const bool interleaved,
                              const std::optional<at::Tensor> cu_seqlens, const int cp_size,
                              const int cp_rank);

at::Tensor fused_rope_backward(const at::Tensor &output_grads, const at::Tensor &freqs,
                               const NVTE_QKV_Format qkv_format, const bool interleaved,
                               const std::optional<at::Tensor> cu_seqlens, const int cp_size,
                               const int cp_rank);
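
// Usage sketch (hypothetical tensors; THD layout without context parallelism):
//
//   at::Tensor q_rot = fused_rope_forward(q, freqs, /*start_positions=*/std::nullopt,
//                                         NVTE_QKV_Format::NVTE_THD, /*interleaved=*/false,
//                                         cu_seqlens, /*cp_size=*/1, /*cp_rank=*/0);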

/***************************************************************************************************
 * Miscellaneous
 **************************************************************************************************/

size_t get_cublasLt_version();

size_t get_cudnn_version();

/***************************************************************************************************
 * Support THD format for Context Parallel
 **************************************************************************************************/

at::Tensor thd_read_half_tensor(const at::Tensor &tensor, const at::Tensor &cu_seqlens,
                                int half_idx);

void thd_second_half_lse_correction(at::Tensor lse, const at::Tensor &lse_per_step,
                                    const at::Tensor &cu_seqlens, bool lse_packed);

at::Tensor thd_read_second_half_lse(const at::Tensor &lse, const at::Tensor &cu_seqlens,
                                    bool lse_packed, int second_half_lse_seqlen);

void thd_out_correction(at::Tensor out, const at::Tensor &out_per_step, const at::Tensor &lse,
                        const at::Tensor &lse_per_step, const at::Tensor &cu_seqlens,
                        bool only_second_half, bool lse_packed);

void thd_grad_correction(at::Tensor grad, const at::Tensor &grad_per_step,
                         const at::Tensor &cu_seqlens, const std::string &first_half,
                         const std::string &second_half);

at::Tensor thd_get_partitioned_indices(const at::Tensor &cu_seqlens, int total_tokens,
                                       int world_size, int rank);
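
// Usage sketch (hypothetical values; each rank reads the token slice assigned to it before
// correcting its partial softmax stats and outputs across CP steps):
//
//   at::Tensor idx  = thd_get_partitioned_indices(cu_seqlens, total_tokens, world_size, rank);
//   at::Tensor half = thd_read_half_tensor(q, cu_seqlens, /*half_idx=*/0);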

/***************************************************************************************************
 * multi_tensor_* kernels
 **************************************************************************************************/

void multi_tensor_scale_cuda(int chunk_size, at::Tensor noop_flag,
                             std::vector<std::vector<at::Tensor>> tensor_lists, float scale);

std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_cuda(
    int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists,
    at::optional<bool> per_tensor_python);

std::tuple<at::Tensor, at::Tensor> multi_tensor_unscale_l2norm_cuda(
    int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists,
    at::Tensor inv_scale, at::optional<bool> per_tensor_python);

void multi_tensor_adam_cuda(int chunk_size, at::Tensor noop_flag,
                            std::vector<std::vector<at::Tensor>> tensor_lists, const float lr,
                            const float beta1, const float beta2, const float epsilon,
                            const int step, const int mode, const int bias_correction,
                            const float weight_decay);
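
// Usage sketch (assuming the Apex-style convention where tensor_lists groups
// {grads, params, exp_avgs, exp_avg_sqs}; mode switches between Adam and AdamW):
//
//   multi_tensor_adam_cuda(/*chunk_size=*/2048 * 32, noop_flag,
//                          {grads, params, exp_avgs, exp_avg_sqs}, lr, /*beta1=*/0.9f,
//                          /*beta2=*/0.999f, /*epsilon=*/1e-8f, step, /*mode=*/0,
//                          /*bias_correction=*/1, /*weight_decay=*/0.01f);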

void multi_tensor_adam_param_remainder_cuda(int chunk_size, at::Tensor noop_flag,
                                            std::vector<std::vector<at::Tensor>> tensor_lists,
                                            const float lr, const float beta1, const float beta2,
                                            const float epsilon, const int step, const int mode,
                                            const int bias_correction, const float weight_decay);

void multi_tensor_adam_fp8_cuda(int chunk_size, at::Tensor noop_flag,
                                std::vector<std::vector<at::Tensor>> tensor_lists, const float lr,
                                const float beta1, const float beta2, const float epsilon,
                                const int step, const int mode, const int bias_correction,
                                const float weight_decay, DType fp8_dtype);

void multi_tensor_adam_capturable_cuda(int chunk_size, at::Tensor noop_flag,
                                       std::vector<std::vector<at::Tensor>> tensor_lists,
                                       at::Tensor lr, const float beta1, const float beta2,
                                       const float epsilon, at::Tensor step, const int mode,
                                       const int bias_correction, const float weight_decay,
                                       at::Tensor inv_scale);

void multi_tensor_adam_capturable_master_cuda(int chunk_size, at::Tensor noop_flag,
                                              std::vector<std::vector<at::Tensor>> tensor_lists,
                                              at::Tensor lr, const float beta1, const float beta2,
                                              const float epsilon, at::Tensor step, const int mode,
                                              const int bias_correction, const float weight_decay,
                                              at::Tensor inv_scale);

void multi_tensor_sgd_cuda(int chunk_size, at::Tensor noop_flag,
                           std::vector<std::vector<at::Tensor>> tensor_lists, float wd,
                           float momentum, float dampening, float lr, bool nesterov, bool first_run,
                           bool wd_after_momentum, float scale);

void multi_tensor_compute_scale_and_scale_inv_cuda(
    int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists,
    float max_fp8, bool force_pow_2_scales, float epsilon);

/***************************************************************************************************
 * Padding
 **************************************************************************************************/

void fused_multi_row_padding(at::Tensor input, at::Tensor output,
                             std::vector<size_t> input_row_list,
                             std::vector<size_t> padded_input_row_list);

/***************************************************************************************************
 * NVSHMEM APIs
 **************************************************************************************************/

void init_nvshmem_backend(c10d::ProcessGroup *process_group);

at::Tensor create_nvshmem_tensor(const std::vector<int64_t> &shape, c10::ScalarType dtype);

void nvshmem_send_on_current_stream(at::Tensor src, at::Tensor dst, int peer, at::Tensor signal);

void nvshmem_wait_on_current_stream(at::Tensor signal, const std::string &wait_kind);

void nvshmem_finalize();
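
// Usage sketch of a one-sided transfer (hypothetical ranks and signal tensor; the accepted
// wait_kind strings are defined by the implementation):
//
//   init_nvshmem_backend(process_group);
//   at::Tensor buf = create_nvshmem_tensor({1024}, at::kBFloat16);
//   nvshmem_send_on_current_stream(src, buf, /*peer=*/1, signal);  // sender
//   nvshmem_wait_on_current_stream(signal, wait_kind);             // receiver
//   nvshmem_finalize();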

}  // namespace transformer_engine::pytorch

/***************************************************************************************************
 * Comm+GEMM Overlap Wrappers
 **************************************************************************************************/

class CommOverlapHelper : torch::CustomClassHolder {
 private:
  bool initialized{false};
  bool backend_is_nccl{false};
  std::map<std::string, c10d::ProcessGroup *> pgs;

 public:
  int myrank = -1;
  int numranks = -1;
  int mylocal = -1;
  int numlocal = -1;
  int mynode = -1;
  int numnodes = -1;

  CommOverlapHelper();

  CommOverlapHelper(c10d::ProcessGroup *world_group,
                    std::optional<c10d::ProcessGroup *> intra_node_group);

  ~CommOverlapHelper();

  void ub_allgather(void *globaldata, size_t globalbytes, void *localdata, size_t localbytes,
                    ExtComm comm);

  void ub_barrier(ExtComm comm);
};

class CommOverlap : torch::CustomClassHolder, public transformer_engine::CommOverlapBase {
 public:
  CommOverlap(const std::vector<size_t> &buffer_shape, at::ScalarType buffer_dtype,
              CommOverlapHelper *helper, int tp_size, int num_splits = 3,
              int num_max_streams = NVTE_COMM_OVERLAP_MAX_STREAMS, int comm_cga_size = 2,
              int gemm_priority = 0, int comm_priority = 0, int num_comm_sm = 16,
              bool set_sm_margin = true, bool atomic_gemm = false,
              bool rs_overlap_first_gemm = false);

  ~CommOverlap() {}

  void copy_into_buffer(const at::Tensor &input, bool local_chunk = false);

  at::Tensor get_buffer(bool local_chunk = false,
                        std::optional<std::vector<int64_t>> shape = std::nullopt);

};  // CommOverlap

class CommOverlapP2P : torch::CustomClassHolder, public transformer_engine::CommOverlapP2PBase {
 public:
  CommOverlapP2P(const std::vector<size_t> &buffer_shape, at::ScalarType buffer_dtype,
                 CommOverlapHelper *helper, int tp_size,
                 transformer_engine::CommOverlapType comm_type,
                 int num_max_streams = NVTE_COMM_OVERLAP_MAX_STREAMS, int comm_cga_size = 2,
                 int gemm_priority = 0, int comm_priority = 0, int num_comm_sm = 3,
                 bool set_sm_margin = true, bool atomic_gemm = false, bool use_ce = true,
                 bool aggregate = false);

  ~CommOverlapP2P() {}

  void copy_into_buffer(const at::Tensor &input, bool local_chunk = false);

  at::Tensor get_buffer(bool local_chunk = false,
                        std::optional<std::vector<int64_t>> shape = std::nullopt);

};  // CommOverlapP2P
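
// Usage sketch (hypothetical process groups and shapes; the remaining constructor arguments
// keep their defaults):
//
//   CommOverlapHelper helper(world_group, intra_node_group);
//   CommOverlap ub({seq_len * batch_size, hidden_size}, at::kBFloat16, &helper, /*tp_size=*/8);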

#endif  // TRANSFORMER_ENGINE_PYTORCH_CSRC_EXTENSIONS_H_