/*************************************************************************
 * Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * See LICENSE for license information.
 ************************************************************************/

#include "common.h"
#include "common/common.h"

NVTE_Fused_Attn_Backend get_fused_attn_backend(
                const transformer_engine::DType q_dtype,
                const transformer_engine::DType kv_dtype,
                NVTE_QKV_Layout qkv_layout,
                NVTE_Bias_Type bias_type,
                NVTE_Mask_Type attn_mask_type,
                float p_dropout,
                size_t num_attn_heads, size_t num_gqa_groups,
                size_t max_seqlen_q, size_t max_seqlen_kv,
                size_t head_dim);
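
// Usage sketch (illustrative only; the shapes and enum choices below are
// hypothetical): query which fused-attention backend, if any, supports a
// given problem before dispatching to the fused kernels.
//
//   NVTE_Fused_Attn_Backend backend = get_fused_attn_backend(
//       transformer_engine::DType::kBFloat16,   // q_dtype
//       transformer_engine::DType::kBFloat16,   // kv_dtype
//       NVTE_QKV_Layout::NVTE_BS3HD, NVTE_Bias_Type::NVTE_NO_BIAS,
//       NVTE_Mask_Type::NVTE_CAUSAL_MASK,
//       /*p_dropout=*/0.0f, /*num_attn_heads=*/16, /*num_gqa_groups=*/16,
//       /*max_seqlen_q=*/2048, /*max_seqlen_kv=*/2048, /*head_dim=*/64);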

std::vector<at::Tensor> fused_attn_fwd_qkvpacked(
                size_t max_seqlen, bool is_training,
                float attn_scale, float p_dropout, bool set_zero,
                NVTE_QKV_Layout qkv_layout,
                NVTE_Bias_Type bias_type,
                NVTE_Mask_Type attn_mask_type,
                const at::Tensor cu_seqlens,
                const at::Tensor QKV,
                const transformer_engine::DType qkv_type,
                const c10::optional<at::Tensor> descale_QKV,
                const c10::optional<at::Tensor> scale_S,
                const c10::optional<at::Tensor> scale_O,
                c10::optional<at::Tensor> amax_S,
                c10::optional<at::Tensor> amax_O,
                const c10::optional<at::Tensor> Bias,
                const c10::optional<at::Generator> rng_gen,
                size_t rng_elts_per_thread);

std::vector<at::Tensor> fused_attn_bwd_qkvpacked(
                size_t max_seqlen, float attn_scale,
                float p_dropout, bool set_zero,
                NVTE_QKV_Layout qkv_layout,
                NVTE_Bias_Type bias_type,
                NVTE_Mask_Type attn_mask_type,
                const at::Tensor cu_seqlens,
                const at::Tensor QKV,
                const at::Tensor O,
                const at::Tensor dO,
                const transformer_engine::DType qkv_type,
                const std::vector<at::Tensor> Aux_CTX_Tensors,
                const c10::optional<at::Tensor> descale_QKV,
                const c10::optional<at::Tensor> descale_S,
                const c10::optional<at::Tensor> descale_O,
                const c10::optional<at::Tensor> descale_dO,
                const c10::optional<at::Tensor> scale_S,
                const c10::optional<at::Tensor> scale_dP,
                const c10::optional<at::Tensor> scale_dQKV,
                c10::optional<at::Tensor> amax_dP,
                c10::optional<at::Tensor> amax_dQKV);

std::vector<at::Tensor> fused_attn_fwd_kvpacked(
                size_t max_seqlen_q, size_t max_seqlen_kv, bool is_training,
                float attn_scale, float p_dropout, bool set_zero,
                NVTE_QKV_Layout qkv_layout,
                NVTE_Bias_Type bias_type,
                NVTE_Mask_Type attn_mask_type,
                const at::Tensor cu_seqlens_q,
                const at::Tensor cu_seqlens_kv,
                const at::Tensor Q,
                const at::Tensor KV,
                const transformer_engine::DType qkv_type,
                const c10::optional<at::Tensor> descale_QKV,
                const c10::optional<at::Tensor> scale_S,
                const c10::optional<at::Tensor> scale_O,
                c10::optional<at::Tensor> amax_S,
                c10::optional<at::Tensor> amax_O,
                const c10::optional<at::Tensor> Bias,
                const c10::optional<at::Generator> rng_gen,
                size_t rng_elts_per_thread);

std::vector<at::Tensor> fused_attn_bwd_kvpacked(
                size_t max_seqlen_q, size_t max_seqlen_kv,
                float attn_scale, float p_dropout, bool set_zero,
                NVTE_QKV_Layout qkv_layout,
                NVTE_Bias_Type bias_type,
                NVTE_Mask_Type attn_mask_type,
                const at::Tensor cu_seqlens_q,
                const at::Tensor cu_seqlens_kv,
                const at::Tensor Q,
                const at::Tensor KV,
                const at::Tensor O,
                const at::Tensor dO,
                const transformer_engine::DType qkv_type,
                const std::vector<at::Tensor> Aux_CTX_Tensors,
                const c10::optional<at::Tensor> descale_QKV,
                const c10::optional<at::Tensor> descale_S,
                const c10::optional<at::Tensor> descale_O,
                const c10::optional<at::Tensor> descale_dO,
                const c10::optional<at::Tensor> scale_S,
                const c10::optional<at::Tensor> scale_dP,
                const c10::optional<at::Tensor> scale_dQKV,
                c10::optional<at::Tensor> amax_dP,
                c10::optional<at::Tensor> amax_dQKV);

std::vector<at::Tensor> fused_attn_fwd(
                size_t max_seqlen_q, size_t max_seqlen_kv, bool is_training,
                float attn_scale, float p_dropout, bool set_zero,
                NVTE_QKV_Layout qkv_layout,
                NVTE_Bias_Type bias_type,
                NVTE_Mask_Type attn_mask_type,
                const at::Tensor cu_seqlens_q,
                const at::Tensor cu_seqlens_kv,
                const at::Tensor Q,
                const at::Tensor K,
                const at::Tensor V,
                const transformer_engine::DType qkv_type,
                const c10::optional<at::Tensor> descale_QKV,
                const c10::optional<at::Tensor> scale_S,
                const c10::optional<at::Tensor> scale_O,
                c10::optional<at::Tensor> amax_S,
                c10::optional<at::Tensor> amax_O,
                const c10::optional<at::Tensor> Bias,
                const c10::optional<at::Generator> rng_gen,
                size_t rng_elts_per_thread);
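
// Usage sketch (illustrative only): in the non-FP8 path every
// descale/scale/amax argument can be c10::nullopt; tensor names are
// hypothetical, and cu_seqlens_* are cumulative-sequence-length tensors.
//
//   std::vector<at::Tensor> out = fused_attn_fwd(
//       /*max_seqlen_q=*/2048, /*max_seqlen_kv=*/2048, /*is_training=*/true,
//       /*attn_scale=*/0.125f, /*p_dropout=*/0.0f, /*set_zero=*/false,
//       NVTE_QKV_Layout::NVTE_BSHD_BSHD_BSHD, NVTE_Bias_Type::NVTE_NO_BIAS,
//       NVTE_Mask_Type::NVTE_CAUSAL_MASK,
//       cu_seqlens_q, cu_seqlens_kv, Q, K, V,
//       transformer_engine::DType::kBFloat16,
//       c10::nullopt, c10::nullopt, c10::nullopt,   // descale_QKV, scale_S, scale_O
//       c10::nullopt, c10::nullopt, c10::nullopt,   // amax_S, amax_O, Bias
//       c10::nullopt, /*rng_elts_per_thread=*/0);
//
// out[0] is the attention output; the remaining entries are auxiliary context
// tensors intended to be passed back to fused_attn_bwd as Aux_CTX_Tensors.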

std::vector<at::Tensor> fused_attn_bwd(
                size_t max_seqlen_q, size_t max_seqlen_kv,
                float attn_scale, float p_dropout, bool set_zero,
                NVTE_QKV_Layout qkv_layout,
                NVTE_Bias_Type bias_type,
                NVTE_Mask_Type attn_mask_type,
                const at::Tensor cu_seqlens_q,
                const at::Tensor cu_seqlens_kv,
                const at::Tensor Q,
                const at::Tensor K,
                const at::Tensor V,
                const at::Tensor O,
                const at::Tensor dO,
                const transformer_engine::DType qkv_type,
                const std::vector<at::Tensor> Aux_CTX_Tensors,
                const c10::optional<at::Tensor> descale_QKV,
                const c10::optional<at::Tensor> descale_S,
                const c10::optional<at::Tensor> descale_O,
                const c10::optional<at::Tensor> descale_dO,
                const c10::optional<at::Tensor> scale_S,
                const c10::optional<at::Tensor> scale_dP,
                const c10::optional<at::Tensor> scale_dQKV,
                c10::optional<at::Tensor> amax_dP,
                c10::optional<at::Tensor> amax_dQKV);

at::Tensor fa_prepare_fwd(at::Tensor qkvi);

at::Tensor fa_prepare_bwd(at::Tensor q, at::Tensor k, at::Tensor v);

void te_gemm(at::Tensor A,
             at::Tensor A_scale_inverse,
             transformer_engine::DType A_type,
             bool transa,
             at::Tensor B,
             at::Tensor B_scale_inverse,
             transformer_engine::DType B_type,
             bool transb,
             at::Tensor D,
             at::Tensor D_scale,
             transformer_engine::DType D_type,
             at::Tensor D_amax,
             at::Tensor bias,
             transformer_engine::DType bias_type,
             at::Tensor pre_gelu_out,
             bool grad,
             at::Tensor workspace,
             size_t workspaceSize,
             bool accumulate,
             bool use_split_accumulator,
             int math_sm_count
);
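
// Usage sketch (illustrative only, assuming non-FP8 inputs, for which the
// scale-inverse and amax tensors are unused and may be left empty): a plain
// BF16 GEMM with no fused bias or GELU epilogue.
//
//   at::Tensor empty;  // disables the optional epilogue inputs/outputs
//   te_gemm(A, /*A_scale_inverse=*/empty, transformer_engine::DType::kBFloat16,
//           /*transa=*/true,
//           B, /*B_scale_inverse=*/empty, transformer_engine::DType::kBFloat16,
//           /*transb=*/false,
//           D, /*D_scale=*/empty, transformer_engine::DType::kBFloat16,
//           /*D_amax=*/empty, /*bias=*/empty,
//           transformer_engine::DType::kBFloat16, /*pre_gelu_out=*/empty,
//           /*grad=*/false, workspace, workspace.numel(),
//           /*accumulate=*/false, /*use_split_accumulator=*/false,
//           /*math_sm_count=*/0);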

void te_atomic_gemm(at::Tensor A,
                    at::Tensor A_scale_inverse,
                    transformer_engine::DType A_type,
                    bool transa,
                    at::Tensor B,
                    at::Tensor B_scale_inverse,
                    transformer_engine::DType B_type,
                    bool transb,
                    at::Tensor D,
                    at::Tensor D_scale,
                    transformer_engine::DType D_type,
                    at::Tensor D_amax,
                    at::Tensor bias,
                    transformer_engine::DType bias_type,
                    at::Tensor pre_gelu_out,
                    bool grad,
                    at::Tensor workspace,
                    size_t workspaceSize,
                    bool accumulate,
                    bool use_split_accumulator,
                    int math_sm_count,
                    int m_split,
                    int n_split,
                    bool gemm_producer,
                    at::Tensor counter
);

void fused_cast_transpose(at::Tensor input,
                          at::Tensor scale,
                          at::Tensor amax,
                          at::Tensor scale_inv,
                          at::Tensor input_cast,
                          at::Tensor input_transpose,
                          transformer_engine::DType otype
);
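
// Usage sketch (illustrative only; allocation details are hypothetical): cast
// to FP8 and produce the transpose in a single pass over the input. Both
// outputs are pre-allocated by the caller.
//
//   at::Tensor input_cast = at::empty({M, N}, opts.dtype(at::kByte));
//   at::Tensor input_transpose = at::empty({N, M}, opts.dtype(at::kByte));
//   fused_cast_transpose(input, scale, amax, scale_inv,
//                        input_cast, input_transpose,
//                        transformer_engine::DType::kFloat8E4M3);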


std::vector<at::Tensor> fused_cast_transpose_bgrad(at::Tensor grad_output,
                                                   at::Tensor scale,
                                                   at::Tensor amax,
                                                   at::Tensor scale_inv,
                                                   transformer_engine::DType otype
);


std::vector<at::Tensor> fused_fp8_transpose_bgrad(at::Tensor grad_output,
                                                  at::Tensor scale,
                                                  at::Tensor amax,
                                                  at::Tensor scale_inv,
                                                  transformer_engine::DType otype,
                                                  transformer_engine::DType grad_bias_type
);


std::vector<at::Tensor> fused_cast_transpose_bgrad_dgelu(at::Tensor grad_output,
                                                         at::Tensor gelu_input,
                                                         at::Tensor scale,
                                                         at::Tensor amax,
                                                         at::Tensor scale_inv,
                                                         transformer_engine::DType otype
);


void fused_multi_cast_transpose(std::vector<at::Tensor> input_list,
                                std::vector<at::Tensor> scale_list,
                                std::vector<at::Tensor> cast_output_list,
                                std::vector<at::Tensor> transposed_output_list,
                                std::vector<at::Tensor> amax_output_list,
                                std::vector<at::Tensor> scale_inv_output_list,
                                transformer_engine::DType otype
);
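
// Usage sketch (illustrative only; names hypothetical): the list variant
// amortizes kernel-launch overhead by handling several tensors in one call;
// all vectors are matched element-by-element.
//
//   fused_multi_cast_transpose({w1, w2}, {scale1, scale2},
//                              {w1_fp8, w2_fp8}, {w1_t, w2_t},
//                              {amax1, amax2}, {scale_inv1, scale_inv2},
//                              transformer_engine::DType::kFloat8E4M3);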


at::Tensor fp8_transpose(at::Tensor input,
                         transformer_engine::DType otype
);

/***************************************************************************************************
 * Activations
 **************************************************************************************************/

at::Tensor gelu(at::Tensor input,
                at::Tensor scale,
                at::Tensor amax,
                at::Tensor scale_inv,
                transformer_engine::DType otype
);
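
// Usage sketch (illustrative only; tensor names hypothetical): each forward
// activation below shares this signature, producing an FP8 output with the
// given scale and recording the observed amax.
//
//   at::Tensor y = gelu(x, scale, amax, scale_inv,
//                       transformer_engine::DType::kFloat8E4M3);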

at::Tensor relu(at::Tensor input,
                at::Tensor scale,
                at::Tensor amax,
                at::Tensor scale_inv,
                transformer_engine::DType otype
);

at::Tensor geglu(at::Tensor input,
                 at::Tensor scale,
                 at::Tensor amax,
                 at::Tensor scale_inv,
                 transformer_engine::DType otype
);

at::Tensor reglu(at::Tensor input,
                 at::Tensor scale,
                 at::Tensor amax,
                 at::Tensor scale_inv,
                 transformer_engine::DType otype
);

at::Tensor swiglu(at::Tensor input,
                  at::Tensor scale,
                  at::Tensor amax,
                  at::Tensor scale_inv,
                  transformer_engine::DType otype
);

at::Tensor dgelu(at::Tensor grad,
                 at::Tensor input,
                 transformer_engine::DType otype
);

at::Tensor drelu(at::Tensor grad,
                 at::Tensor input,
                 transformer_engine::DType otype
);

at::Tensor dgeglu(at::Tensor grad,
                  at::Tensor input,
                  transformer_engine::DType otype
);

at::Tensor dreglu(at::Tensor grad,
                  at::Tensor input,
                  transformer_engine::DType otype
);

at::Tensor dswiglu(at::Tensor grad,
                   at::Tensor input,
                   transformer_engine::DType otype
);

/***************************************************************************************************
 * LayerNorm
 **************************************************************************************************/

std::vector<at::Tensor> layernorm_bwd(const at::Tensor &dz,
                                      const at::Tensor &x,
                                      const at::Tensor &mu,
                                      const at::Tensor &rsigma,
                                      const at::Tensor &gamma,
                                      const int sm_margin,
                                      const bool zero_centered_gamma
);


std::vector<at::Tensor> layernorm_fwd_fp8(const at::Tensor &input,
                                          const at::Tensor &weight,
                                          const at::Tensor &bias,
                                          float eps,
                                          at::Tensor scale,
                                          at::Tensor amax,
                                          at::Tensor scale_inv,
                                          transformer_engine::DType otype,
                                          const int sm_margin,
                                          const bool zero_centered_gamma
);

std::vector<at::Tensor> layernorm_fwd_fp8_noalloc(const at::Tensor &input,
                                                  const at::Tensor &weight,
                                                  const at::Tensor &bias,
                                                  float eps,
                                                  at::Tensor scale,
                                                  at::Tensor ln_out,
                                                  at::Tensor amax,
                                                  at::Tensor scale_inv,
                                                  transformer_engine::DType otype,
                                                  const int sm_margin,
                                                  const bool zero_centered_gamma
);

at::Tensor layernorm_fwd_fp8_inf(const at::Tensor &input,
                                 const at::Tensor &weight,
                                 const at::Tensor &bias,
                                 float eps,
                                 at::Tensor scale,
                                 at::Tensor amax,
                                 at::Tensor scale_inv,
                                 transformer_engine::DType otype,
                                 const bool zero_centered_gamma
);

std::vector<at::Tensor> layernorm_fwd(const at::Tensor &input,
                                      const at::Tensor &weight,
                                      const at::Tensor &bias,
                                      float eps,
                                      const int sm_margin,
                                      const bool zero_centered_gamma
);
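
// Usage sketch (illustrative only): the returned vector carries the
// normalized output followed by the saved statistics (mu, rsigma) that
// layernorm_bwd consumes.
//
//   std::vector<at::Tensor> res = layernorm_fwd(input, weight, bias,
//                                               /*eps=*/1e-5f, /*sm_margin=*/0,
//                                               /*zero_centered_gamma=*/false);
//   at::Tensor ln_out = res[0];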

std::vector<at::Tensor> layernorm_fwd_noalloc(const at::Tensor &input,
                                              const at::Tensor &weight,
                                              const at::Tensor &bias,
                                              at::Tensor ln_out,
                                              float eps,
                                              const int sm_margin,
                                              const bool zero_centered_gamma
);

at::Tensor layernorm_fwd_inf(const at::Tensor &input,
                             const at::Tensor &weight,
                             const at::Tensor &bias,
                             float eps,
                             const bool zero_centered_gamma
);

/***************************************************************************************************
 * RMSNorm
 **************************************************************************************************/

std::vector<at::Tensor> rmsnorm_bwd(const at::Tensor &dz,
                                    const at::Tensor &x,
                                    const at::Tensor &rsigma,
                                    const at::Tensor &gamma,
                                    const int sm_margin,
                                    const bool zero_centered_gamma
);


std::vector<at::Tensor> rmsnorm_fwd_fp8(const at::Tensor &input,
                                        const at::Tensor &weight,
                                        float eps,
                                        at::Tensor scale,
                                        at::Tensor amax,
                                        at::Tensor scale_inv,
                                        transformer_engine::DType otype,
                                        const int sm_margin,
                                        const bool zero_centered_gamma
);

std::vector<at::Tensor> rmsnorm_fwd_fp8_noalloc(const at::Tensor &input,
                                                const at::Tensor &weight,
                                                float eps,
                                                at::Tensor scale,
                                                at::Tensor ln_out,
                                                at::Tensor amax,
                                                at::Tensor scale_inv,
                                                transformer_engine::DType otype,
                                                const int sm_margin,
                                                const bool zero_centered_gamma
);

at::Tensor rmsnorm_fwd_fp8_inf(const at::Tensor &input,
                               const at::Tensor &weight,
                               float eps,
                               at::Tensor scale,
                               at::Tensor amax,
                               at::Tensor scale_inv,
                               transformer_engine::DType otype,
                               const bool zero_centered_gamma
);

std::vector<at::Tensor> rmsnorm_fwd(const at::Tensor &input,
                                    const at::Tensor &weight,
                                    float eps,
                                    const int sm_margin,
                                    const bool zero_centered_gamma
);

std::vector<at::Tensor> rmsnorm_fwd_noalloc(const at::Tensor &input,
                                            const at::Tensor &weight,
                                            at::Tensor ln_out,
                                            float eps,
                                            const int sm_margin,
                                            const bool zero_centered_gamma
);

at::Tensor rmsnorm_fwd_inf(const at::Tensor &input,
                           const at::Tensor &weight,
                           float eps,
                           const bool zero_centered_gamma
);
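
// Usage sketch (illustrative only): the *_inf variants return just the
// normalized tensor and skip saving statistics, for inference-only use.
//
//   at::Tensor y = rmsnorm_fwd_inf(input, weight, /*eps=*/1e-5f,
//                                  /*zero_centered_gamma=*/false);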

/***************************************************************************************************
 * Cast
 **************************************************************************************************/

at::Tensor cast_to_fp8(const at::Tensor &input,
                       const at::Tensor &scale,
                       at::Tensor amax,
                       at::Tensor scale_inv,
                       transformer_engine::DType otype
);


void cast_to_fp8_noalloc(const at::Tensor &input,
                         const at::Tensor &scale,
                         at::Tensor output,
                         at::Tensor amax,
                         at::Tensor scale_inv,
                         transformer_engine::DType otype
);


at::Tensor cast_from_fp8(const at::Tensor &input,
                         const at::Tensor &scale_inv,
                         transformer_engine::DType itype,
                         transformer_engine::DType otype
);
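
// Usage sketch (illustrative only; scale, amax and scale_inv are assumed to
// be single-element float tensors here): an FP8 round trip.
//
//   at::Tensor x_fp8 = cast_to_fp8(x, scale, amax, scale_inv,
//                                  transformer_engine::DType::kFloat8E4M3);
//   at::Tensor x_back = cast_from_fp8(x_fp8, scale_inv,
//                                     transformer_engine::DType::kFloat8E4M3,
//                                     transformer_engine::DType::kBFloat16);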

/***************************************************************************************************
 * Softmax
 **************************************************************************************************/

at::Tensor scaled_softmax_forward(at::Tensor input,
                                  float scale_factor
);
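
// Usage sketch (illustrative only; names hypothetical): computes
// softmax(scale_factor * input) in one fused kernel; keep the forward result
// for the backward call declared below.
//
//   at::Tensor probs = scaled_softmax_forward(logits, /*scale_factor=*/1.0f);
//   at::Tensor dlogits = scaled_softmax_backward(dprobs, probs, 1.0f);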


at::Tensor scaled_softmax_backward(at::Tensor output_grad_,
                                   at::Tensor softmax_results_,
                                   float scale_factor
);


at::Tensor scaled_masked_softmax_forward(at::Tensor input,
                                         at::Tensor mask,
                                         float scale_factor
);


at::Tensor scaled_masked_softmax_backward(at::Tensor output_grad_,
                                          at::Tensor softmax_results_,
                                          float scale_factor
);


at::Tensor scaled_upper_triang_masked_softmax_forward(at::Tensor input,
                                                      float scale_factor
);


at::Tensor scaled_upper_triang_masked_softmax_backward(at::Tensor output_grads_,
                                                       at::Tensor softmax_results_,
                                                       float scale_factor
);

/***************************************************************************************************
 * Rotary positional embedding
 **************************************************************************************************/

at::Tensor fused_rope_forward(const at::Tensor &input,
                              const at::Tensor &freqs,
                              const bool transpose_output_memory
);

at::Tensor fused_rope_backward(const at::Tensor &output_grads,
                               const at::Tensor &freqs,
                               const bool transpose_output_memory
);

at::Tensor fused_rope_thd_forward(const at::Tensor &input,
                                  const at::Tensor &cu_seqlens,
                                  const at::Tensor &freqs
);

at::Tensor fused_rope_thd_backward(const at::Tensor &output_grads,
                                   const at::Tensor &cu_seqlens,
                                   const at::Tensor &freqs
);
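
// Usage sketch (illustrative only; names hypothetical): apply rotary position
// embeddings given a precomputed freqs tensor; the thd variants additionally
// take cu_seqlens for packed variable-length batches.
//
//   at::Tensor q_rot = fused_rope_forward(q, freqs,
//                                         /*transpose_output_memory=*/false);
//   at::Tensor dq = fused_rope_backward(dq_rot, freqs,
//                                       /*transpose_output_memory=*/false);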

/***************************************************************************************************
 * Misc
 **************************************************************************************************/

size_t get_cublasLt_version();

size_t get_cudnn_version();

bool userbuf_comm_available();

void placeholder();