/*************************************************************************
 * Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * See LICENSE for license information.
 ************************************************************************/

#ifndef TRANSFORMER_ENGINE_JAX_CSRC_FP8_MODULES_H_
#define TRANSFORMER_ENGINE_JAX_CSRC_FP8_MODULES_H_

#include <cublasLt.h>
#include <cublas_v2.h>
#include <cuda_runtime_api.h>
#include <cudnn.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <transformer_engine/comm_gemm_overlap.h>
#include <transformer_engine/normalization.h>
#include <transformer_engine/transformer_engine.h>

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

#include "common/common.h"
#include "common/util/logging.h"
#include "extensions/ffi.h"
#include "extensions/misc.h"
#include "extensions/utils.h"
#include "transformer_engine/activation.h"
#include "transformer_engine/multi_stream.h"

namespace transformer_engine {
namespace jax {

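// Attribute bundles for the activation kernels. These mirror dictionary
// attributes passed from the Python side and are decoded through the
// XLA_FFI_REGISTER_STRUCT_ATTR_DECODING declarations at the bottom of this
// header, so the member names must match the attribute keys ("limit", "alpha").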
struct ClampedSwigluConfig {
  float limit;
  float alpha;
};

struct ActivationConfig {
  ClampedSwigluConfig clamped_swiglu;
};

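// True only for the two FP8 dtypes, e.g. use_fp8(DType::kFloat8E4M3) == true
// and use_fp8(DType::kFloat32) == false.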
inline bool use_fp8(DType type) { return type == DType::kFloat8E4M3 || type == DType::kFloat8E5M2; }

// Activation
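// Each *Handler below is an XLA FFI custom-call entry point; where a matching
// *InitializeHandler exists, it runs in the call's initialization stage for
// one-time setup (see the handler registrations in the corresponding .cpp
// files for the exact binding).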

XLA_FFI_DECLARE_HANDLER_SYMBOL(ActLuHandler);
XLA_FFI_DECLARE_HANDLER_SYMBOL(ActLuInitializeHandler);

XLA_FFI_DECLARE_HANDLER_SYMBOL(DActLuDBiasQuantizeHandler);
XLA_FFI_DECLARE_HANDLER_SYMBOL(DActLuDBiasQuantizeInitializeHandler);
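// Workspace-size query for the fused dact-dbias-quantize kernel, exposed via
// pybind11 so the caller can pre-allocate buffers. Illustrative call, with
// hypothetical argument values:
//   pybind11::tuple sizes = GetDActDBiasQuantizeWorkspaceSizes(
//       batch_size, hidden_size, DType::kBFloat16, DType::kFloat8E4M3,
//       scaling_mode, /*is_2x=*/false);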

pybind11::tuple GetDActDBiasQuantizeWorkspaceSizes(size_t batch_size, size_t hidden_size,
                                                   DType in_dtype, DType out_dtype,
                                                   JAXX_Scaling_Mode scaling_mode, bool is_2x);

// Normalization
XLA_FFI_DECLARE_HANDLER_SYMBOL(NormForwardInitializeHandler);
XLA_FFI_DECLARE_HANDLER_SYMBOL(NormForwardHandler);

XLA_FFI_DECLARE_HANDLER_SYMBOL(NormBackwardInitializeHandler);
XLA_FFI_DECLARE_HANDLER_SYMBOL(NormBackwardHandler);

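// Workspace-size queries for the fused normalization kernels. The forward
// variant additionally depends on the output dtype, scaling mode, and the
// is_training flag, while the backward variant does not.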
pybind11::tuple GetNormForwardWorkspaceSizes(size_t batch_size, size_t hidden_size, DType in_dtype,
                                             DType w_dtype, DType out_dtype,
                                             NVTE_Norm_Type norm_type,
                                             JAXX_Scaling_Mode scaling_mode,
                                             bool zero_centered_gamma, float epsilon, int sm_margin,
                                             bool is_training);

pybind11::tuple GetNormBackwardWorkspaceSizes(size_t batch_size, size_t hidden_size, DType in_dtype,
                                              DType w_dtype, NVTE_Norm_Type norm_type,
                                              bool zero_centered_gamma, int sm_margin);

// Quantization
XLA_FFI_DECLARE_HANDLER_SYMBOL(DBiasQuantizeHandler);

XLA_FFI_DECLARE_HANDLER_SYMBOL(GroupedQuantizeHandler);

XLA_FFI_DECLARE_HANDLER_SYMBOL(DequantizeHandler);

pybind11::tuple GetDBiasQuantizeWorkspaceSizes(size_t batch_size, size_t hidden_size,
                                               DType in_dtype, DType out_dtype,
                                               JAXX_Scaling_Mode scaling_mode,
                                               QuantizeLayout q_layout);

// Softmax
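// Fused scaled-softmax kernels in three masking variants (unmasked,
// explicitly masked, and upper-triangular/causal masked), each with a
// forward and a backward entry point.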
XLA_FFI_DECLARE_HANDLER_SYMBOL(ScaledSoftmaxForwardHandler);

XLA_FFI_DECLARE_HANDLER_SYMBOL(ScaledSoftmaxBackwardHandler);

XLA_FFI_DECLARE_HANDLER_SYMBOL(ScaledMaskedSoftmaxForwardHandler);

XLA_FFI_DECLARE_HANDLER_SYMBOL(ScaledMaskedSoftmaxBackwardHandler);

XLA_FFI_DECLARE_HANDLER_SYMBOL(ScaledUpperTriangMaskedSoftmaxForwardHandler);

XLA_FFI_DECLARE_HANDLER_SYMBOL(ScaledUpperTriangMaskedSoftmaxBackwardHandler);

// Attention
XLA_FFI_DECLARE_HANDLER_SYMBOL(FusedAttnForwardHandler);

XLA_FFI_DECLARE_HANDLER_SYMBOL(FusedAttnBackwardHandler);

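// Queries which fused-attention backend, if any, supports the given problem
// configuration, so the caller can choose between the fused path and an
// unfused fallback.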
NVTE_Fused_Attn_Backend GetFusedAttnBackend(bool is_training, DType q_dtype, DType kv_dtype,
                                            NVTE_QKV_Layout qkv_layout, NVTE_Bias_Type bias_type,
                                            NVTE_Mask_Type mask_type, float dropout_probability,
                                            size_t q_num_heads, size_t kv_num_heads,
                                            size_t q_max_seqlen, size_t kv_max_seqlen,
                                            size_t qk_head_dim, size_t v_head_dim,
                                            int64_t window_size_left, int64_t window_size_right);

pybind11::tuple GetFusedAttnForwardWorkspaceSizes(
    size_t input_batch, size_t bias_batch, size_t q_max_seqlen, size_t kv_max_seqlen,
    size_t attn_heads, size_t num_gqa_groups, size_t bias_heads, size_t qk_head_dim,
    size_t v_head_dim, float scaling_factor, float dropout_probability, NVTE_Bias_Type bias_type,
    NVTE_Mask_Type mask_type, NVTE_QKV_Layout qkv_layout, DType dtype, bool is_training,
    size_t max_segments_per_seq, int64_t window_size_left, int64_t window_size_right);

pybind11::tuple GetFusedAttnBackwardWorkspaceSizes(
    size_t input_batch, size_t bias_batch, size_t q_max_seqlen, size_t kv_max_seqlen,
    size_t attn_heads, size_t num_gqa_groups, size_t bias_heads, size_t qk_head_dim,
    size_t v_head_dim, float scaling_factor, float dropout_probability, NVTE_Bias_Type bias_type,
    NVTE_Mask_Type mask_type, NVTE_QKV_Layout qkv_layout, DType dtype, bool is_training,
    bool deterministic, size_t max_segments_per_seq, int64_t window_size_left,
    int64_t window_size_right);

// GEMM
XLA_FFI_DECLARE_HANDLER_SYMBOL(GemmHandler);
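// Presumably performs one-time setup of the communication/GEMM-overlap
// resources declared in <transformer_engine/comm_gemm_overlap.h> before the
// first overlapped GEMM call.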
XLA_FFI_DECLARE_HANDLER_SYMBOL(CollectiveGemmInitHandler);

// Grouped GEMM
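// GroupedGemmD2HGroupSizesHandler copies the per-group sizes from device to
// host (D2H), which the host-side launch logic presumably needs before
// issuing the grouped GEMM via GroupedGemmHandler.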
XLA_FFI_DECLARE_HANDLER_SYMBOL(GroupedGemmD2HGroupSizesHandler);
XLA_FFI_DECLARE_HANDLER_SYMBOL(GroupedGemmHandler);

// cuDNN helpers
XLA_FFI_DECLARE_HANDLER_SYMBOL(CudnnHandleInitHandler);

// cuBLAS helpers
XLA_FFI_DECLARE_HANDLER_SYMBOL(CublasHandleInitHandler);
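// Both init handlers above are one-time setup entry points that create the
// per-device cuDNN/cuBLAS handles later reused by the compute handlers.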

}  // namespace jax
}  // namespace transformer_engine

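// Struct attribute decoding: these registrations let XLA FFI unpack a
// dictionary attribute into the config structs defined above; the
// StructMember names must match both the struct fields and the attribute
// keys. Illustrative decoded usage inside a handler (hypothetical names):
//   float limit = activation_config.clamped_swiglu.limit;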
XLA_FFI_REGISTER_STRUCT_ATTR_DECODING(transformer_engine::jax::ClampedSwigluConfig,
                                      ::xla::ffi::StructMember<float>("limit"),
                                      ::xla::ffi::StructMember<float>("alpha"));

XLA_FFI_REGISTER_STRUCT_ATTR_DECODING(
    transformer_engine::jax::ActivationConfig,
    ::xla::ffi::StructMember<transformer_engine::jax::ClampedSwigluConfig>("clamped_swiglu"));

// ENUM_ATTR and DICT_ATTR decoding needs to be registered in the global namespace
XLA_FFI_REGISTER_ENUM_ATTR_DECODING(transformer_engine::jax::JAXX_Scaling_Mode);
XLA_FFI_REGISTER_ENUM_ATTR_DECODING(transformer_engine::jax::JAXX_Collective_Op);

#endif  // TRANSFORMER_ENGINE_JAX_CSRC_FP8_MODULES_H_