#include <ATen/cuda/CUDAContext.h>
#include <cuda_fp8.h>

#include <cassert>
#include <cmath>
#include <cstdint>
#include <type_traits>

#include <flashinfer/vec_dtypes.cuh>

#include "utils.h"
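
// Per-token-group 8-bit quantization: every contiguous `group_size` elements
// of the input get one scale, computed from the group's absolute maximum, and
// are quantized to int8 or FP8 E4M3. Scales are written either as fp32
// (row- or column-major) or, in the column-major case only, packed as UE8M0
// exponent bytes.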

// Reduce the running max across the 16 threads that share one group, using
// XOR butterfly shuffles over strides 8/4/2/1.
__device__ __forceinline__ float GroupReduceMax(float val, const int tid) {
  unsigned mask = 0xffff;

  val = fmaxf(val, __shfl_xor_sync(mask, val, 8));
  val = fmaxf(val, __shfl_xor_sync(mask, val, 4));
  val = fmaxf(val, __shfl_xor_sync(mask, val, 2));
  val = fmaxf(val, __shfl_xor_sync(mask, val, 1));
  return val;
}
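
// Thread mapping: each group of `group_size` elements is reduced and quantized
// by 16 cooperating threads, and each block handles `groups_per_block` groups,
// so up to two groups share one 32-thread warp.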
template <
    typename T,
    typename DST_DTYPE,
    bool IS_COLUMN_MAJOR = false,
    bool SCALE_UE8M0 = false,
    typename scale_packed_t = std::conditional_t<SCALE_UE8M0, uint32_t, float>>
__global__ void per_token_group_quant_8bit_kernel(
    const T* __restrict__ input,
    void* __restrict__ output_q,
    scale_packed_t* __restrict__ output_s,
    const int group_size,
    const int num_groups,
    const int groups_per_block,
    const float eps,
    const float min_8bit,
    const float max_8bit,
    const int num_groups_per_row = 0,
    const int scale_stride = 0) {
  const int threads_per_group = 16;
  const int64_t local_group_id = threadIdx.x / threads_per_group;
  const int lane_id = threadIdx.x % threads_per_group;

  const int64_t block_group_id = blockIdx.x * groups_per_block;
  const int64_t global_group_id = block_group_id + local_group_id;
  const int64_t block_group_offset = global_group_id * group_size;

  float local_absmax = eps;

  using scale_element_t = std::conditional_t<SCALE_UE8M0, uint8_t, float>;
  static_assert(sizeof(scale_packed_t) % sizeof(scale_element_t) == 0);

  const T* group_input = input + block_group_offset;
  DST_DTYPE* group_output = static_cast<DST_DTYPE*>(output_q) + block_group_offset;
  scale_element_t* scale_output;

  if constexpr (IS_COLUMN_MAJOR) {
    const int num_elems_per_pack = static_cast<int>(sizeof(scale_packed_t) / sizeof(scale_element_t));
    const int row_idx = global_group_id / num_groups_per_row;
    const int col_idx_unpacked = global_group_id % num_groups_per_row;
    const int col_idx = col_idx_unpacked / num_elems_per_pack;
    const int pack_idx = col_idx_unpacked % num_elems_per_pack;
    scale_output = reinterpret_cast<scale_element_t*>(output_s) +
                   (col_idx * scale_stride * num_elems_per_pack + row_idx * num_elems_per_pack + pack_idx);
  } else {
    static_assert(!SCALE_UE8M0);
    scale_output = output_s + global_group_id;
  }

  constexpr uint32_t vec_size = 16 / sizeof(T);
  using vec_t = flashinfer::vec_t<T, vec_size>;

  const int32_t num_vec_elems = group_size / vec_size;

  // Pass 1: find the group's absolute maximum with 16-byte vectorized loads.
  for (int32_t i = lane_id; i < num_vec_elems; i += 16) {
    vec_t input_vec;
    input_vec.cast_load(group_input + i * vec_size);

#pragma unroll
    for (uint32_t j = 0; j < vec_size; ++j) {
      float val = static_cast<float>(input_vec[j]);
      float abs_val = fabsf(val);
      local_absmax = fmaxf(local_absmax, abs_val);
    }
  }

  local_absmax = GroupReduceMax(local_absmax, lane_id);

  float y_s = local_absmax / max_8bit;
  if constexpr (SCALE_UE8M0) {
    // Round the scale up to the next power of two so it is exactly
    // representable as a UE8M0 exponent.
    y_s = exp2f(ceilf(log2f(fmaxf(y_s, 1e-10f))));
  }

  // TODO can optimize
  scale_element_t y_s_quant;
  if constexpr (SCALE_UE8M0) {
    // Store only the biased exponent: log2(y_s) + 127.
    y_s_quant = (uint8_t)(((int)log2f(y_s)) + 127);
  } else {
    y_s_quant = y_s;
  }

  // One thread per group writes the scale.
  if (lane_id == 0) {
    *scale_output = y_s_quant;
  }

  // Pass 2: re-read the group, divide by the scale, clamp, and store.
  for (int32_t i = lane_id; i < num_vec_elems; i += 16) {
    vec_t input_vec;
    input_vec.cast_load(group_input + i * vec_size);

#pragma unroll
    for (uint32_t j = 0; j < vec_size; ++j) {
      float val = static_cast<float>(input_vec[j]);
      float q_val = fminf(fmaxf(val / y_s, min_8bit), max_8bit);
      group_output[i * vec_size + j] = DST_DTYPE(q_val);
    }
  }
}
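
// Worked example (illustrative numbers, not from the original source): for FP8
// E4M3 with max_8bit = 448 and a group absmax of 300, y_s = 300/448 ≈ 0.67.
// With SCALE_UE8M0 it is rounded up to 2^0 = 1.0 and stored as the biased
// exponent 0 + 127 = 127; dequantization is then q * 2^(e - 127).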

void sgl_per_token_group_quant_8bit(
    torch::Tensor input,
    torch::Tensor output_q,
    torch::Tensor output_s,
    int64_t group_size,
    double eps,
    double min_8bit,
    double max_8bit,
    bool scale_ue8m0 = false) {
  CHECK_INPUT(input);
  CHECK_INPUT(output_q);

  const int num_groups = input.numel() / group_size;

  CHECK_EQ(input.numel() % group_size, 0);
  CHECK_EQ(output_s.dim(), 2);

  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  constexpr int THREADS_PER_GROUP = 16;

  // Pack as many groups per block as evenly divides num_groups, up to 16
  // groups (256 threads per block).
  int groups_per_block = 1;

  if (num_groups % 16 == 0) {
    groups_per_block = 16;
  } else if (num_groups % 8 == 0) {
    groups_per_block = 8;
  } else if (num_groups % 4 == 0) {
    groups_per_block = 4;
  } else if (num_groups % 2 == 0) {
    groups_per_block = 2;
  }

  auto dst_type = output_q.scalar_type();
  const int num_blocks = num_groups / groups_per_block;
  const int num_threads = groups_per_block * THREADS_PER_GROUP;

  // A column-major scale layout is detected from the tensor strides.
  const bool is_column_major = output_s.stride(0) < output_s.stride(1);
  const int hidden_dim = input.size(input.dim() - 1);
  const int num_groups_per_row = hidden_dim / group_size;
  const int scale_stride = output_s.stride(1);

#define LAUNCH_KERNEL(T, DST_DTYPE)                                                               \
  do {                                                                                            \
    dim3 grid(num_blocks);                                                                        \
    dim3 block(num_threads);                                                                      \
    if (is_column_major) {                                                                        \
      if (scale_ue8m0) {                                                                          \
        per_token_group_quant_8bit_kernel<T, DST_DTYPE, true, true><<<grid, block, 0, stream>>>(  \
            static_cast<T*>(input.data_ptr()),                                                    \
            output_q.data_ptr(),                                                                  \
            static_cast<uint32_t*>(output_s.data_ptr()),                                          \
            group_size,                                                                           \
            num_groups,                                                                           \
            groups_per_block,                                                                     \
            (float)eps,                                                                           \
            (float)min_8bit,                                                                      \
            (float)max_8bit,                                                                      \
            num_groups_per_row,                                                                   \
            scale_stride);                                                                        \
      } else {                                                                                    \
        per_token_group_quant_8bit_kernel<T, DST_DTYPE, true, false><<<grid, block, 0, stream>>>( \
            static_cast<T*>(input.data_ptr()),                                                    \
            output_q.data_ptr(),                                                                  \
            static_cast<float*>(output_s.data_ptr()),                                             \
            group_size,                                                                           \
            num_groups,                                                                           \
            groups_per_block,                                                                     \
            (float)eps,                                                                           \
            (float)min_8bit,                                                                      \
            (float)max_8bit,                                                                      \
            num_groups_per_row,                                                                   \
            scale_stride);                                                                        \
      }                                                                                           \
    } else {                                                                                      \
      assert(!scale_ue8m0);                                                                       \
      per_token_group_quant_8bit_kernel<T, DST_DTYPE, false><<<grid, block, 0, stream>>>(         \
          static_cast<T*>(input.data_ptr()),                                                      \
          output_q.data_ptr(),                                                                    \
          static_cast<float*>(output_s.data_ptr()),                                               \
          group_size,                                                                             \
          num_groups,                                                                             \
          groups_per_block,                                                                       \
          (float)eps,                                                                             \
          (float)min_8bit,                                                                        \
          (float)max_8bit);                                                                       \
    }                                                                                             \
  } while (0)
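
  // Dispatch over the floating-point input dtype via the utils.h macro
  // (fp16/bf16 variants, judging by its name), then branch on the 8-bit
  // destination dtype.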

  DISPATCH_PYTORCH_DTYPE_TO_CTYPE_FLOAT_FP16(input.scalar_type(), scalar_t, [&] {
    if (dst_type == at::ScalarType::Char) {
      LAUNCH_KERNEL(scalar_t, int8_t);
      return true;
    } else if (dst_type == at::ScalarType::Float8_e4m3fn) {
      LAUNCH_KERNEL(scalar_t, __nv_fp8_e4m3);
      return true;
    }
    return false;
  });

#undef LAUNCH_KERNEL
}

void sgl_per_token_group_quant_int8(
    torch::Tensor input,
    torch::Tensor output_q,
    torch::Tensor output_s,
    int64_t group_size,
    double eps,
    double int8_min,
    double int8_max) {
  sgl_per_token_group_quant_8bit(input, output_q, output_s, group_size, eps, int8_min, int8_max);
}

void sgl_per_token_group_quant_fp8(
    torch::Tensor input,
    torch::Tensor output_q,
    torch::Tensor output_s,
    int64_t group_size,
    double eps,
    double fp8_min,
    double fp8_max,
    bool scale_ue8m0) {
  sgl_per_token_group_quant_8bit(input, output_q, output_s, group_size, eps, fp8_min, fp8_max, scale_ue8m0);
}
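
// Minimal host-side usage sketch (illustrative, assuming this file is built
// into a torch extension; shapes and constants are examples, not part of the
// original source):
//
//   const int64_t group_size = 128;
//   torch::Tensor input = torch::randn(
//       {num_tokens, hidden_dim},
//       torch::dtype(torch::kHalf).device(torch::kCUDA));
//   torch::Tensor output_q = torch::empty_like(
//       input, input.options().dtype(at::ScalarType::Float8_e4m3fn));
//   torch::Tensor output_s = torch::empty(
//       {num_tokens, hidden_dim / group_size},
//       input.options().dtype(torch::kFloat));
//   sgl_per_token_group_quant_fp8(
//       input, output_q, output_s, group_size,
//       /*eps=*/1e-10, /*fp8_min=*/-448.0, /*fp8_max=*/448.0,
//       /*scale_ue8m0=*/false);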