/*************************************************************************
 * Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * See LICENSE for license information.
 ************************************************************************/

#include "common.h"

#include "c10/util/ArrayRef.h"
#include "pybind.h"
#include "transformer_engine/transformer_engine.h"

namespace transformer_engine::pytorch {

/*! \brief Convert the shape of packed FP4 data back to the original shape.
 *
 *  Each byte packs two FP4 values, so the last dimension is doubled; for
 *  transposed data the leading dimension is moved back to the end.
 */
std::vector<size_t> convert_shape_back_from_fp4(const std::vector<size_t>& shape, bool transpose) {
  std::vector<size_t> ret;
  size_t start_idx = (transpose) ? 1 : 0;
  for (size_t i = start_idx; i < shape.size() - 1; ++i) {
    ret.push_back(shape[i]);
  }
  ret.push_back(shape.back() * 2);
  if (transpose) {
    ret.push_back(shape.front());
  }
  return ret;
}
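// Illustrative examples: with two FP4 values packed per byte, a packed shape
// of {128, 32} maps back to {128, 64} for transpose == false, and to
// {64, 128} for transpose == true.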

std::vector<size_t> getTensorShape(const at::Tensor& t) {
  std::vector<size_t> shape;
  for (auto s : t.sizes()) {
    shape.push_back(s);
  }
  return shape;
}

NVTEShape convertTorchShape(const c10::IntArrayRef torch_shape) {
  NVTEShape ret;
  ret.ndim = torch_shape.size();
  constexpr size_t max_dimensions = sizeof(ret.data) / sizeof(size_t);
  NVTE_CHECK(ret.ndim <= max_dimensions,
             "Torch tensor has too many dimensions. Max supported: ", max_dimensions, ", but got ",
             ret.ndim, ".");
  for (size_t i = 0; i < ret.ndim; ++i) {
    const auto& v = torch_shape[i];
    ret.data[i] = static_cast<size_t>(v);
  }
  return ret;
}
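// For example, a Torch shape of {8, 16} yields NVTEShape{ndim = 2,
// data = {8, 16, ...}}; entries of data beyond ndim are left unset.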

std::unique_ptr<Quantizer> convert_quantizer(py::handle quantizer) {
  init_extension();
  if (quantizer.is_none()) {
    return std::make_unique<NoneQuantizer>(quantizer);
  }
  for (auto [_check_type, check_quantizer_type, _create_tensor, create_quantizer] :
       detail::custom_types_converters) {
    if (check_quantizer_type(quantizer.ptr())) {
      return create_quantizer(quantizer);
    }
  }

  NVTE_ERROR("Unexpected type for quantizer");
}
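// Note: detail::custom_types_converters is presumably populated during
// init_extension(); each entry bundles the type-check and factory callbacks
// for one custom tensor/quantizer pair (e.g. a Float8 tensor and its
// quantizer).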

transformer_engine::DType getTransformerEngineFP8Type(bool e4m3_if_hybrid,
                                                      const std::string& fp8_recipe) {
  // The E4M3 recipe, or the HYBRID recipe in the forward pass, uses E4M3;
  // everything else uses E5M2.
  if ((fp8_recipe == "E4M3") || ((fp8_recipe == "HYBRID") && e4m3_if_hybrid)) {
    return transformer_engine::DType::kFloat8E4M3;
  }
  return transformer_engine::DType::kFloat8E5M2;
}
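// For example, getTransformerEngineFP8Type(true, "HYBRID") returns
// kFloat8E4M3 (forward pass), while getTransformerEngineFP8Type(false,
// "HYBRID") returns kFloat8E5M2 (backward pass).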

TensorWrapper makeTransformerEngineTensor(py::handle tensor, py::handle quantizer) {
  NVTE_CHECK(!tensor.is_none(), "Tensor is not allocated!");
  std::unique_ptr<Quantizer> my_quantizer = convert_quantizer(quantizer);
  // Check that the tensor type matches the quantizer type:
  //   MXFP8 tensor  -> MXFP8 quantizer
  //   Float8 tensor -> delayed-scaling or current-scaling quantizer
  // During dequantize the quantizer is unknown, so it is passed in as a
  // NoneQuantizer.
  for (auto [check_type, check_quantizer_type, create_tensor, _] :
       detail::custom_types_converters) {
    if (check_type(tensor.ptr())) {
      if (!(quantizer.is_none() || check_quantizer_type(quantizer.ptr()))) {
        continue;
      }
      return create_tensor(tensor, my_quantizer.get());
    }
  }
  NVTE_CHECK(dynamic_cast<NoneQuantizer*>(my_quantizer.get()) != nullptr,
             "Unexpected quantization params type.");

  // Regular PyTorch tensor
  at::Tensor torch_tensor = tensor.cast<at::Tensor>();

  // #TODO (pgadzinski) - needed in attention for non-contiguous tensors.
  //if (!torch_tensor.is_contiguous()) {
  //  torch_tensor = torch_tensor.contiguous();
  //}
  auto ret = TensorWrapper(my_quantizer->get_scaling_mode());
  ret.set_rowwise_data(torch_tensor.data_ptr(),
                       GetTransformerEngineDType(torch_tensor.scalar_type()),
                       getTensorShape(torch_tensor));
  my_quantizer->set_quantization_params(&ret);
  return ret;
}
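// Note: the returned TensorWrapper aliases the tensor's storage rather than
// copying it, so the Python-side tensor must stay alive for as long as the
// wrapper is in use.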

transformer_engine::TensorWrapper makeTransformerEngineTensor(
    void* data_ptr, const NVTEShape& shape, const transformer_engine::DType type) {
  return transformer_engine::TensorWrapper(data_ptr, shape, type);
}

transformer_engine::TensorWrapper makeTransformerEngineTensor(
    void* data_ptr, const std::vector<size_t>& shape, const transformer_engine::DType type) {
  return transformer_engine::TensorWrapper(data_ptr, shape, type);
}

transformer_engine::TensorWrapper makeTransformerEngineTensor(at::Tensor tensor) {
  transformer_engine::DType dtype = GetTransformerEngineDType(tensor.scalar_type());
  return makeTransformerEngineTensor(tensor.data_ptr(), getTensorShape(tensor), dtype);
}

std::tuple<std::vector<transformer_engine::TensorWrapper>, std::vector<std::vector<NVTETensor>>,
           std::vector<NVTETensor*>, size_t, size_t>
makeTransformerEngineTensorList(std::vector<std::vector<at::Tensor>> at_tensor_lists) {
  size_t num_lists = at_tensor_lists.size();

  NVTE_CHECK(num_lists > 0, "List of tensors is empty.");

  size_t num_tensors = at_tensor_lists[0].size();

  std::vector<std::vector<NVTETensor>> nvte_tensor_lists;
  std::vector<NVTETensor*> nvte_tensor_list_ptrs;
  std::vector<transformer_engine::TensorWrapper> tensorWrappers;
  nvte_tensor_lists.reserve(num_lists);
  nvte_tensor_list_ptrs.reserve(num_lists);
  tensorWrappers.reserve(num_lists * num_tensors);

  for (const auto& at_list : at_tensor_lists) {
    NVTE_CHECK(at_list.size() == num_tensors,
               "All tensor lists must contain the same number of tensors.");
    std::vector<NVTETensor> te_list;
    te_list.reserve(num_tensors);

    for (const auto& at_tensor : at_list) {
      tensorWrappers.push_back(makeTransformerEngineTensor(at_tensor));
      te_list.push_back(tensorWrappers.back().data());
    }

    nvte_tensor_lists.push_back(std::move(te_list));
  }

  for (auto& te_tensor_list : nvte_tensor_lists) {
    nvte_tensor_list_ptrs.push_back(te_tensor_list.data());
  }

  return std::make_tuple(std::move(tensorWrappers), std::move(nvte_tensor_lists),
                         std::move(nvte_tensor_list_ptrs), num_lists, num_tensors);
}
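// Note: the raw NVTETensor handles in the returned lists are owned by the
// TensorWrapper objects in the first tuple element, so the wrappers must be
// kept alive for as long as the NVTETensor* pointers are dereferenced.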

transformer_engine::TensorWrapper makeTransformerEngineTensor(
    void* data_ptr, const std::vector<size_t>& shape, const transformer_engine::DType type,
    void* amax_ptr, void* scale_ptr, void* scale_inv_ptr, std::vector<size_t> scale_inv_shape,
    NVTEScalingMode scaling_mode) {
  TensorWrapper ret(scaling_mode);
  ret.set_rowwise_data(data_ptr, type, shape);
  const std::vector<size_t> meta_shape{1};
  ret.set_amax(amax_ptr, DType::kFloat32, meta_shape);
  ret.set_scale(scale_ptr, DType::kFloat32, meta_shape);
  auto scale_inv_dtype =
      (scaling_mode == NVTE_MXFP8_1D_SCALING) ? DType::kFloat8E8M0 : DType::kFloat32;
  ret.set_rowwise_scale_inv(scale_inv_ptr, scale_inv_dtype, scale_inv_shape);
  return ret;
}

transformer_engine::TensorWrapper makeTransformerEngineTensor(
    void* data_ptr, void* columnwise_data_ptr, const std::vector<size_t>& shape,
    const std::vector<size_t>& columnwise_shape, const transformer_engine::DType type,
    void* amax_ptr, void* scale_ptr, void* scale_inv_ptr, void* columnwise_scale_inv_ptr,
    const std::vector<size_t>& scale_inv_shape,
    const std::vector<size_t>& columnwise_scale_inv_shape, NVTEScalingMode scaling_mode) {
  TensorWrapper ret(scaling_mode);
  ret.set_rowwise_data(data_ptr, type, shape);
  ret.set_columnwise_data(columnwise_data_ptr, type, columnwise_shape);
  const std::vector<size_t> meta_shape{1};
  ret.set_amax(amax_ptr, DType::kFloat32, meta_shape);
  ret.set_scale(scale_ptr, DType::kFloat32, meta_shape);
  auto scale_inv_dtype =
      (scaling_mode == NVTE_MXFP8_1D_SCALING) ? DType::kFloat8E8M0 : DType::kFloat32;
  ret.set_rowwise_scale_inv(scale_inv_ptr, scale_inv_dtype, scale_inv_shape);
  ret.set_columnwise_scale_inv(columnwise_scale_inv_ptr, scale_inv_dtype,
                               columnwise_scale_inv_shape);
  return ret;
}
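// Note: amax and scale are expected to be single-element FP32 buffers
// (meta_shape {1}). For NVTE_MXFP8_1D_SCALING the scale-inverse buffers hold
// E8M0 (exponent-only) values; every other scaling mode uses FP32.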

transformer_engine::TensorWrapper makeTransformerEngineTensor(at::Tensor tensor, at::Tensor amax,
                                                              const at::Tensor scale,
                                                              at::Tensor scale_inv,
                                                              NVTEScalingMode scaling_mode) {
  transformer_engine::DType dtype = GetTransformerEngineDType(tensor.scalar_type());

  auto tensor_shape = getTensorShape(tensor);
  auto scale_inv_shape = getTensorShape(scale_inv);

  NVTE_CHECK(amax.scalar_type() == at::kFloat);
  NVTE_CHECK(scale.scalar_type() == at::kFloat);
  NVTE_CHECK(scale_inv.scalar_type() == at::kFloat);

  return makeTransformerEngineTensor(tensor.data_ptr(), tensor_shape, dtype, amax.data_ptr(),
                                     scale.data_ptr(), scale_inv.data_ptr(), scale_inv_shape,
                                     scaling_mode);
}

template <typename T>
T product(const std::vector<T>& shape) {
  T ret = 1;
  for (auto s : shape) {
    ret *= s;
  }
  return ret;
}

template size_t product<size_t>(const std::vector<size_t>& shape);
template int64_t product<int64_t>(const std::vector<int64_t>& shape);

size_t product(const NVTEShape& shape, size_t begin, size_t end) {
  NVTE_CHECK(begin <= end && end <= shape.ndim, "Attempted to access entries ", begin, " to ", end,
             " in a shape with ", shape.ndim, " entries");
  size_t ret = 1;
  for (size_t i = begin; i < end; ++i) {
    ret *= shape.data[i];
  }
  return ret;
}
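// Illustrative examples: product<size_t>({2, 3, 4}) == 24; for an NVTEShape
// with data = {8, 16, 32}, product(shape, 1, 3) == 16 * 32 == 512.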

std::vector<size_t> nvte_shape_to_vector(const NVTEShape& nvte_shape) {
  std::vector<size_t> shape;
  for (size_t i = 0; i < nvte_shape.ndim; i++) {
    shape.push_back(nvte_shape.data[i]);
  }
  return shape;
}

at::Tensor allocateSpace(const std::vector<size_t>& shape, const transformer_engine::DType type,
                         bool init_to_zeros) {
  std::vector<int64_t> shape_int64(shape.begin(), shape.end());
  c10::IntArrayRef ar_shape(shape_int64);
  if (init_to_zeros) {
    return at::zeros(ar_shape, at::CUDA(GetATenDType(type)));
  } else {
    return at::empty(ar_shape, at::CUDA(GetATenDType(type)));
  }
}

at::Tensor allocateSpace(const NVTEShape& shape, const transformer_engine::DType type,
                         bool init_to_zeros) {
  auto size = shape.ndim;
  if (size == 2 && init_to_zeros) {
    return at::zeros({static_cast<int64_t>(shape.data[0]), static_cast<int64_t>(shape.data[1])},
                     at::CUDA(GetATenDType(type)));
  } else if (size == 2) {
    return at::empty({static_cast<int64_t>(shape.data[0]), static_cast<int64_t>(shape.data[1])},
                     at::CUDA(GetATenDType(type)));
  } else if (size == 1 && init_to_zeros) {
    return at::zeros({static_cast<int64_t>(shape.data[0])}, at::CUDA(GetATenDType(type)));
  } else if (size == 1) {
    return at::empty({static_cast<int64_t>(shape.data[0])}, at::CUDA(GetATenDType(type)));
  }
  NVTE_CHECK(false, "allocateSpace only supports 1-D and 2-D shapes, but got a shape with ", size,
             " dimensions.");
}

at::Tensor allocateTorchTensor(int M, int N, transformer_engine::DType dtype) {
  return at::empty({static_cast<int64_t>(M), static_cast<int64_t>(N)},
                   at::CUDA(GetATenDType(dtype)));
}

at::Tensor allocateTorchTensor(int M, transformer_engine::DType dtype) {
  return at::empty({static_cast<int64_t>(M)}, at::CUDA(GetATenDType(dtype)));
}

void* getDataPtr(at::Tensor tensor, int offset) {
  void* dptr = nullptr;
  if (tensor.numel() > 0) {
    dptr = tensor.data_ptr();
  }
  if (dptr != nullptr && offset != 0) {
    char* char_ptr = reinterpret_cast<char*>(dptr);
    char_ptr += offset * tensor.element_size();
    dptr = reinterpret_cast<void*>(char_ptr);
  }
  return dptr;
}
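// Note: offset is measured in elements rather than bytes; the pointer is
// advanced by offset * tensor.element_size() bytes.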

std::vector<size_t> convertShape(const NVTEShape& shape) {
  return std::vector<size_t>(shape.data, shape.data + shape.ndim);
}

size_t roundup(const size_t value, const size_t multiple) {
  assert(multiple > 0);
  return ((value + multiple - 1) / multiple) * multiple;
}
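// Illustrative examples: roundup(10, 8) == 16, roundup(16, 8) == 16, and
// roundup(0, 8) == 0.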

void philox_unpack(at::PhiloxCudaState arg, int64_t* rng_state_ptr) {
  NVTE_SCOPED_GIL_RELEASE({
    nvte_extract_seed_and_offset(rng_state_ptr, arg.captured_, arg.seed_.ptr, arg.seed_.val,
                                 arg.offset_.ptr, arg.offset_.val, arg.offset_intragraph_,
                                 at::cuda::getCurrentCUDAStream());
  });
}

// extract PhiloxCudaState from CUDA random number generator
at::PhiloxCudaState init_philox_state(at::CUDAGeneratorImpl* gen, size_t elts_per_thread) {
  at::PhiloxCudaState philox_args;
  std::lock_guard<std::mutex> lock(gen->mutex_);
  philox_args = gen->philox_cuda_state(elts_per_thread);
  return philox_args;
}
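// Note: following PyTorch's "acquire lock when using random generators"
// convention, the generator mutex is held while reserving Philox offsets so
// that concurrent consumers of the same generator do not overlap.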

}  // namespace transformer_engine::pytorch