/*************************************************************************
 * Copyright (c) 2022-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * See LICENSE for license information.
 ************************************************************************/

#ifndef TRANSFORMER_ENGINE_PYTORCH_CSRC_COMMON_H_
#define TRANSFORMER_ENGINE_PYTORCH_CSRC_COMMON_H_

#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <ATen/cudnn/Handle.h>
#include <ATen/native/DispatchStub.h>
#include <c10/macros/Macros.h>
#include <c10/util/Float8_e4m3fn.h>
#include <c10/util/Float8_e5m2.h>
#include <cublasLt.h>
#include <cuda.h>
#include <cuda_bf16.h>
#include <cuda_runtime.h>
#include <cudnn.h>
#include <torch/extension.h>
#include <torch/torch.h>
#include <transformer_engine/activation.h>
#include <transformer_engine/cast.h>
#include <transformer_engine/cast_transpose_noop.h>
#include <transformer_engine/comm_gemm_overlap.h>
#include <transformer_engine/fused_attn.h>
#include <transformer_engine/fused_rope.h>
#include <transformer_engine/fused_router.h>
#include <transformer_engine/gemm.h>
#include <transformer_engine/hadamard_transform.h>
#include <transformer_engine/multi_stream.h>
#include <transformer_engine/multi_tensor.h>
#include <transformer_engine/normalization.h>
#include <transformer_engine/padding.h>
#include <transformer_engine/permutation.h>
#include <transformer_engine/recipe.h>
#include <transformer_engine/softmax.h>
#include <transformer_engine/swizzle.h>
#include <transformer_engine/transformer_engine.h>
#include <transformer_engine/transpose.h>

#include <ATen/cuda/CUDAGraphsUtils.cuh>
#include <cassert>
#include <cstring>
#include <iostream>
#include <memory>
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
#include <vector>

#include "c10/util/ArrayRef.h"
#include "common/util/logging.h"

namespace transformer_engine::pytorch {

// in python we have: dist_group_type = torch.distributed.ProcessGroup
using dist_group_type = c10d::ProcessGroup;

// Each tensor here is shape (N, ) holding all scaling
// data for a single FP8 block, e.g. LayerNormLinear
class FP8TensorMeta {
 public:
  at::Tensor scale;
  at::Tensor scale_inv;
  at::Tensor amax_history;
};

// Used as named indices on the `scale`, `scale_inv`,
// and `amax` tensors in the `FP8TensorMeta` class.
enum FP8FwdTensors {
  GEMM1_INPUT = 0,
  GEMM1_WEIGHT = 1,
  GEMM1_OUTPUT = 2,
  GEMM2_INPUT = 3,
  GEMM2_WEIGHT = 4,
  GEMM2_OUTPUT = 5,
  GEMM3_INPUT = 6,
  GEMM3_WEIGHT = 7,
  GEMM3_OUTPUT = 8
};

// Used as named indices on the `scale`, `scale_inv`,
// and `amax` tensors in the `FP8TensorMeta` class.
enum FP8BwdTensors {
  GRAD_OUTPUT1 = 0,
  GRAD_INPUT1 = 1,
  GRAD_OUTPUT2 = 2,
  GRAD_INPUT2 = 3,
  GRAD_OUTPUT3 = 4,
  GRAD_INPUT3 = 5
};
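
// Indexing sketch (illustrative only; `meta` and how it is obtained are
// hypothetical). With the enums above, per-tensor scaling data is read as:
//
//   FP8TensorMeta meta = /* fetched from the module's FP8 state */;
//   at::Tensor w_scale = meta.scale[FP8FwdTensors::GEMM1_WEIGHT];
//   at::Tensor g_scale_inv = meta.scale_inv[FP8BwdTensors::GRAD_OUTPUT1];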

class Quantizer {
 public:
  virtual NVTEScalingMode get_scaling_mode() const = 0;

  virtual void set_quantization_params(TensorWrapper* tensor) const = 0;

  /*! @brief Construct a tensor with uninitialized data */
  virtual std::pair<TensorWrapper, py::object> create_tensor(const std::vector<size_t>& shape,
                                                             DType dtype) const = 0;

  /*! @brief Convert a PyTorch tensor into a Transformer Engine C++ tensor
   *
   * The PyTorch tensor's attributes are modified to match the
   * quantizer's configuration.
   */
  virtual std::pair<TensorWrapper, py::object> convert_and_update_tensor(
      py::object tensor) const = 0;

  /*! @brief Convert to a quantized data format */
  virtual void quantize(const TensorWrapper& input, TensorWrapper& out,
                        const std::optional<TensorWrapper>& noop_flag = std::nullopt) = 0;

  virtual ~Quantizer() = default;

  bool rowwise_usage = true;
  bool columnwise_usage = true;
  bool internal = false;
  bool optimize_for_gemm = false;
  py::handle quantizer;

 protected:
  explicit Quantizer(const py::handle& quantizer);
};
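
// Usage sketch for the interface above (illustrative only; `py_quantizer` is
// a hypothetical Python-side quantizer object). Callers typically obtain a
// concrete quantizer through convert_quantizer(), declared later in this
// header:
//
//   std::unique_ptr<Quantizer> q = convert_quantizer(py_quantizer);
//   auto [te_out, py_out] = q->create_tensor({1024, 1024}, DType::kBFloat16);
//   q->quantize(te_in, te_out);  // te_in is an existing TensorWrapper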

class NoneQuantizer : public Quantizer {
 public:
  explicit NoneQuantizer(const py::handle& quantizer) : Quantizer(quantizer) {}

  NVTEScalingMode get_scaling_mode() const override { return NVTE_DELAYED_TENSOR_SCALING; }

  void set_quantization_params(TensorWrapper* tensor) const override {}

  std::pair<TensorWrapper, py::object> create_tensor(const std::vector<size_t>& shape,
                                                     DType dtype) const override;

  /*! @brief Construct a tensor with pre-initialized data */
  std::pair<TensorWrapper, py::object> create_tensor(const std::vector<size_t>& shape, DType dtype,
                                                     at::Tensor data) const;

  std::pair<TensorWrapper, py::object> convert_and_update_tensor(py::object tensor) const override;

  void quantize(const TensorWrapper& input, TensorWrapper& out,
                const std::optional<TensorWrapper>& noop_flag = std::nullopt) override;
};

class Float8Quantizer : public Quantizer {
 public:
  at::Tensor scale;
  at::Tensor scale_inv;
  at::Tensor amax;
  DType dtype;

  explicit Float8Quantizer(const py::handle& quantizer);

  NVTEScalingMode get_scaling_mode() const override { return NVTE_DELAYED_TENSOR_SCALING; }

  void set_quantization_params(TensorWrapper* tensor) const override;

  std::pair<TensorWrapper, py::object> create_tensor(const std::vector<size_t>& shape,
                                                     DType dtype) const override;

  /*! @brief Construct a tensor with pre-initialized data */
  std::pair<TensorWrapper, py::object> create_tensor(const std::vector<size_t>& shape, DType dtype,
                                                     std::optional<at::Tensor> data,
                                                     std::optional<at::Tensor> transpose,
                                                     std::optional<at::Tensor> scale_inv) const;

  std::pair<TensorWrapper, py::object> convert_and_update_tensor(py::object tensor) const override;

  void quantize(const TensorWrapper& input, TensorWrapper& out,
                const std::optional<TensorWrapper>& noop_flag = std::nullopt) override;
};
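
// Delayed-scaling usage sketch (illustrative only; `py_quantizer` is a
// hypothetical Python-side handle, and the dtype argument is assumed to be
// the nominal high-precision dtype the FP8 data represents):
//
//   Float8Quantizer q{py_quantizer};
//   auto [out, py_out] = q.create_tensor({m, n}, DType::kBFloat16);
//   q.quantize(input, out);  // scales with q.scale, records the amax in q.amax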

class Float8CurrentScalingQuantizer : public Quantizer {
 public:
  at::Tensor scale;
  at::Tensor scale_inv;
  at::Tensor amax;
  DType dtype;
  bool with_amax_reduction;
  c10::intrusive_ptr<dist_group_type> amax_reduction_group;
  bool force_pow_2_scales = false;
  float amax_epsilon = 0.0;

  explicit Float8CurrentScalingQuantizer(const py::handle& quantizer);

  NVTEScalingMode get_scaling_mode() const override { return NVTE_DELAYED_TENSOR_SCALING; }

  void set_quantization_params(TensorWrapper* tensor) const override;

  std::pair<TensorWrapper, py::object> create_tensor(const std::vector<size_t>& shape,
                                                     DType dtype) const override;

  /*! @brief Construct an unquantized tensor that shares the quantizer's amax pointer.
   *
   * The amax is zeroed out. Most TE kernels that output amax expect
   * amax to be initialized to zero.
   */
  std::pair<TensorWrapper, py::object> create_unquantized_tensor_with_amax(
      const std::vector<size_t>& shape, DType dtype, std::optional<at::Tensor> data = std::nullopt);

  std::pair<TensorWrapper, py::object> convert_and_update_tensor(py::object tensor) const override;

  void quantize(const TensorWrapper& input, TensorWrapper& out,
                const std::optional<TensorWrapper>& noop_flag = std::nullopt) override;

  /*! @brief Quantize to FP8, skipping local amax computation
   *
   * The quantizer's amax pointer is assumed to already hold the local
   * amax. The amax may still be reduced across the amax reduction
   * group.
   */
  void quantize_with_amax(TensorWrapper& input, TensorWrapper& out,
                          const std::optional<TensorWrapper>& noop_flag = std::nullopt);

 private:
  void quantize_impl(const TensorWrapper& input, TensorWrapper& out,
                     const std::optional<TensorWrapper>& noop_flag, bool compute_amax);
};
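
// Current-scaling usage sketch (illustrative only; `py_quantizer` is a
// hypothetical handle): the scale comes from the amax of the tensor being
// quantized (optionally all-reduced over `amax_reduction_group`). When an
// earlier kernel already produced the amax, the local amax pass is skipped:
//
//   Float8CurrentScalingQuantizer q{py_quantizer};
//   auto [tmp, py_tmp] =
//       q.create_unquantized_tensor_with_amax({m, n}, DType::kBFloat16);
//   /* ... launch a kernel that fills tmp's data and its amax ... */
//   q.quantize_with_amax(tmp, out);  // reuses the recorded amax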

class Float8BlockQuantizer : public Quantizer {
 public:
  // Which float8 type is used for q data.
  DType dtype;
  // Options about how to quantize the tensor
  // Quantization scales are rounded down to powers of 2.
  bool force_pow_2_scales = false;
  // Amax within quantization tile has a floor of epsilon.
  float amax_epsilon = 0.0;

 private:
  int block_scaling_dim = 2;

 public:
  // Initializes from a python handle to a Float8BlockQuantizer
  explicit Float8BlockQuantizer(const py::handle& quantizer);

  NVTEScalingMode get_scaling_mode() const override {
    return (block_scaling_dim == 2) ? NVTE_BLOCK_SCALING_2D : NVTE_BLOCK_SCALING_1D;
  }

  // Gets rowwise and columnwise_data from tensor and sets them on wrapper
  void set_quantization_params(TensorWrapper* tensor) const override;

  // Create a python Float8BlockQuantized tensor and C++ wrapper
  // for the tensor. Should set quantized data, scales for rowwise
  // and optionally columnwise usage.
  std::pair<TensorWrapper, py::object> create_tensor(const std::vector<size_t>& shape,
                                                     DType dtype) const override;

  std::pair<TensorWrapper, py::object> convert_and_update_tensor(py::object tensor) const override;

  void quantize(const TensorWrapper& input, TensorWrapper& out,
                const std::optional<TensorWrapper>& noop_flag = std::nullopt) override;

  std::vector<size_t> get_scale_shape(const std::vector<size_t>& shape, bool columnwise) const;
};
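
// Scale-shape sketch (assuming the usual 128-element quantization blocks;
// get_scale_shape() is authoritative): block_scaling_dim == 1 attaches one
// scale per 1x128 run and block_scaling_dim == 2 one per 128x128 tile, so a
// (256, 512) tensor under 2D scaling carries a 2x4 grid of rowwise scales
// before any padding or alignment.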

class MXFP8Quantizer : public Quantizer {
 public:
  DType dtype;

  explicit MXFP8Quantizer(const py::handle& quantizer);

  NVTEScalingMode get_scaling_mode() const override { return NVTE_MXFP8_1D_SCALING; }

  void set_quantization_params(TensorWrapper* tensor) const override;

  std::pair<TensorWrapper, py::object> create_tensor(const std::vector<size_t>& shape,
                                                     DType dtype) const override;

  std::pair<TensorWrapper, py::object> convert_and_update_tensor(py::object tensor) const override;

  void quantize(const TensorWrapper& input, TensorWrapper& out,
                const std::optional<TensorWrapper>& noop_flag = std::nullopt) override;

  std::vector<size_t> get_scale_shape(const std::vector<size_t>& shape, bool columnwise) const;
};
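
// MXFP8 sketch: NVTE_MXFP8_1D_SCALING shares one scale per 32 contiguous
// elements (the MX block size), rowwise or columnwise. A (128, 64) tensor
// therefore has a rowwise scale grid of (128, 64 / 32) = (128, 2);
// get_scale_shape() reports the exact, possibly padded, layout.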

class NVFP4Quantizer : public Quantizer {
 public:
  // fp4 dtype
  DType dtype;
  // amax reduction for low precision FP4 AG
  bool with_amax_reduction;
  c10::intrusive_ptr<dist_group_type> amax_reduction_group;
  // random hadamard transform
  bool with_rht;
  bool with_post_rht_amax;
  // 2D block scaling
  bool with_2d_quantization;
  bool stochastic_rounding;

  int rht_matrix_random_sign_mask_t;
  at::Tensor rht_matrix;

  explicit NVFP4Quantizer(const py::handle& quantizer);

  NVTEScalingMode get_scaling_mode() const override { return NVTE_NVFP4_1D_SCALING; }

  void set_quantization_params(TensorWrapper* tensor) const override;

  std::pair<TensorWrapper, py::object> create_tensor(const std::vector<size_t>& shape,
                                                     DType dtype) const override;

  /*! @brief Construct an unquantized tensor that shares NVFP4 tensor's amax pointer
   *
   * The amax is zeroed out. Most TE kernels that output amax expect
   * amax to be initialized to zero.
   */
  std::pair<TensorWrapper, py::object> create_unquantized_tensor_with_amax(
      TensorWrapper& quantized_tensor, DType dtype);

  std::pair<TensorWrapper, py::object> convert_and_update_tensor(py::object tensor) const override;

  void quantize(const TensorWrapper& input, TensorWrapper& out,
                const std::optional<TensorWrapper>& noop_flag = std::nullopt) override;

  /*! @brief Quantize to NVFP4, skipping local amax computation
   *
   * The input tensor's amax pointer is assumed to already hold the
   * local amax. The amax may still be reduced across the amax
   * reduction group.
   */
  void quantize_with_amax(TensorWrapper& input, TensorWrapper& out);

  std::vector<size_t> get_scale_shape(const std::vector<size_t>& shape, bool columnwise) const;

 private:
  void quantize_impl(const TensorWrapper& input, TensorWrapper& out,
                     const std::optional<TensorWrapper>& noop_flag, bool compute_amax);
};
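
// NVFP4 usage sketch (illustrative only; mirrors the FP8 current-scaling
// two-step flow, with `py_quantizer` a hypothetical handle):
//
//   NVFP4Quantizer q{py_quantizer};
//   auto [out, py_out] = q.create_tensor(shape, DType::kBFloat16);
//   auto [tmp, py_tmp] = q.create_unquantized_tensor_with_amax(out, DType::kBFloat16);
//   /* ... fill tmp's data and amax, e.g. after the RHT pass ... */
//   q.quantize_with_amax(tmp, out);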

std::unique_ptr<Quantizer> convert_quantizer(py::handle quantizer);

std::vector<size_t> getTensorShape(const at::Tensor& t);

transformer_engine::DType getTransformerEngineFP8Type(bool e4m3_if_hybrid,
                                                      const std::string& fp8_recipe);

inline size_t typeToNumBits(transformer_engine::DType t) {
  switch (t) {
    case transformer_engine::DType::kInt64:
      return 64;
    case transformer_engine::DType::kInt32:
    case transformer_engine::DType::kFloat32:
      return 32;
    case transformer_engine::DType::kInt16:
    case transformer_engine::DType::kFloat16:
    case transformer_engine::DType::kBFloat16:
      return 16;
    case transformer_engine::DType::kByte:
    case transformer_engine::DType::kFloat8E4M3:
    case transformer_engine::DType::kFloat8E5M2:
    case transformer_engine::DType::kFloat8E8M0:
      return 8;
    case transformer_engine::DType::kFloat4E2M1:
      return 4;
    default:
      NVTE_ERROR("Invalid type (", static_cast<int>(t), ").");
  }
}
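
// Example: a contiguous (1024, 1024) kFloat4E2M1 tensor packs
// 1024 * 1024 * typeToNumBits(kFloat4E2M1) / 8 = 524288 bytes of data;
// scaling factors live in separate tensors and are not counted here.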

inline at::ScalarType GetATenDType(transformer_engine::DType t) {
  switch (t) {
    case transformer_engine::DType::kInt16:
      return torch::kInt16;
    case transformer_engine::DType::kInt32:
      return torch::kInt32;
    case transformer_engine::DType::kInt64:
      return torch::kInt64;
    case transformer_engine::DType::kFloat32:
      return at::kFloat;
    case transformer_engine::DType::kFloat16:
      return at::kHalf;
    case transformer_engine::DType::kBFloat16:
      return at::kBFloat16;
    case transformer_engine::DType::kByte:
      return at::kByte;
    case transformer_engine::DType::kFloat8E4M3:
      return at::kFloat8_e4m3fn;
    case transformer_engine::DType::kFloat8E5M2:
      return at::kFloat8_e5m2;
    case transformer_engine::DType::kFloat8E8M0:
      return at::kByte;  // e8m0 dtype requires PyTorch 2.7.0+
    default:
      NVTE_ERROR("Invalid type (", static_cast<int>(t), ").");
  }
}

inline transformer_engine::DType GetTransformerEngineDType(at::ScalarType t) {
  switch (t) {
    case at::kFloat8_e4m3fn:
      return transformer_engine::DType::kFloat8E4M3;
    case at::kFloat8_e5m2:
      return transformer_engine::DType::kFloat8E5M2;
    case at::kHalf:
      return transformer_engine::DType::kFloat16;
    case at::kFloat:
      return transformer_engine::DType::kFloat32;
    case at::kBFloat16:
      return transformer_engine::DType::kBFloat16;
    case at::kBool:
      return transformer_engine::DType::kByte;
    case torch::kByte:
      return transformer_engine::DType::kByte;
    case torch::kInt16:
      return transformer_engine::DType::kInt16;
    case torch::kInt32:
      return transformer_engine::DType::kInt32;
    case torch::kInt64:
      return transformer_engine::DType::kInt64;
    default:
      NVTE_ERROR("Invalid type (", static_cast<int>(t), ").");
  }
}
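
// Round trip: GetATenDType(GetTransformerEngineDType(at::kHalf)) == at::kHalf.
// The mapping is not injective, though: at::kBool and torch::kByte both map
// to kByte, and kFloat8E8M0 maps back to at::kByte.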

inline transformer_engine::DType GetTransformerEngineDType(int DType_value) {
  return static_cast<transformer_engine::DType>(DType_value);
}

transformer_engine::TensorWrapper makeTransformerEngineTensor(void* data_ptr,
                                                              const std::vector<size_t>& shape,
                                                              const transformer_engine::DType type);

transformer_engine::TensorWrapper makeTransformerEngineTensor(
    void* data_ptr, const std::vector<size_t>& shape, const transformer_engine::DType type,
    void* amax_ptr, void* scale_ptr, void* scale_inv_ptr, std::vector<size_t> scale_inv_shape = {1},
    NVTEScalingMode scaling_mode = NVTE_DELAYED_TENSOR_SCALING);

transformer_engine::TensorWrapper makeTransformerEngineTensor(
    void* data_ptr, void* columnwise_data_ptr, const std::vector<size_t>& shape,
    const std::vector<size_t>& columnwise_shape, const transformer_engine::DType type,
    void* amax_ptr, void* scale_ptr, void* scale_inv_ptr, void* columnwise_scale_inv_ptr,
    const std::vector<size_t>& scale_inv_shape = {1},
    const std::vector<size_t>& columnwise_scale_inv_shape = {1},
    NVTEScalingMode scaling_mode = NVTE_DELAYED_TENSOR_SCALING);
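
// Usage sketch (illustrative only; `data`, `amax`, `scale`, and `scale_inv`
// are hypothetical CUDA at::Tensors with the layouts these pointers expect):
//
//   auto te_tensor = makeTransformerEngineTensor(
//       data.data_ptr(),
//       {static_cast<size_t>(data.size(0)), static_cast<size_t>(data.size(1))},
//       transformer_engine::DType::kFloat8E4M3, amax.data_ptr(),
//       scale.data_ptr(), scale_inv.data_ptr());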

transformer_engine::TensorWrapper makeTransformerEngineTensor(void* data_ptr,
                                                              const NVTEShape& shape,
                                                              const transformer_engine::DType type);

transformer_engine::TensorWrapper makeTransformerEngineTensor(at::Tensor tensor);

std::tuple<std::vector<transformer_engine::TensorWrapper>, std::vector<std::vector<NVTETensor>>,
           std::vector<NVTETensor*>, size_t, size_t>
makeTransformerEngineTensorList(std::vector<std::vector<at::Tensor>> at_tensor_lists);

TensorWrapper makeTransformerEngineTensor(py::handle tensor, py::handle quantizer);

transformer_engine::TensorWrapper makeTransformerEngineTensor(
    at::Tensor tensor, at::Tensor amax, const at::Tensor scale, at::Tensor scale_inv,
    NVTEScalingMode scaling_mode = NVTE_DELAYED_TENSOR_SCALING);

template <typename T>
T product(const std::vector<T>& shape);

size_t product(const NVTEShape& shape, size_t begin, size_t end);

std::vector<size_t> nvte_shape_to_vector(const NVTEShape& nvte_shape);

at::Tensor allocateSpace(const std::vector<size_t>& shape, const transformer_engine::DType type,
                         bool init_to_zeros);

at::Tensor allocateSpace(const NVTEShape& shape, const transformer_engine::DType type,
                         bool init_to_zeros = false);

at::Tensor allocateTorchTensor(int M, int N, transformer_engine::DType dtype);

at::Tensor allocateTorchTensor(int M, transformer_engine::DType dtype);

void* getDataPtr(at::Tensor tensor, int offset = 0);

std::vector<size_t> convertShape(const NVTEShape& shape);

size_t roundup(size_t value, size_t multiple);

size_t ceildiv(size_t numer, size_t denom);
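
// E.g. roundup(10, 4) == 12 and ceildiv(10, 4) == 3; both are handy when
// padding shapes up to block-size multiples.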

NVTEShape convertTorchShape(const c10::IntArrayRef torch_shape);

std::vector<size_t> convert_shape_back_from_fp4(const std::vector<size_t>& shape, bool transpose);

// unpack the PhiloxCudaState into CUDA tensor
void philox_unpack(at::PhiloxCudaState arg, int64_t* rng_state_ptr);

// extract PhiloxCudaState from CUDA random number generator
at::PhiloxCudaState init_philox_state(at::CUDAGeneratorImpl* gen, size_t elts_per_thread);
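
// RNG plumbing sketch (illustrative only; `rng_state` is a hypothetical
// 2-element int64 CUDA tensor holding the Philox seed and offset):
//
//   auto* gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
//       std::nullopt, at::cuda::detail::getDefaultCUDAGenerator());
//   at::PhiloxCudaState philox_args = init_philox_state(gen, /*elts_per_thread=*/16);
//   philox_unpack(philox_args, rng_state.data_ptr<int64_t>());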

}  // namespace transformer_engine::pytorch

namespace std {
template <typename T>
string to_string(const vector<T>& vec) {
  string ret = "[";
  for (const auto& val : vec) {
    ret += to_string(val) + ",";
  }
  if (ret.size() > 1) {
    ret[ret.size() - 1] = ']';
  } else {
    ret += "]";
  }
  return ret;
}

// Torch shape -> string
template <typename T>
string to_string(const c10::ArrayRef<T>& vec) {
  string ret = "[";
  for (const auto& val : vec) {
    ret += to_string(val) + ",";
  }
  if (ret.size() > 1) {
    ret[ret.size() - 1] = ']';
  } else {
    ret += "]";
  }
  return ret;
}

inline string to_string(const NVTEShape& s) {
  string ret = "[";
  for (size_t i = 0; i < s.ndim; ++i) {
    ret += to_string(s.data[i]) + ",";
  }
  if (ret.size() > 1) {
    ret[ret.size() - 1] = ']';
  } else {
    ret += "]";
  }
  return ret;
}
}  // namespace std
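
// Example: std::to_string(std::vector<int>{2, 3, 4}) yields "[2,3,4]", and an
// empty shape prints as "[]".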

#endif  // TRANSFORMER_ENGINE_PYTORCH_CSRC_COMMON_H_