/*************************************************************************
 * Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * See LICENSE for license information.
 ************************************************************************/

#ifndef TRANSFORMER_ENGINE_PYTORCH_CSRC_COMMON_H_
#define TRANSFORMER_ENGINE_PYTORCH_CSRC_COMMON_H_

#include <transformer_engine/gemm.h>
#include <transformer_engine/layer_norm.h>
#include <transformer_engine/rmsnorm.h>
#include <transformer_engine/transpose.h>
#include <transformer_engine/activation.h>
#include <transformer_engine/logging.h>
#include <transformer_engine/transformer_engine.h>
#include <transformer_engine/cast.h>
#include <transformer_engine/softmax.h>
#include <transformer_engine/fused_attn.h>
#include <ATen/ATen.h>
#include <ATen/cudnn/Handle.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/macros/Macros.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <ATen/cuda/CUDAGraphsUtils.cuh>
#include <torch/extension.h>
#include <torch/torch.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_bf16.h>
#include <cublasLt.h>
#include <cudnn.h>
#include <stdexcept>
#include <memory>
#include <iomanip>
#include <random>
#include <cstring>
#include <vector>
#include <iostream>


namespace transformer_engine {

// Each tensor here is shape (N, ) holding all scaling
// data for a single FP8 block, e.g. LayerNormLinear
class FP8TensorMeta {
 public:
    at::Tensor scale;         // per-slot FP8 scale factors, indexed by FP8FwdTensors/FP8BwdTensors
    at::Tensor scale_inv;     // NOTE(review): presumably 1/scale, kept for dequantization — confirm in recipe code
    at::Tensor amax_history;  // per-slot amax values; update/reduction logic lives outside this header
};

// Used as named indices on the `scale`, `scale_inv`,
// and `amax` tensors in the `FP8TensorMeta` class.
// Forward-pass slots: one (input, weight, output) triple per GEMM,
// laid out contiguously so slot = 3*gemm_idx + kind.
enum FP8FwdTensors {
    GEMM1_INPUT  = 0,
    GEMM1_WEIGHT = 1,
    GEMM1_OUTPUT = 2,
    GEMM2_INPUT  = 3,
    GEMM2_WEIGHT = 4,
    GEMM2_OUTPUT = 5,
    GEMM3_INPUT  = 6,
    GEMM3_WEIGHT = 7,
    GEMM3_OUTPUT = 8
};

// Used as named indices on the `scale`, `scale_inv`,
// and `amax` tensors in the `FP8TensorMeta` class.
// Backward-pass slots: one (grad_output, grad_input) pair per GEMM,
// laid out contiguously so slot = 2*gemm_idx + kind.
enum FP8BwdTensors {
    GRAD_OUTPUT1 = 0,
    GRAD_INPUT1  = 1,
    GRAD_OUTPUT2 = 2,
    GRAD_INPUT2  = 3,
    GRAD_OUTPUT3 = 4,
    GRAD_INPUT3  = 5
};


}  // namespace transformer_engine


// Resolve the FP8 dtype implied by the recipe name. Declaration only;
// NOTE(review): `e4m3_if_hybrid` presumably selects E4M3 vs E5M2 when the
// recipe is "hybrid" — confirm against the .cpp implementation.
transformer_engine::DType getTransformerEngineFP8Type(bool e4m3_if_hybrid,
                                                      const std::string &fp8_recipe);


// Map a Transformer Engine dtype to the ATen scalar type used to store it
// in a torch tensor. FP8 dtypes (E4M3/E5M2) have no native ATen dtype and
// are stored as raw bytes (at::kByte), as is kByte itself.
// Raises via NVTE_ERROR for any unmapped dtype.
inline at::ScalarType GetATenDType(transformer_engine::DType t) {
    switch (t) {
        case transformer_engine::DType::kInt32:
            return torch::kInt32;
        case transformer_engine::DType::kInt64:
            return torch::kInt64;
        case transformer_engine::DType::kFloat32:
            return at::kFloat;
        case transformer_engine::DType::kFloat16:
            return at::kHalf;
        case transformer_engine::DType::kBFloat16:
            return at::kBFloat16;
        // FP8 payloads are byte-addressed on the torch side.
        case transformer_engine::DType::kByte:
        case transformer_engine::DType::kFloat8E4M3:
        case transformer_engine::DType::kFloat8E5M2:
            return at::kByte;
        default:
            NVTE_ERROR("Invalid type");
    }
}


// Inverse of GetATenDType: map an ATen scalar type to the Transformer
// Engine dtype. Both kBool and kByte collapse to kByte (no bool dtype in
// TE); the FP8 dtypes cannot be recovered here since they are stored as
// bytes. Raises via NVTE_ERROR for any unmapped scalar type.
inline transformer_engine::DType GetTransformerEngineDType(at::ScalarType t) {
    switch (t) {
        case at::kHalf:
            return transformer_engine::DType::kFloat16;
        case at::kFloat:
            return transformer_engine::DType::kFloat32;
        case at::kBFloat16:
            return transformer_engine::DType::kBFloat16;
        case at::kBool:
            return transformer_engine::DType::kByte;
        case torch::kByte:
            return transformer_engine::DType::kByte;
        case torch::kInt32:
            return transformer_engine::DType::kInt32;
        case torch::kInt64:
            return transformer_engine::DType::kInt64;
        default:
            NVTE_ERROR("Invalid type");
    }
}


// Reinterpret a raw integer (e.g. a dtype enum value passed across the
// Python/C++ boundary) as a Transformer Engine dtype. No validation is
// performed on the value.
inline transformer_engine::DType GetTransformerEngineDType(int DType_value) {
    const auto te_dtype = static_cast<transformer_engine::DType>(DType_value);
    return te_dtype;
}

// Wrap a raw data pointer with the given shape/dtype in a TensorWrapper.
// Declaration only — ownership/copy semantics are defined in the .cpp.
transformer_engine::TensorWrapper makeTransformerEngineTensor(void* data_ptr,
                                                              const std::vector<size_t>& shape,
                                                              const transformer_engine::DType type
);

141
142
143
144
145
146
147
148
transformer_engine::TensorWrapper makeTransformerEngineTensor(void* data_ptr,
                                                              const std::vector<size_t>& shape,
                                                              const transformer_engine::DType type,
                                                              void* amax_ptr,
                                                              void* scale_ptr,
                                                              void* scale_inv_ptr
);

Przemek Tredak's avatar
Przemek Tredak committed
149
150
151
152
153
154
155
156
157

transformer_engine::TensorWrapper makeTransformerEngineTensor(void* data_ptr,
                                                              const NVTEShape& shape,
                                                              const transformer_engine::DType type
);


transformer_engine::TensorWrapper makeTransformerEngineTensor(at::Tensor tensor);

158
159
160
161
162
transformer_engine::TensorWrapper makeTransformerEngineTensor(at::Tensor tensor,
                                                              at::Tensor amax,
                                                              const at::Tensor scale,
                                                              at::Tensor scale_inv);

Przemek Tredak's avatar
Przemek Tredak committed
163
164
165

size_t product(const std::vector<size_t> &shape);

cyanguwa's avatar
cyanguwa committed
166
167
168
at::Tensor allocateSpace(const std::vector<size_t>& shape,
                         const transformer_engine::DType type,
                         bool init_to_zeros);
Przemek Tredak's avatar
Przemek Tredak committed
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184

at::Tensor allocateSpace(const NVTEShape &shape,
                         const transformer_engine::DType type,
                         bool init_to_zeros = false);


// Allocate an M x N torch tensor whose storage matches `dtype`
// (FP8 dtypes presumably land in a byte tensor, mirroring GetATenDType —
// confirm in the .cpp). Declaration only.
at::Tensor allocateTorchTensor(int M,
                               int N,
                               transformer_engine::DType dtype
);


// 1-D variant: allocate a length-M torch tensor of `dtype`.
// Declaration only.
at::Tensor allocateTorchTensor(int M,
                               transformer_engine::DType dtype
);

185
186
void *getDataPtr(at::Tensor t);

#endif  // TRANSFORMER_ENGINE_PYTORCH_CSRC_COMMON_H_