"...git@developer.sourcefind.cn:chenpangpang/open-webui.git" did not exist on "6bed7b1d34ea7d3fd6c501f3e8731cecd3bbedc3"
runtime.cc 13 KB
Newer Older
1
2
3
4
5
6
7
8
9
/*!
 * \file tl/runtime/runtime.cc
 * \brief Runtime functions.
 */

#include "runtime.h"

#include "../target/cuda.h"
#include <tvm/ffi/function.h>
#include <tvm/node/node.h>

#include <cstring>
#include <sstream>
#include <string>

namespace tvm {
namespace tl {

// Thread-local storage for restoring the L2 persisting cache limit
static thread_local size_t __tl_prev_persisting_l2_cache_size = 0;
static thread_local bool __tl_prev_persisting_l2_cache_saved = false;

#if (CUDA_MAJOR_VERSION >= 12)
template <typename T> static std::string ArrayToStr(const T *ptr, size_t n) {
  std::stringstream ss;
  ss << "[";
  for (size_t i = 0; i < n; i++) {
    if (i > 0)
      ss << ", ";
    ss << ptr[i]; // NOLINT(clang-analyzer-security.ArrayBound)
  }
  ss << "]";
  return ss.str();
}

struct TensorMapArgs {
  CUtensorMap *map;
  CUtensorMapDataType type;
  cuuint32_t tensorRank;
  void *globalAddress;
  cuuint64_t globalDim[5], globalStride[5];
  cuuint32_t boxDim[5], elementStrides[5];
  CUtensorMapInterleave interleave;
  CUtensorMapSwizzle swizzle;
  CUtensorMapL2promotion l2Promotion;
  CUtensorMapFloatOOBfill oobFill;

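  // Packed-argument layout (checked in Extract below): map, type, tensorRank,
  // globalAddress, then globalDim[rank], globalStride[rank], boxDim[rank],
  // elementStrides[rank], then interleave, swizzle, l2Promotion, oobFill,
  // i.e. 8 + 4 * tensorRank arguments in total.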
  static TensorMapArgs Extract(PackedArgs args) {
    TensorMapArgs T;
    int idx = 0;
    ICHECK(args.size() >= 8);
    T.map = reinterpret_cast<CUtensorMap *>(args[idx++].cast<void *>());
    T.type = static_cast<CUtensorMapDataType>(args[idx++].cast<int64_t>());
    T.tensorRank = static_cast<cuuint32_t>(args[idx++].cast<int64_t>());
    T.globalAddress = args[idx++].cast<void *>();
    ICHECK(T.tensorRank >= 1 && T.tensorRank <= 5);
    ICHECK(args.size() == static_cast<int>(8 + T.tensorRank * 4));
    for (size_t i = 0; i < T.tensorRank; i++) {
      T.globalDim[i] = args[idx++].cast<cuuint64_t>();
    }
    for (size_t i = 0; i < T.tensorRank; i++) {
      T.globalStride[i] = args[idx++].cast<cuuint64_t>();
    }
    for (size_t i = 0; i < T.tensorRank; i++) {
      T.boxDim[i] = args[idx++].cast<cuuint64_t>();
    }
    for (size_t i = 0; i < T.tensorRank; i++) {
      T.elementStrides[i] = args[idx++].cast<cuuint64_t>();
    }
    T.interleave =
        static_cast<CUtensorMapInterleave>(args[idx++].cast<int64_t>());
    T.swizzle = static_cast<CUtensorMapSwizzle>(args[idx++].cast<int64_t>());
    T.l2Promotion =
        static_cast<CUtensorMapL2promotion>(args[idx++].cast<int64_t>());
    T.oobFill =
        static_cast<CUtensorMapFloatOOBfill>(args[idx++].cast<int64_t>());
    return T;
  }

  std::string ToDebugString() {
    std::stringstream ss;
    ss << "TMA Desc Addr:   " << map << '\n'
       << "format         " << type << '\n'
       << "dim            " << tensorRank << '\n'
       << "gmem_address   " << globalAddress << '\n'
       << "globalDim      " << ArrayToStr(globalDim, tensorRank) << '\n'
       << "globalStrides  " << ArrayToStr(globalStride, tensorRank) << '\n'
       << "boxDim         " << ArrayToStr(boxDim, tensorRank) << '\n'
       << "elementStrides " << ArrayToStr(elementStrides, tensorRank) << '\n'
       << "interleave     " << interleave << '\n'
       << "swizzle        " << swizzle << '\n'
       << "l2Promotion    " << l2Promotion << '\n'
       << "oobFill        " << oobFill << '\n';
    return ss.str();
  }
};

// Register the TMA descriptor creation functions with the global FFI registry.
TVM_FFI_STATIC_INIT_BLOCK() {
  namespace refl = tvm::ffi::reflection;
  // Register using the canonical names defined in runtime.h
  refl::GlobalDef().def_packed(
      tl::tvm_tensormap_create_tiled, [](PackedArgs args, Any *ret) {
        TensorMapArgs T = TensorMapArgs::Extract(args);
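        // Note: cuTensorMapEncodeTiled expects tensorRank - 1 stride entries
        // (the innermost stride is implied by the element size), so the first
        // entry of globalStride is skipped below.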
        CUresult result = cuTensorMapEncodeTiled(
            T.map, T.type, T.tensorRank, T.globalAddress, T.globalDim,
            T.globalStride + 1, T.boxDim, T.elementStrides, T.interleave,
            T.swizzle, T.l2Promotion, T.oobFill);
        if (result != CUDA_SUCCESS) {
          LOG_FATAL << "Failed to initialize the TMA descriptor " << result
                    << '\n'
                    << T.ToDebugString();
        }
        *ret = static_cast<int>(result);
      });
}
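
// Hedged usage sketch (an assumption, not part of this file): assuming
// tvm::ffi::Function::GetGlobal is the lookup entry point, host code could
// fetch the function registered above by the canonical name from runtime.h
// and call it with the 8 + 4 * rank packed arguments documented on
// TensorMapArgs, receiving the CUresult as an int return value.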

struct TensorMapIm2ColArgs {
  CUtensorMap *map;
  CUtensorMapDataType type;
  cuuint32_t tensorRank;
  void *globalAddress;
  cuuint64_t globalDim[5], globalStride[5];
  cuuint32_t elementStrides[5];
  int pixelBoxLowerCorner[3], pixelBoxUpperCorner[3];
  cuuint32_t smem_box_channel, smem_box_pixel;
  CUtensorMapInterleave interleave;
  CUtensorMapSwizzle swizzle;
  CUtensorMapL2promotion l2Promotion;
  CUtensorMapFloatOOBfill oobFill;

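  // Packed-argument layout (checked in Extract below): map, type, tensorRank,
  // globalAddress, then globalDim[rank], globalStride[rank],
  // elementStrides[rank], pixelBoxLowerCorner[rank - 2],
  // pixelBoxUpperCorner[rank - 2], then smem_box_pixel, smem_box_channel,
  // interleave, swizzle, l2Promotion, oobFill, i.e. 6 + 5 * tensorRank
  // arguments in total.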
  static TensorMapIm2ColArgs Extract(PackedArgs args) {
    TensorMapIm2ColArgs T;
    int idx = 0;
    ICHECK(args.size() >= 8);
    T.map = reinterpret_cast<CUtensorMap *>(args[idx++].cast<void *>());
    T.type = static_cast<CUtensorMapDataType>(args[idx++].cast<int64_t>());
    T.tensorRank = static_cast<cuuint32_t>(args[idx++].cast<int64_t>());
    T.globalAddress = args[idx++].cast<void *>();
    ICHECK(T.tensorRank >= 3 && T.tensorRank <= 5);
    ICHECK(args.size() == static_cast<int>(6 + T.tensorRank * 5));
    for (size_t i = 0; i < T.tensorRank; i++) {
      T.globalDim[i] = args[idx++].cast<cuuint64_t>();
    }
    for (size_t i = 0; i < T.tensorRank; i++) {
      T.globalStride[i] = args[idx++].cast<cuuint64_t>();
    }
    for (size_t i = 0; i < T.tensorRank; i++) {
      T.elementStrides[i] = args[idx++].cast<cuuint64_t>();
    }
    for (size_t i = 0; i < T.tensorRank - 2; i++) {
      T.pixelBoxLowerCorner[i] = args[idx++].cast<int>();
    }
    for (size_t i = 0; i < T.tensorRank - 2; i++) {
      T.pixelBoxUpperCorner[i] = args[idx++].cast<int>();
    }
    T.smem_box_pixel = args[idx++].cast<cuuint64_t>();
    T.smem_box_channel = args[idx++].cast<cuuint64_t>();
    T.interleave =
        static_cast<CUtensorMapInterleave>(args[idx++].cast<int64_t>());
    T.swizzle = static_cast<CUtensorMapSwizzle>(args[idx++].cast<int64_t>());
    T.l2Promotion =
        static_cast<CUtensorMapL2promotion>(args[idx++].cast<int64_t>());
    T.oobFill =
        static_cast<CUtensorMapFloatOOBfill>(args[idx++].cast<int64_t>());
    return T;
  }

  std::string ToDebugString() {
    std::stringstream ss;
    ss << "TMA Desc Addr:   " << map << '\n'
       << "format         " << type << '\n'
       << "dim            " << tensorRank << '\n'
       << "gmem_address   " << globalAddress << '\n'
       << "globalDim      " << ArrayToStr(globalDim, tensorRank) << '\n'
       << "globalStrides  " << ArrayToStr(globalStride, tensorRank) << '\n'
       << "smem_box_pixel " << smem_box_pixel << '\n'
       << "smem_box_channel " << smem_box_channel << '\n'
       << "pixelBoxLowerCorner  "
       << ArrayToStr(pixelBoxLowerCorner, tensorRank - 2) << '\n'
       << "pixelBoxUpperCorner  "
       << ArrayToStr(pixelBoxUpperCorner, tensorRank - 2) << '\n'
       << "elementStrides " << ArrayToStr(elementStrides, tensorRank) << '\n'
       << "interleave     " << interleave << '\n'
       << "swizzle        " << swizzle << '\n'
       << "l2Promotion    " << l2Promotion << '\n'
       << "oobFill        " << oobFill << '\n';
    return ss.str();
  }
};

TVM_FFI_STATIC_INIT_BLOCK() {
  namespace refl = tvm::ffi::reflection;
  refl::GlobalDef().def_packed(
      tl::tvm_tensormap_create_im2col, [](PackedArgs args, Any *ret) {
        TensorMapIm2ColArgs T = TensorMapIm2ColArgs::Extract(args);
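        // As in the tiled path, cuTensorMapEncodeIm2col expects
        // tensorRank - 1 stride entries, so globalStride[0] is skipped below.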
        CUresult result = cuTensorMapEncodeIm2col(
            T.map, T.type, T.tensorRank, T.globalAddress, T.globalDim,
            T.globalStride + 1, T.pixelBoxLowerCorner, T.pixelBoxUpperCorner,
            T.smem_box_channel, T.smem_box_pixel, T.elementStrides,
            T.interleave, T.swizzle, T.l2Promotion, T.oobFill);
        if (result != CUDA_SUCCESS) {
          LOG_FATAL << "Failed to initialize the TMA descriptor " << result
                    << '\n'
                    << T.ToDebugString();
        }
        *ret = static_cast<int>(result);
      });
}

#endif // (CUDA_MAJOR_VERSION >= 12)

//
// CUDA L2 Persisting Cache Access Policy Window helpers.
// Exposed as TVM FFI packed functions similar to TMA initialization.
//
TVM_FFI_STATIC_INIT_BLOCK() {
  namespace refl = tvm::ffi::reflection;
  // Set stream access policy window and adjust persisting L2 cache size
  // Args:
  //  [0]: void* base_ptr (required)
  //  [1]: int64 num_bytes (required)
  //  [2]: float hit_ratio (optional, default 0.8)
  //  [3]: void* stream (optional, default 0 => default stream)
  //  [4]: int64 l2_limit_bytes (optional, default = num_bytes)
  refl::GlobalDef().def_packed(
      tl::tvm_cuda_stream_set_access_policy_window,
      [](PackedArgs args, Any *ret) {
        ICHECK(args.size() >= 2) << "Expected at least base_ptr and num_bytes";

        void *base_ptr = args[0].cast<void *>();
        size_t num_bytes = static_cast<size_t>(args[1].cast<int64_t>());
        float hit_ratio = 0.8f;
        if (args.size() >= 3) {
          // Accept double/float
          hit_ratio = static_cast<float>(args[2].cast<double>());
        }
        CUstream stream = nullptr;
        if (args.size() >= 4) {
          stream = reinterpret_cast<CUstream>(args[3].cast<void *>());
        }
        size_t l2_limit_bytes = num_bytes;
        if (args.size() >= 5) {
          l2_limit_bytes = static_cast<size_t>(args[4].cast<int64_t>());
        }

        // Clamp requested limit to device capability
        CUdevice device;
        CUresult result = cuCtxGetDevice(&device);
        if (result != CUDA_SUCCESS) {
          LOG_FATAL << "Failed to get current CUDA device: " << result;
        }
        int max_persisting = 0;
        result = cuDeviceGetAttribute(
            &max_persisting, CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE,
            device);
        if (result != CUDA_SUCCESS) {
          LOG_FATAL << "Failed to query MAX_PERSISTING_L2_CACHE_SIZE: "
                    << result;
        }
        if (max_persisting > 0 &&
            l2_limit_bytes > static_cast<size_t>(max_persisting)) {
          l2_limit_bytes = static_cast<size_t>(max_persisting);
        }

        // Save current limit to restore later
        size_t init_persisting_l2_cache_size = 0;
        result = cuCtxGetLimit(&init_persisting_l2_cache_size,
                               CU_LIMIT_PERSISTING_L2_CACHE_SIZE);
        if (result != CUDA_SUCCESS) {
          LOG_FATAL << "Failed to get current persisting L2 cache size limit: "
                    << result;
        }
        __tl_prev_persisting_l2_cache_size = init_persisting_l2_cache_size;
        __tl_prev_persisting_l2_cache_saved = true;

        // Set new limit
        result =
            cuCtxSetLimit(CU_LIMIT_PERSISTING_L2_CACHE_SIZE, l2_limit_bytes);
        if (result != CUDA_SUCCESS) {
          LOG_FATAL << "Failed to set persisting L2 cache size limit: "
                    << result;
        }

        // Apply access policy window to stream
        CUstreamAttrValue stream_attribute;
        memset(&stream_attribute, 0, sizeof(stream_attribute));
        stream_attribute.accessPolicyWindow.base_ptr = base_ptr;
        stream_attribute.accessPolicyWindow.num_bytes = l2_limit_bytes;
        stream_attribute.accessPolicyWindow.hitRatio = hit_ratio;
        stream_attribute.accessPolicyWindow.hitProp =
            CU_ACCESS_PROPERTY_PERSISTING;
        stream_attribute.accessPolicyWindow.missProp =
            CU_ACCESS_PROPERTY_STREAMING;
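        // Within the window, roughly hit_ratio of accesses receive the
        // persisting property; the remaining window accesses use the
        // streaming property.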

        result = cuStreamSetAttribute(stream,
                                      CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW,
                                      &stream_attribute);
        if (result != CUDA_SUCCESS) {
          LOG_FATAL << "Failed to set stream access policy window: " << result;
        }

        *ret = static_cast<int>(result);
      });

  // Reset stream access policy window and restore the previous L2 cache size
  // Args:
  //  [0]: void* stream (optional, default 0)
  refl::GlobalDef().def_packed(
      tl::tvm_cuda_stream_reset_access_policy_window,
      [](PackedArgs args, Any *ret) {
        CUstream stream = nullptr;
        if (args.size() >= 1) {
          stream = reinterpret_cast<CUstream>(args[0].cast<void *>());
        }

        CUstreamAttrValue stream_attribute;
        memset(&stream_attribute, 0, sizeof(stream_attribute));
        // num_bytes = 0 disables the access policy window on the stream
        stream_attribute.accessPolicyWindow.num_bytes = 0;

        CUresult result = cuStreamSetAttribute(
            stream, CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW,
            &stream_attribute);
        if (result != CUDA_SUCCESS) {
          LOG_FATAL << "Failed to reset stream access policy window: "
                    << result;
        }

        result = cuCtxResetPersistingL2Cache();
        if (result != CUDA_SUCCESS) {
          LOG_FATAL << "Failed to reset persisting L2 cache lines: " << result;
        }

        if (__tl_prev_persisting_l2_cache_saved) {
          result = cuCtxSetLimit(CU_LIMIT_PERSISTING_L2_CACHE_SIZE,
                                 __tl_prev_persisting_l2_cache_size);
          if (result != CUDA_SUCCESS) {
            LOG_FATAL << "Failed to restore persisting L2 cache size limit: "
                      << result;
          }
          __tl_prev_persisting_l2_cache_saved = false;
        }

        *ret = static_cast<int>(result);
      });
}
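
// Hedged usage sketch (illustrative only): a host-side caller would pair the
// two packed functions around launches on the same stream, e.g.
//   set_access_policy_window(buf, bytes, /*hit_ratio=*/0.8, stream);
//   ... launch kernels that repeatedly read `buf` on `stream` ...
//   reset_access_policy_window(stream);
// where the two names stand for whatever handles the caller obtained for
// tl::tvm_cuda_stream_set_access_policy_window and
// tl::tvm_cuda_stream_reset_access_policy_window.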

} // namespace tl
} // namespace tvm