from typing import List

import torch


def is_hip() -> bool:
    return torch.version.hip is not None


_is_hip = is_hip()
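
# Note: the transfer kernels below default to 16 warps per block on ROCm/HIP
# (e.g. AMD/DCU GPUs) and 32 on CUDA; num_warps_per_block can be overridden
# per call if a different launch shape suits the hardware better.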

def dcu_create_extend_after_decode_spec_info(
    verified_id: torch.Tensor,
    seq_lens: torch.Tensor,
    accept_lens: torch.Tensor,
    positions: torch.Tensor,
    new_verified_id: torch.Tensor,
    bs: int,
):
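    """Thin wrapper over the DCU (Hygon GPU) custom op of the same name.

    Presumably fills ``positions`` and ``new_verified_id`` in place for the
    first ``bs`` requests when building the extend batch that follows a
    speculative-decoding verify step; inferred from the argument names, not
    from the kernel source.
    """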
    torch.ops.sgl_kernel.dcu_create_extend_after_decode_spec_info(
        verified_id,
        seq_lens,
        accept_lens,
        positions,
        new_verified_id,
        bs,
    )

def dcu_alloc_extend_kernel(
    pre_lens_ptr: torch.Tensor,
    seq_lens_ptr: torch.Tensor,
    last_loc_ptr: torch.Tensor,
    free_page_ptr: torch.Tensor,
    out_indices: torch.Tensor,
    bs: int,
    page_size: int,
):
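    """Thin wrapper over the DCU paged-allocation op for extend (prefill)
    batches. Appears to write the newly allocated token slots of each of the
    ``bs`` requests into ``out_indices``, drawing fresh pages from
    ``free_page_ptr`` when a request crosses a ``page_size`` boundary.
    Inferred from the argument names, not from the kernel source.
    """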
    torch.ops.sgl_kernel.dcu_alloc_extend_kernel(
        pre_lens_ptr,
        seq_lens_ptr,
        last_loc_ptr,
        free_page_ptr,
        out_indices,
        bs,
        page_size,
    )

def dcu_alloc_decode_kernel(
    seq_lens_ptr: torch.Tensor,
    last_loc_ptr: torch.Tensor,
    free_page_ptr: torch.Tensor,
    out_indices: torch.Tensor,
    bs: int,
    page_size: int,
):
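    """Decode-time counterpart of ``dcu_alloc_extend_kernel``: presumably
    allocates one new token slot per request into ``out_indices``. Inferred
    from the argument names.
    """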
    torch.ops.sgl_kernel.dcu_alloc_decode_kernel(
        seq_lens_ptr,
        last_loc_ptr,
        free_page_ptr,
        out_indices,
        bs,
        page_size,
    )

def transfer_kv_per_layer(
    src_k: torch.Tensor,
    dst_k: torch.Tensor,
    src_v: torch.Tensor,
    dst_v: torch.Tensor,
    src_indices: torch.Tensor,
    dst_indices: torch.Tensor,
    item_size: int,
    block_quota: int = 2,
    num_warps_per_block: int = 16 if _is_hip else 32,
):
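    """Copy one layer's K/V entries from ``src_k``/``src_v`` at ``src_indices``
    to ``dst_k``/``dst_v`` at ``dst_indices``. ``item_size`` is the per-token
    entry size expected by the kernel; ``block_quota`` and
    ``num_warps_per_block`` are launch-tuning knobs.

    Illustrative sketch only -- the pool shapes, the host-pinned destination,
    and the ``item_size`` convention below are assumptions, not taken from the
    kernel source::

        kv_shape = (1024, 8, 128)  # slots, kv heads, head_dim (hypothetical)
        src_k = torch.empty(kv_shape, dtype=torch.float16, device="cuda")
        src_v = torch.empty(kv_shape, dtype=torch.float16, device="cuda")
        dst_k = torch.empty(kv_shape, dtype=torch.float16, pin_memory=True)
        dst_v = torch.empty(kv_shape, dtype=torch.float16, pin_memory=True)
        idx = torch.arange(16, device="cuda", dtype=torch.int64)
        transfer_kv_per_layer(src_k, dst_k, src_v, dst_v, idx, idx,
                              item_size=8 * 128)
    """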
    torch.ops.sgl_kernel.transfer_kv_per_layer.default(
        src_k,
        dst_k,
        src_v,
        dst_v,
        src_indices,
        dst_indices,
        item_size,
        block_quota,
        num_warps_per_block,
    )


def transfer_kv_per_layer_pf_lf(
    src_k: torch.Tensor,
    dst_k: torch.Tensor,
    src_v: torch.Tensor,
    dst_v: torch.Tensor,
    src_indices: torch.Tensor,
    dst_indices: torch.Tensor,
    layer_id: int,
    item_size: int,
    src_layout_dim: int,
    block_quota: int = 2,
    num_warps_per_block: int = 16 if _is_hip else 32,
):
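    """Layout-converting variant of ``transfer_kv_per_layer``. The ``pf``/``lf``
    suffixes appear to stand for page-first and layer-first pool layouts (source
    and destination respectively), with ``layer_id`` selecting the layer inside
    the multi-layer source pool and ``src_layout_dim`` giving its stride; this
    reading is inferred from the argument names, not from the kernel source.
    """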
    torch.ops.sgl_kernel.transfer_kv_per_layer_pf_lf.default(
        src_k,
        dst_k,
        src_v,
        dst_v,
        src_indices,
        dst_indices,
        layer_id,
        item_size,
        src_layout_dim,
        block_quota,
        num_warps_per_block,
    )


def transfer_kv_per_layer_ph_lf(
    src_k: torch.Tensor,
    dst_k: torch.Tensor,
    src_v: torch.Tensor,
    dst_v: torch.Tensor,
    src_indices: torch.Tensor,
    dst_indices: torch.Tensor,
    layer_id: int,
    item_size: int,
    src_layout_dim: int,
    page_size: int,
    head_num: int,
    block_quota: int = 2,
    num_warps_per_block: int = 16 if _is_hip else 32,
):
    torch.ops.sgl_kernel.transfer_kv_per_layer_ph_lf.default(
        src_k,
        dst_k,
        src_v,
        dst_v,
        src_indices,
        dst_indices,
        layer_id,
        item_size,
        src_layout_dim,
        page_size,
        head_num,
        block_quota,
        num_warps_per_block,
    )


def transfer_kv_all_layer(
    src_k_layers: torch.Tensor,
    dst_k_layers: torch.Tensor,
    src_v_layers: torch.Tensor,
    dst_v_layers: torch.Tensor,
    src_indices: torch.Tensor,
    dst_indices: torch.Tensor,
    item_size: int,
    num_layers: int,
    block_quota: int = 2,
    num_warps_per_block: int = 16 if _is_hip else 32,
):
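    """All-layer variant of ``transfer_kv_per_layer``: a single launch moves the
    selected entries for every layer. ``src_k_layers``/``dst_k_layers`` (and the
    V counterparts) appear to be per-layer pointer tables of length
    ``num_layers`` rather than the KV tensors themselves; inferred from the
    plural names and the ``num_layers`` argument.
    """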
    torch.ops.sgl_kernel.transfer_kv_all_layer.default(
        src_k_layers,
        dst_k_layers,
        src_v_layers,
        dst_v_layers,
        src_indices,
        dst_indices,
        item_size,
        num_layers,
        block_quota,
        num_warps_per_block,
    )


def transfer_kv_all_layer_lf_pf(
    src_k_layers: torch.Tensor,
    dst_k: torch.Tensor,
    src_v_layers: torch.Tensor,
    dst_v: torch.Tensor,
    src_indices: torch.Tensor,
    dst_indices: torch.Tensor,
    item_size: int,
    dst_layout_dim: int,
    num_layers: int,
    block_quota: int = 2,
    num_warps_per_block: int = 16 if _is_hip else 32,
):
    torch.ops.sgl_kernel.transfer_kv_all_layer_lf_pf.default(
        src_k_layers,
        dst_k,
        src_v_layers,
        dst_v,
        src_indices,
        dst_indices,
        item_size,
        dst_layout_dim,
        num_layers,
        block_quota,
        num_warps_per_block,
    )


def transfer_kv_all_layer_lf_ph(
    src_k_layers: torch.Tensor,
    dst_k: torch.Tensor,
    src_v_layers: torch.Tensor,
    dst_v: torch.Tensor,
    src_indices: torch.Tensor,
    dst_indices: torch.Tensor,
    item_size: int,
    dst_layout_dim: int,
    num_layers: int,
    page_size: int,
    head_num: int,
    block_quota: int = 2,
    num_warps_per_block: int = 16 if _is_hip else 32,
):
    torch.ops.sgl_kernel.transfer_kv_all_layer_lf_ph.default(
        src_k_layers,
        dst_k,
        src_v_layers,
        dst_v,
        src_indices,
        dst_indices,
        item_size,
        dst_layout_dim,
        num_layers,
        page_size,
        head_num,
        block_quota,
        num_warps_per_block,
    )


def transfer_kv_direct(
    src_layers: List[torch.Tensor],
    dst_layers: List[torch.Tensor],
    src_indices: torch.Tensor,
    dst_indices: torch.Tensor,
    page_size: int,
):
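    """Copy between explicit per-layer tensor lists without the pointer-table
    indirection used by ``transfer_kv_all_layer``; presumably moves
    ``page_size`` consecutive slots per index pair. Inferred from the
    signature, not from the kernel source.
    """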
    torch.ops.sgl_kernel.transfer_kv_direct.default(
        src_layers, dst_layers, src_indices, dst_indices, page_size
    )


def transfer_kv_per_layer_direct_pf_lf(
    src_ptrs: List[torch.Tensor],
    dst_ptrs: List[torch.Tensor],
    src_indices: torch.Tensor,
    dst_indices: torch.Tensor,
    layer_id: int,
    page_size: int,
):
    torch.ops.sgl_kernel.transfer_kv_per_layer_direct_pf_lf.default(
        src_ptrs, dst_ptrs, src_indices, dst_indices, layer_id, page_size
    )


def transfer_kv_all_layer_direct_lf_pf(
    src_ptrs: List[torch.Tensor],
    dst_ptrs: List[torch.Tensor],
    src_indices: torch.Tensor,
    dst_indices: torch.Tensor,
    page_size: int,
):
    torch.ops.sgl_kernel.transfer_kv_all_layer_direct_lf_pf.default(
        src_ptrs, dst_ptrs, src_indices, dst_indices, page_size
    )


def transfer_kv_per_layer_mla(
    src: torch.Tensor,
    dst: torch.Tensor,
    src_indices: torch.Tensor,
    dst_indices: torch.Tensor,
    item_size: int,
    block_quota: int = 2,
    num_warps_per_block: int = 16 if _is_hip else 32,
):
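    """MLA (multi-head latent attention) variant of ``transfer_kv_per_layer``:
    the cache keeps a single combined KV buffer per layer, so one
    ``src``/``dst`` pair replaces the separate K and V tensors.
    """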
    torch.ops.sgl_kernel.transfer_kv_per_layer_mla.default(
        src,
        dst,
        src_indices,
        dst_indices,
        item_size,
        block_quota,
        num_warps_per_block,
    )


def transfer_kv_per_layer_mla_pf_lf(
    src: torch.Tensor,
    dst: torch.Tensor,
    src_indices: torch.Tensor,
    dst_indices: torch.Tensor,
    layer_id: int,
    item_size: int,
    src_layout_dim: int,
    block_quota: int = 2,
    num_warps_per_block: int = 16 if _is_hip else 32,
):
    torch.ops.sgl_kernel.transfer_kv_per_layer_mla_pf_lf.default(
        src,
        dst,
        src_indices,
        dst_indices,
        layer_id,
        item_size,
        src_layout_dim,
        block_quota,
        num_warps_per_block,
    )


def transfer_kv_all_layer_mla(
    src_layers: torch.Tensor,
    dst_layers: torch.Tensor,
    src_indices: torch.Tensor,
    dst_indices: torch.Tensor,
    item_size: int,
    num_layers: int,
    block_quota: int = 2,
    num_warps_per_block: int = 16 if _is_hip else 32,
):
    torch.ops.sgl_kernel.transfer_kv_all_layer_mla.default(
        src_layers,
        dst_layers,
        src_indices,
        dst_indices,
        item_size,
        num_layers,
        block_quota,
        num_warps_per_block,
    )


def transfer_kv_all_layer_mla_lf_pf(
    src_layers: torch.Tensor,
    dst: torch.Tensor,
    src_indices: torch.Tensor,
    dst_indices: torch.Tensor,
    item_size: int,
    dst_layout_dim: int,
    num_layers: int,
    block_quota: int = 2,
    num_warps_per_block: int = 16 if _is_hip else 32,
):
    torch.ops.sgl_kernel.transfer_kv_all_layer_mla_lf_pf.default(
        src_layers,
        dst,
        src_indices,
        dst_indices,
        item_size,
        dst_layout_dim,
        num_layers,
        block_quota,
        num_warps_per_block,
    )

def dcu_assign_req_to_token_pool(
    req_pool_indices: torch.Tensor,
    req_to_token: torch.Tensor,
    allocate_lens: torch.Tensor,
    new_allocate_lens: torch.Tensor,
    out_cache_loc: torch.Tensor,
    shape: int,
    bs: int,
):
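    """Thin wrapper over the DCU op that presumably writes the new cache
    locations in ``out_cache_loc`` into the ``req_to_token`` map of each of the
    first ``bs`` requests, filling the slots between ``allocate_lens`` and
    ``new_allocate_lens``. Inferred from the argument names.
    """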
    torch.ops.sgl_kernel.dcu_assign_req_to_token_pool(
        req_pool_indices,
        req_to_token,
        allocate_lens,
        new_allocate_lens,
        out_cache_loc,
        shape,
        bs,
    )

def dcu_get_last_loc(
    req_to_token: torch.Tensor,
    req_pool_indices: torch.Tensor,
    prefix_lens: torch.Tensor,
):
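    """Return, for each request, the cache location of its last prefix token,
    presumably ``req_to_token[req_pool_indices[i], prefix_lens[i] - 1]``;
    inferred from the argument names, not from the kernel source.
    """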
    result = torch.ops.sgl_kernel.dcu_get_last_loc(
        req_to_token,
        req_pool_indices,
        prefix_lens,
    )
    return result


def dcu_assign_extend_cache_locs(
    req_pool_indices: torch.Tensor,
    req_to_token: torch.Tensor,
    start_offset: torch.Tensor,
    end_offset: torch.Tensor,
    out_cache_loc: torch.Tensor,
    pool_len: int,
    bs: int,
):
    torch.ops.sgl_kernel.dcu_assign_extend_cache_locs(
        req_pool_indices,
        req_to_token,
        start_offset,
        end_offset,
        out_cache_loc,
        pool_len,
        bs,
    )

def dcu_create_chunked_prefix_cache_kv_indices(
    req_to_token: torch.Tensor,
    req_pool_indices: torch.Tensor,
    chunk_starts: torch.Tensor,
    chunk_seq_lens: torch.Tensor,
    chunk_cu_seq_lens: torch.Tensor,
    chunk_kv_indices: torch.Tensor,
    col_num: int,
    bs: int,
):
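    """Thin wrapper over the DCU op that presumably gathers, for each of the
    ``bs`` requests, the KV indices of one prefix chunk (the ``req_to_token``
    row between ``chunk_starts`` and ``chunk_starts + chunk_seq_lens``) into
    ``chunk_kv_indices`` at the offsets given by ``chunk_cu_seq_lens``.
    Inferred from the argument names.
    """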
    torch.ops.sgl_kernel.dcu_create_chunked_prefix_cache_kv_indices(
        req_to_token,
        req_pool_indices,
        chunk_starts,
        chunk_seq_lens,
        chunk_cu_seq_lens,
        chunk_kv_indices,
        col_num,
        bs,
    )