"model/vscode:/vscode.git/clone" did not exist on "fbd82ba5bb35c42a6b09f5bd50ff1aa0690b9626"
flash_causal_lm.py 74.9 KB
Newer Older
from contextlib import nullcontext
import math
import os
import time

import torch
import torch.distributed

import numpy as np

from loguru import logger

from dataclasses import dataclass
from opentelemetry import trace

from transformers import (
    PreTrainedTokenizerBase,
    AutoConfig,
    AutoTokenizer,
    GenerationConfig,
)

from typing import Any, ContextManager, Iterable, Optional, Tuple, List, Type, Dict

from text_generation_server.adapters import AdapterBatchData, AdapterBatchMetadata
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
from text_generation_server.utils.chunks import concat_text_chunks
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.models import Model
from text_generation_server.utils.log import log_master
from text_generation_server.utils.tokens import batch_top_tokens
from text_generation_server.utils.speculate import get_speculate
from text_generation_server.utils import (
    initialize_torch_distributed,
    weight_files,
    Weights,
)
from text_generation_server.models.types import (
    Batch,
    Tokens,
    Generation,
    GeneratedText,
)
from text_generation_server.pb import generate_pb2
from text_generation_server.models.globals import (
    MEM_POOL,
    ATTENTION,
    BLOCK_SIZE,
    CUDA_GRAPHS,
    TGI_WIGGLE_ROOM,
    get_adapter_to_index,
)
from text_generation_server.layers.attention import Seqlen
from text_generation_server.utils import StoppingCriteria, HeterogeneousNextTokenChooser
from text_generation_server.utils.dist import MEMORY_FRACTION
from text_generation_server.utils.quantization import get_loader
from text_generation_server.utils.segments import SegmentConcatBuilder, find_segments

from text_generation_server.utils.import_utils import (
    empty_cache,
    synchronize,
    get_free_memory,
)

tracer = trace.get_tracer(__name__)


# Will be set in init
SLIDING_WINDOW: Optional[int] = None


def set_sliding_window(sliding_window: int):
    global SLIDING_WINDOW
    SLIDING_WINDOW = sliding_window


def get_sliding_windows() -> Optional[int]:
    global SLIDING_WINDOW
    return SLIDING_WINDOW
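
# Illustrative usage (not part of the original module): the global is written
# once at model load time and read back when batches are built, e.g.
#
#   set_sliding_window(4096)        # e.g. a Mistral-style attention window
#   window = get_sliding_windows()  # -> 4096, or None if it was never set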


def init_cpu_threads_env(rank_id: int, world_size: int):
    import importlib.util

    if importlib.util.find_spec("numa") is not None:
        import numa
        import psutil

        nodes = numa.info.get_max_node() + 1
        rank_per_node = math.ceil(world_size / nodes)
        num_cpus_per_nodes = int(psutil.cpu_count(logical=False) / nodes)
        node_id = int(rank_id / rank_per_node)
        rank_offset_per_node = rank_id % rank_per_node
        if os.getenv("OMP_NUM_THREADS") is None:
            num_cpus_per_rank = max(int(num_cpus_per_nodes / rank_per_node), 1)
        else:
            num_cpus_per_rank = int(os.getenv("OMP_NUM_THREADS"))
        if len(numa.memory.get_membind_nodes()) == nodes:
            numa.memory.set_membind_nodes((node_id))
        torch.set_num_threads(num_cpus_per_rank)
        if len(numa.schedule.get_affinitive_cpus(0)) == psutil.cpu_count(logical=True):
            cpu_start = num_cpus_per_rank * rank_offset_per_node
            numa.schedule.run_on_cpus(
                0,
                *(
                    numa.info.node_to_cpus(node_id)[
                        cpu_start : cpu_start + num_cpus_per_rank
                    ]
                ),
            )
        logger.info(
            f"affinity={numa.schedule.get_affinitive_cpus(0)}, membind = {numa.memory.get_membind_nodes()}"
        )
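
        # Worked example (illustrative, assuming 2 NUMA nodes and 32 physical
        # cores): with world_size=4, rank_per_node = ceil(4 / 2) = 2 and
        # num_cpus_per_nodes = 32 // 2 = 16, so each rank pins
        # num_cpus_per_rank = 16 // 2 = 8 cores; rank 3 lands on node
        # node_id = 3 // 2 = 1 at offset 1, i.e. CPUs 8..15 of that node.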


@dataclass
class FlashCausalLMBatch(Batch):
    batch_id: int
    requests: List[generate_pb2.Request]
    # request id -> idx in list mapping
    requests_idx_mapping: Dict[int, int]

    # Decoder values
    input_ids: torch.Tensor
    position_ids: torch.Tensor
    speculative_ids: Optional[torch.Tensor]

    # Flash Attention values

    # tensor of length b containing the cumulative sequence lengths of the sequences in the batch, only used in prefill
    cu_seqlen_prefill: Optional[torch.Tensor]
    # Prefill cache indices is used to slice into the kv tensor before caching it into the paged attention buffers
    # as we only keep SLIDING_WINDOW values instead of the whole tensor
    prefill_cache_indices: Optional[torch.Tensor]

    # Paged Attention values

    # Set when creating the batch
    # CPU tensor of length b indicating the start of each sequence in slots
    start_slots: torch.Tensor
    # tensor of indices of the currently used slots, length = \sum_{i=0}^{b} s_i in prefill, length = b in decode
    slot_indices: torch.Tensor

    # list of length b of list of length s_i // block_size
    block_tables: List[List[int]]
    # tensor of size [b, max_total_seqlen // block_size] holding the paged attention block tables for all sequences
    block_tables_tensor: torch.Tensor
    # tensor of length \sum_{i=0}^{b} max_s_i holding the paged attention slots for all sequences
    slots: torch.Tensor

    # size [b], containing the number of blocks that can be retrieved from the cache
    prefix_lens: List[int]
    prefix_lens_tensor: torch.Tensor
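
    # Illustrative layout (not from the original): with BLOCK_SIZE = 16, a
    # request budgeted for 20 tokens needs ceil(20 / 16) = 2 blocks. If it is
    # assigned blocks [3, 7], its slots are 48..63 and 112..127, and its row
    # in block_tables_tensor is [3, 7, 0, ...] padded up to max_blocks.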

    max_seqlen: int

    # Prefill metadata tensors to efficiently compute logprobs
    prefill_head_indices: Optional[torch.Tensor]
    prefill_next_token_indices: Optional[torch.Tensor]
    prefill_cu_outlens: Optional[List[int]]

    # Prefixes
    prefix_ids: List[List[int]]

    # All tokens
    all_input_ids: List[List[int]]
    all_input_ids_tensor: torch.Tensor

    # Lengths of all generations present in the batch
    input_lengths: List[int]
    input_lengths_tensor: torch.Tensor
    prefix_offsets: List[Optional[int]]
    read_offsets: List[Optional[int]]

    # Generation helpers
    next_token_chooser: HeterogeneousNextTokenChooser
    stopping_criterias: List[StoppingCriteria]
    top_n_tokens: List[int]
    top_n_tokens_tensor: torch.Tensor

    # Adapter metadata for each request
    adapter_meta: AdapterBatchMetadata

    # Number of blocks in this batch
    num_blocks: int
    # Maximum number of blocks
    max_blocks: int

    def to_pb(self) -> generate_pb2.CachedBatch:
        return generate_pb2.CachedBatch(
            id=self.batch_id,
            request_ids=[r.id for r in self.requests],
            size=len(self),
            max_tokens=self.num_blocks * BLOCK_SIZE,
        )

    @classmethod
    def batch_tokenized_inputs(
        cls, requests: Iterable[generate_pb2.Request], tokenizer
    ):
        max_length = 0
        all_input_ids = []
        batch_size = 0
        for r in requests:
            batch_size += 1
            inputs = concat_text_chunks(r.input_chunks.chunks)
            input_ids = tokenizer(
                inputs,
                truncation=True,
                max_length=r.truncate,
                add_special_tokens=r.add_special_tokens,
            )["input_ids"]
            max_length = max(max_length, len(input_ids))
            all_input_ids.append(input_ids)
        return all_input_ids

    @classmethod
    def from_tokenized(
        cls,
        pb: generate_pb2.Batch,
        tokenizer: PreTrainedTokenizerBase,
        batch_tokenized_inputs,
        dtype: torch.dtype,
        device: torch.device,
    ) -> "FlashCausalLMBatch":
        sliding_window = get_sliding_windows()
        position_ids = []
        cu_seqlen_prefill = [0]
        start_slots = []
        slot_indices = []
        prefill_cache_indices = []

        input_lengths = []
        prefix_offsets = []
        read_offsets = []
        all_input_ids = []
        prefix_ids = []
        requests_idx_mapping = {}

        all_prefill_logprobs = True
        no_prefill_logprobs = True
        prefill_head_indices = []
        prefill_next_token_indices = []
        prefill_cu_outlens = [0]

        next_token_chooser_parameters = []
        stopping_criterias = []
        top_n_tokens = []

        adapter_indices_list = []
        adapter_set = set()

        # Cumulative length
        cumulative_length = 0
        cumulative_slot_tokens = 0
        prefill_out_cumulative_length = 0

        num_blocks = 0
        max_seqlen = 0
        max_length = 0
        max_blocks = 0

        block_tables = []
        slots = []
        prefix_lens = []

        # Parse batch
        for i, (r, tokenized_input) in enumerate(
            zip(pb.requests, batch_tokenized_inputs)
        ):
            # request id -> idx in list mapping
            requests_idx_mapping[r.id] = i

            orig_input_length = len(tokenized_input)

            prefix_len = r.prefix_len
            assert (
                prefix_len <= orig_input_length
            ), f"Prefix {prefix_len} vs input {orig_input_length}"
            if prefix_len == orig_input_length:
                assert prefix_len > 0
                prefix_len -= 1

            # Commented as it's costly.
            # log_master(logger.debug, "Tokenized input ids {tokenized_input}")
            prefix_ids.append(tokenized_input[:prefix_len])
            tokenized_input = tokenized_input[prefix_len:]

            input_length = len(tokenized_input)
            input_lengths.append(input_length)

            prefix_offsets.append(input_length - 5)
            read_offsets.append(input_length)

            all_input_ids.append(tokenized_input)

            # Position ids
            request_position_ids = torch.arange(
                prefix_len, orig_input_length, dtype=torch.int32
            )
            position_ids.append(request_position_ids)

            # Add cumulative lengths of all previous inputs
            cu_seqlen_prefill.append(cumulative_length + input_length)

            next_token_chooser_parameters.append(r.parameters)

            stopping_criteria = StoppingCriteria.from_pb(
                r.stopping_parameters, tokenizer
            )
            max_new_tokens = stopping_criteria.max_new_tokens
            stopping_criterias.append(stopping_criteria)
            top_n_tokens.append(r.top_n_tokens)

            ADAPTER_TO_INDEX = get_adapter_to_index()
            adapter_index = ADAPTER_TO_INDEX.get(r.adapter_id, 0)
            adapter_indices_list.append(torch.full((input_length,), adapter_index))
            adapter_set.add(adapter_index)

            # Paged attention
            # Remove one as the first token does not have a past
            speculative_length = get_speculate()
            speculative_length = 0 if speculative_length is None else speculative_length

            # Tokens that need to be mapped to blocks.
            block_tokens = orig_input_length + max_new_tokens - 1 + speculative_length

            # Tokens that need to be mapped to slots. We don't need slots for the
            # cached prefix (if present).
            slot_tokens = input_length + max_new_tokens - 1 + speculative_length
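
            # Worked example (illustrative): with orig_input_length=10,
            # prefix_len=4 (so input_length=6), max_new_tokens=5 and no
            # speculation, block_tokens = 10 + 5 - 1 = 14 tokens must be
            # covered by blocks, but only slot_tokens = 6 + 5 - 1 = 10 fresh
            # slots are needed, since the 4 cached prefix tokens already
            # occupy slots in existing blocks.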

            # blocks and slots can be empty (for example in warmup)
            if not r.blocks:
                needed_blocks = math.ceil(block_tokens / BLOCK_SIZE)
                request_blocks = [
                    b for b in range(num_blocks, num_blocks + needed_blocks)
                ]
                request_slots = [
                    s
                    for b in request_blocks
                    for s in range(b * BLOCK_SIZE, (b + 1) * BLOCK_SIZE)
                ]
            else:
                request_blocks = r.blocks
                request_slots = r.slots[
                    prefix_len:  #: orig_input_length + max_new_tokens + speculative_length
                ]

            block_tables.append(request_blocks)
            slots.extend(request_slots)
            prefix_lens.append(prefix_len)
            num_blocks += len(request_blocks)
            start_slots.append(cumulative_slot_tokens)

            request_slot_indices = torch.arange(
                cumulative_slot_tokens,
                cumulative_slot_tokens + input_length,
                dtype=torch.int64,
            )
            slot_indices.append(request_slot_indices)

            # Create tensor to slice into the kv tensor in prefill
            if sliding_window is not None:
                request_prefill_cache_indices = torch.arange(
                    cumulative_length + max(0, input_length - sliding_window),
                    cumulative_length + input_length,
                    dtype=torch.int64,
                )
                prefill_cache_indices.append(request_prefill_cache_indices)
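
            # Illustrative example (not from the original): with
            # sliding_window=3, input_length=5 and cumulative_length=0, this
            # keeps indices [2, 3, 4] only, since keys/values of earlier
            # positions fall outside the attention window and are never
            # written to the paged cache.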

            all_prefill_logprobs = all_prefill_logprobs and r.prefill_logprobs
            no_prefill_logprobs = no_prefill_logprobs and not r.prefill_logprobs

            if r.prefill_logprobs:
                prefill_head_indices.append(request_position_ids + cumulative_length)
                prefill_next_token_indices.append(
                    prefill_out_cumulative_length + input_length - 1
                )
                prefill_cu_outlens.append(prefill_out_cumulative_length + input_length)
                prefill_out_cumulative_length += input_length
            else:
                prefill_head_indices.append(
                    torch.tensor(
                        [cumulative_length + input_length - 1], dtype=torch.int32
                    )
                )
                prefill_next_token_indices.append(prefill_out_cumulative_length)
                prefill_cu_outlens.append(prefill_out_cumulative_length + 1)
                prefill_out_cumulative_length += 1
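
            # Illustrative trace (not from the original): for two requests of
            # lengths 3 and 2 where only the first wants prefill logprobs,
            # the head indices cover positions [0, 1, 2] for request 0 but
            # only the last position [4] for request 1, and prefill_cu_outlens
            # ends up as [0, 3, 4].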

            # Update
            cumulative_length += input_length
            cumulative_slot_tokens += slot_tokens
            max_seqlen = max(max_seqlen, input_length)
            max_blocks = max(max_blocks, len(request_blocks))
            max_length = max(
                max_length, input_length + max_new_tokens + speculative_length
            )

        adapter_indices = torch.cat(adapter_indices_list).to(
            dtype=torch.int64, device=device
        )

        next_token_chooser = HeterogeneousNextTokenChooser.from_pb(
            next_token_chooser_parameters, dtype, device, tokenizer
        )
        start_slots = torch.tensor(start_slots, dtype=torch.int64)

        # Padded all_input_ids_tensor
        all_input_ids_tensor = np.zeros(
            (len(all_input_ids), max_length), dtype=np.int64
        )
        for i, input_ids in enumerate(all_input_ids):
            all_input_ids_tensor[i, : len(input_ids)] = input_ids

        # Create tensors on device
        all_input_ids_tensor = torch.tensor(
            all_input_ids_tensor, dtype=torch.int64, device=device
        )

        if len(pb.requests) > 1:
            input_ids = np.concatenate(all_input_ids, dtype=np.int64)
            position_ids = torch.cat(position_ids)
            slot_indices = torch.cat(slot_indices)
            if sliding_window is not None:
                prefill_cache_indices = torch.cat(prefill_cache_indices)
        else:
            input_ids = all_input_ids[0]
            position_ids = position_ids[0]
            slot_indices = slot_indices[0]
            if sliding_window is not None:
                prefill_cache_indices = prefill_cache_indices[0]

        cu_seqlen_prefill = torch.tensor(
            cu_seqlen_prefill, device=device, dtype=torch.int32
        )
        position_ids = position_ids.to(device)
        slot_indices = slot_indices.to(device)
        prefill_cache_indices = (
            prefill_cache_indices.to(device) if sliding_window is not None else None
        )
        input_ids = torch.tensor(input_ids, dtype=torch.int64, device=device)
        input_lengths_tensor = torch.tensor(
            input_lengths, dtype=torch.int32, device=device
        )

        adapter_segments, adapter_segment_indices = find_segments(adapter_indices)
        adapter_segments = torch.tensor(
            adapter_segments, dtype=torch.int32, device=device
        )

        if all_prefill_logprobs:
            prefill_head_indices = None
            prefill_next_token_indices = cu_seqlen_prefill[1:] - 1
        elif no_prefill_logprobs:
            prefill_head_indices = cu_seqlen_prefill[1:] - 1
            prefill_next_token_indices = None
        else:
            prefill_head_indices = torch.tensor(
                torch.cat(prefill_head_indices), dtype=torch.int64, device=device
            )
            prefill_next_token_indices = torch.tensor(
                prefill_next_token_indices, dtype=torch.int64, device=device
            )

        top_n_tokens_tensor = torch.tensor(
            top_n_tokens, device=device, dtype=torch.int64
        )

        slots = torch.tensor(slots, dtype=torch.int64, device=device)

        block_tables_tensor = torch.zeros(
            (len(block_tables), max_blocks), dtype=torch.int32, device="cpu"
        )
        for i, request_blocks in enumerate(block_tables):
            block_tables_tensor[i, : len(request_blocks)] = torch.tensor(request_blocks)
        block_tables_tensor = block_tables_tensor.to(device)
        prefix_lens_tensor = torch.tensor(prefix_lens, dtype=torch.int32, device=device)

        return cls(
            batch_id=pb.id,
            requests=pb.requests,
            requests_idx_mapping=requests_idx_mapping,
            input_ids=input_ids,
            position_ids=position_ids,
            cu_seqlen_prefill=cu_seqlen_prefill,
            prefill_cache_indices=prefill_cache_indices,
            start_slots=start_slots,
            slot_indices=slot_indices,
            block_tables=block_tables,
            block_tables_tensor=block_tables_tensor,
            slots=slots,
            prefix_lens=prefix_lens,
            prefix_lens_tensor=prefix_lens_tensor,
            max_seqlen=max_seqlen,
            prefill_head_indices=prefill_head_indices,
            prefill_next_token_indices=prefill_next_token_indices,
            prefill_cu_outlens=prefill_cu_outlens,
            input_lengths=input_lengths,
            input_lengths_tensor=input_lengths_tensor,
            prefix_offsets=prefix_offsets,
            read_offsets=read_offsets,
            all_input_ids=all_input_ids,
            all_input_ids_tensor=all_input_ids_tensor,
            prefix_ids=prefix_ids,
            next_token_chooser=next_token_chooser,
            stopping_criterias=stopping_criterias,
            top_n_tokens=top_n_tokens,
            top_n_tokens_tensor=top_n_tokens_tensor,
            num_blocks=num_blocks,
            max_blocks=max_blocks,
            adapter_meta=AdapterBatchMetadata(
                adapter_indices=adapter_indices,
                adapter_set=adapter_set,
                adapter_segments=adapter_segments,
                segment_indices=adapter_segment_indices,
            ),
            speculative_ids=None,
        )

    @classmethod
    def from_pb(
        cls,
        pb: generate_pb2.Batch,
        tokenizer: PreTrainedTokenizerBase,
        dtype: torch.dtype,
        device: torch.device,
    ) -> "FlashCausalLMBatch":
        assert len(pb.requests) > 0
        batch_tokenized_inputs = cls.batch_tokenized_inputs(pb.requests, tokenizer)
        return cls.from_tokenized(pb, tokenizer, batch_tokenized_inputs, dtype, device)

    @tracer.start_as_current_span("filter")
    def filter(self, request_ids: List[int]) -> "FlashCausalLMBatch":
        if len(request_ids) == 0:
            raise ValueError("Batch must have at least one request")
        # We assume that if len(requests) == len(self) then the requests are the same
        if len(request_ids) == len(self):
            return self

        device = self.input_ids.device

        # New values after filtering
        requests_idx_mapping = {}

        # Used to index into tensors
        indices = []

        # slots to keep after filtering
        slot_filtering_indices = torch.zeros(
            self.slots.shape[0], dtype=torch.bool, device=device
        )

        # Create on CPU to only move to GPU once instead of at every copy
        slot_indices = torch.empty(len(request_ids), dtype=torch.int64)
        max_seqlen = 0

        requests = []
        start_slots = []
        block_tables = []
        all_input_ids = []
        prefix_ids = []

        input_lengths = []
        prefix_lens = []
        prefix_offsets = []
        read_offsets = []

        stopping_criterias = []
        top_n_tokens = []
        adapter_set = set()

        num_blocks = 0
        max_blocks = 0
        # Cumulative length
        cumulative_max_length = 0

        for i, request_id in enumerate(request_ids):
            idx = self.requests_idx_mapping[request_id]
            indices.append(idx)
            requests_idx_mapping[request_id] = i

            requests.append(self.requests[idx])

            # Get length
            request_input_length = self.input_lengths[idx]
            prefix_len = self.prefix_lens[idx]
            max_seqlen = max(max_seqlen, request_input_length)

            all_input_ids.append(self.all_input_ids[idx])
            prefix_ids.append(self.prefix_ids[idx])

            input_lengths.append(request_input_length)
            prefix_lens.append(prefix_len)
            prefix_offsets.append(self.prefix_offsets[idx])
            read_offsets.append(self.read_offsets[idx])

            stopping_criteria = self.stopping_criterias[idx]
            stopping_criterias.append(stopping_criteria)

            top_n_tokens.append(self.top_n_tokens[idx])

            ADAPTER_TO_INDEX = get_adapter_to_index()
            adapter_index = ADAPTER_TO_INDEX.get(self.requests[idx].adapter_id, 0)
            adapter_set.add(adapter_index)

            remaining_tokens = (
                stopping_criteria.max_new_tokens - stopping_criteria.current_tokens
            )

            request_block_table = self.block_tables[idx]
            num_blocks += len(request_block_table)
            block_tables.append(request_block_table)
            start_slots.append(cumulative_max_length)

            # Copy to tensor (CPU)
            slot_indices[i] = cumulative_max_length + request_input_length - 1

            # Set slice
            slot_filtering_indices[
                self.start_slots[idx] : self.start_slots[idx]
                + request_input_length
                + remaining_tokens
                - 1
            ] = True

            cumulative_max_length += request_input_length + remaining_tokens - 1
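
            # Illustrative example (not from the original): a kept request
            # with request_input_length=4 and remaining_tokens=3 marks
            # 4 + 3 - 1 = 6 slots as live starting at its original start_slot,
            # and its decode slot index points at position
            # cumulative_max_length + 4 - 1 of the filtered slots tensor.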

            max_blocks = max(max_blocks, len(request_block_table))

        # Index into tensors
        input_ids = self.input_ids[indices]
        position_ids = self.position_ids[indices]
        adapter_indices = self.adapter_meta.adapter_indices[indices]
        all_input_ids_tensor = self.all_input_ids_tensor[indices]
        block_tables_tensor = self.block_tables_tensor[indices]
        input_lengths_tensor = self.input_lengths_tensor[indices]
        slots = self.slots[slot_filtering_indices]
        prefix_lens_tensor = self.prefix_lens_tensor[indices]
        next_token_chooser = self.next_token_chooser.filter(indices)
        top_n_tokens_tensor = self.top_n_tokens_tensor[indices]
        speculative_ids = (
            self.speculative_ids[indices] if self.speculative_ids is not None else None
        )

        start_slots = torch.tensor(start_slots, dtype=torch.int64)

        # Move to GPU now that we have the whole tensor
        slot_indices = slot_indices.to(device)

        adapter_segments, adapter_segment_indices = find_segments(adapter_indices)
        adapter_segments = torch.tensor(
            adapter_segments, dtype=torch.int32, device=device
        )
        # assert sum(len(b) for b in block_tables) == (block_tables_tensor != 0).sum()

        return type(self)(
            batch_id=self.batch_id,
            requests=requests,
            requests_idx_mapping=requests_idx_mapping,
            input_ids=input_ids,
            position_ids=position_ids,
            cu_seqlen_prefill=None,
            prefill_cache_indices=None,
            start_slots=start_slots,
            slot_indices=slot_indices,
            block_tables=block_tables,
            block_tables_tensor=block_tables_tensor,
            slots=slots,
            max_seqlen=max_seqlen,
            prefill_head_indices=None,
            prefill_next_token_indices=None,
            prefill_cu_outlens=None,
            input_lengths=input_lengths,
            input_lengths_tensor=input_lengths_tensor,
            prefix_lens=prefix_lens,
            prefix_lens_tensor=prefix_lens_tensor,
            prefix_offsets=prefix_offsets,
            read_offsets=read_offsets,
            all_input_ids=all_input_ids,
            all_input_ids_tensor=all_input_ids_tensor,
            prefix_ids=prefix_ids,
            next_token_chooser=next_token_chooser,
            stopping_criterias=stopping_criterias,
            top_n_tokens=top_n_tokens,
            top_n_tokens_tensor=top_n_tokens_tensor,
            num_blocks=num_blocks,
            max_blocks=max_blocks,
            speculative_ids=speculative_ids,
            adapter_meta=AdapterBatchMetadata(
                adapter_indices=adapter_indices,
                adapter_set=adapter_set,
                adapter_segments=adapter_segments,
                segment_indices=adapter_segment_indices,
            ),
        )

    @classmethod
    @tracer.start_as_current_span("concatenate")
    def concatenate(cls, batches: List["FlashCausalLMBatch"]) -> "FlashCausalLMBatch":
        # Batch attributes
        requests = []
        requests_idx_mapping = {}

        num_blocks = 0
        total_batch_size = 0
        total_slots = 0
        max_blocks = 0
        max_length = 0
        max_seqlen = 0
        for b in batches:
            total_batch_size += len(b)
            total_slots += len(b.slots)
            num_blocks += b.num_blocks
            speculative_length = (
                b.speculative_ids.shape[1] if b.speculative_ids is not None else 0
            )
            max_blocks = max(max_blocks, b.max_blocks)
            max_seqlen = max(max_seqlen, b.max_seqlen)
            max_length = max(
                max_length,
                max(
                    input_length
                    + stopping_criteria.max_new_tokens
                    + speculative_length
                    - stopping_criteria.current_tokens
                    for input_length, stopping_criteria in zip(
                        b.input_lengths, b.stopping_criterias
                    )
                ),
            )
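
        # Illustrative arithmetic (not from the original): a request with
        # input_length=7, max_new_tokens=10 and speculative_length=2 that has
        # already produced 4 tokens contributes 7 + 10 + 2 - 4 = 15 to
        # max_length, the padded width of the merged all_input_ids_tensor.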

        input_ids = batches[0].input_ids.new_empty(total_batch_size)
        position_ids = batches[0].position_ids.new_empty(total_batch_size)
        slots = batches[0].slots.new_empty(total_slots)
        slot_indices = batches[0].slot_indices.new_empty(total_batch_size)
        input_lengths_tensor = batches[0].input_lengths_tensor.new_empty(
            total_batch_size
        )
        block_tables_tensor = batches[0].block_tables_tensor.new_zeros(
            (total_batch_size, max_blocks)
        )
        prefix_lens_tensor = batches[0].prefix_lens_tensor.new_empty(total_batch_size)
        all_input_ids_tensor = batches[0].all_input_ids_tensor.new_zeros(
            (total_batch_size, max_length)
        )
        top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros(
            total_batch_size,
        )
        total_indices_size = sum(
            b.adapter_meta.adapter_indices.shape[0] for b in batches
        )
        adapter_indices = batches[0].adapter_meta.adapter_indices.new_empty(
            total_indices_size
        )
        adapter_set = set()
        adapter_segment_builder = SegmentConcatBuilder()

        start_slots = []
        block_tables = []
        prefix_lens = []
        all_input_ids = []
        prefix_ids = []

        input_lengths = []
        prefix_offsets = []
        read_offsets = []

        next_token_chooser_parameters = []
        fsm_grammar_states = []
        stopping_criterias = []
        top_n_tokens = []

        # Cumulative length
        cumulative_batch_size = 0
        cumulative_slots = 0
        cumulative_adapter_indices_size = 0

        for i, batch in enumerate(batches):
            requests.extend(batch.requests)

            if i == 0:
                requests_idx_mapping = batch.requests_idx_mapping
            else:
                # We need to offset the mapping for each batch by the cumulative batch size
                for k, v in batch.requests_idx_mapping.items():
                    requests_idx_mapping[k] = v + cumulative_batch_size

            start_index = cumulative_batch_size
            end_index = cumulative_batch_size + len(batch)
            slots_start_index = cumulative_slots
            slots_end_index = cumulative_slots + len(batch.slots)

            # Copy tensors (GPU)
            input_ids[start_index:end_index] = batch.input_ids
            position_ids[start_index:end_index] = batch.position_ids
            slot_indices[start_index:end_index] = batch.slot_indices + cumulative_slots
            input_lengths_tensor[start_index:end_index] = batch.input_lengths_tensor
            top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor
            slots[slots_start_index:slots_end_index] = batch.slots

            # Copy over adapter indices
            adapter_start_index = cumulative_adapter_indices_size
            adapter_end_index = (
                cumulative_adapter_indices_size
                + batch.adapter_meta.adapter_indices.shape[0]
            )
            adapter_indices[adapter_start_index:adapter_end_index] = (
                batch.adapter_meta.adapter_indices
            )
            cumulative_adapter_indices_size = adapter_end_index
            adapter_set.update(batch.adapter_meta.adapter_set)
            adapter_segment_builder.concat(
                batch.adapter_meta.adapter_segments, batch.adapter_meta.segment_indices
            )

            all_input_ids_tensor[
                start_index:end_index, : batch.all_input_ids_tensor.shape[1]
            ] = batch.all_input_ids_tensor[:, :max_length]

            block_tables_tensor[
                start_index:end_index, : batch.block_tables_tensor.shape[1]
            ] = batch.block_tables_tensor[:, :max_blocks]

            prefix_lens_tensor[start_index:end_index] = batch.prefix_lens_tensor

            start_slots.append(batch.start_slots + cumulative_slots)

            block_tables.extend(batch.block_tables)
            prefix_lens.extend(batch.prefix_lens)
            all_input_ids.extend(batch.all_input_ids)
            prefix_ids.extend(batch.prefix_ids)

            input_lengths.extend(batch.input_lengths)
            prefix_offsets.extend(batch.prefix_offsets)
            read_offsets.extend(batch.read_offsets)

            next_token_chooser_parameters.extend([r.parameters for r in batch.requests])
            fsm_grammar_states.extend(batch.next_token_chooser.fsm_grammar_states)
            stopping_criterias.extend(batch.stopping_criterias)

            top_n_tokens.extend(batch.top_n_tokens)

            # Update
            cumulative_batch_size += len(batch)
            cumulative_slots += len(batch.slots)

        start_slots = torch.concat(start_slots)

        # assert sum(len(b) for b in block_tables) == (block_tables_tensor != 0).sum()

        next_token_chooser = HeterogeneousNextTokenChooser.from_pb(
            next_token_chooser_parameters,
            dtype=batches[0].next_token_chooser.dtype,
            device=batches[0].next_token_chooser.device,
            tokenizer=batches[0].next_token_chooser.tokenizer,
            fsm_grammar_states=fsm_grammar_states,
        )

        speculative_ids = (
            torch.cat([b.speculative_ids for b in batches], dim=0)
            if batches[0].speculative_ids is not None
            else None
        )

        adapter_segments, adapter_segment_indices = adapter_segment_builder.build()

        return cls(
            batch_id=batches[0].batch_id,
            requests=requests,
            requests_idx_mapping=requests_idx_mapping,
            input_ids=input_ids,
            position_ids=position_ids,
            cu_seqlen_prefill=None,
            prefill_cache_indices=None,
            start_slots=start_slots,
            slot_indices=slot_indices,
            block_tables=block_tables,
            block_tables_tensor=block_tables_tensor,
            prefix_lens=prefix_lens,
            prefix_lens_tensor=prefix_lens_tensor,
            slots=slots,
            max_seqlen=max_seqlen,
            prefill_head_indices=None,
            prefill_next_token_indices=None,
            prefill_cu_outlens=None,
            input_lengths=input_lengths,
            input_lengths_tensor=input_lengths_tensor,
            prefix_offsets=prefix_offsets,
            read_offsets=read_offsets,
            all_input_ids=all_input_ids,
            all_input_ids_tensor=all_input_ids_tensor,
            prefix_ids=prefix_ids,
            next_token_chooser=next_token_chooser,
            stopping_criterias=stopping_criterias,
            top_n_tokens=top_n_tokens,
            top_n_tokens_tensor=top_n_tokens_tensor,
            num_blocks=num_blocks,
            max_blocks=max_blocks,
            speculative_ids=speculative_ids,
            adapter_meta=AdapterBatchMetadata(
                adapter_indices=adapter_indices,
                adapter_set=adapter_set,
                adapter_segments=adapter_segments,
                segment_indices=adapter_segment_indices,
            ),
        )

    def __len__(self):
        return len(self.requests)


ADAPTER_LAYERS = [
    "q_proj",
    "k_proj",
    "v_proj",
    "o_proj",
    "gate_proj",
    "up_proj",
    "down_proj",
]
ROW_PARALLEL = {"o_proj", "down_proj", "lm_head"}
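
# Descriptive note (not from the original): under tensor parallelism the
# row-parallel layers (o_proj, down_proj, lm_head) shard their weight along
# the input dimension, so their adapter weights are split differently from
# the column-parallel q/k/v/gate/up projections listed above.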


class FlashCausalLM(Model):
    def __init__(
        self,
        model_id: str,
        model_class,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        speculator: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
        lora_adapter_ids: Optional[list] = [],
        tokenizer_class=AutoTokenizer,
        config_class=AutoConfig,
        default_dtype=torch.float16,
        aliases=None,
        # Used for Santacoder override of config
        num_kv_heads: Optional[int] = None,
        # Deepseek V2 uses different QK and V dims.
        head_size: Optional[int] = None,
        skip_special_tokens: bool = True,
    ):
        self.quantize = quantize
        self.process_group, rank, world_size = initialize_torch_distributed()
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = default_dtype if dtype is None else dtype
        elif SYSTEM == "ipex":
            if hasattr(torch, "xpu") and torch.xpu.is_available():
                device = torch.device(f"xpu:{rank}")
                dtype = default_dtype if dtype is None else dtype
            else:
                device = torch.device("cpu")
                # Float16 doesn't exist on target.
                dtype = torch.bfloat16 if dtype is None else dtype
                init_cpu_threads_env(rank_id=rank, world_size=world_size)
        else:
            raise NotImplementedError(f"{model_class} is only available on GPU")

        tokenizer = tokenizer_class.from_pretrained(
            model_id,
            revision=revision,
            padding_side="left",
            truncation_side="left",
            trust_remote_code=trust_remote_code,
        )
        try:
            generation_config = GenerationConfig.from_pretrained(
                model_id, revision=revision, trust_remote_code=trust_remote_code
            )
            if isinstance(generation_config.eos_token_id, (list, set)):
                # TODO Huge hack
                tokenizer._eos_token_ids = set(generation_config.eos_token_id)
        except Exception:
            pass

        config = config_class.from_pretrained(
            model_id, revision=revision, trust_remote_code=trust_remote_code
        )
        config.quantize = quantize
        config.speculator = speculator

        torch.distributed.barrier(group=self.process_group)

        weights_loader = get_loader(quantize, model_id, revision)
        filenames = weight_files(model_id, revision=revision, extension=".safetensors")
        weights = Weights(
            filenames,
            device,
            dtype,
            process_group=self.process_group,
            aliases=aliases,
            weights_loader=weights_loader,
        )

        prefix = ""
        model = model_class(prefix, config, weights)
        torch.distributed.barrier(group=self.process_group)

        # VLM models define the config we care about in their text_config
        text_config = getattr(config, "text_config", None)
        if text_config is not None:
            config = text_config

        if getattr(config, "sliding_window", None) is not None:
            set_sliding_window(config.sliding_window)
        else:
            config.sliding_window = None

        self.num_layers = config.num_hidden_layers
        self.num_heads = config.num_attention_heads // self.process_group.size()
        # Validation is done in the model itself
        if num_kv_heads is None:
            num_kv_heads = getattr(config, "num_key_value_heads", None)
            # GPT-2 workaround
            if num_kv_heads is None:
                num_kv_heads = getattr(config, "n_head", None)
        if num_kv_heads is None:
            raise ValueError("Cannot get the number of key/value heads")
        self.num_kv_heads = (
            num_kv_heads // self.process_group.size()
            if num_kv_heads > 1
            else num_kv_heads
        )
        assert self.num_kv_heads > 0

        if head_size is None:
            # Some models use GQA and different sizes for o_proj
            # and q_proj, that allows for that.
            if hasattr(config, "head_dim"):
                self.head_size = config.head_dim
            else:
                self.head_size = config.hidden_size // config.num_attention_heads
        else:
            self.head_size = head_size

        self.cuda_graphs = {}
        self.kv_cache = []

        if ATTENTION == "flashinfer":
            from text_generation_server.layers.attention.flashinfer import (
                create_prefill_state,
                create_decode_state,
                create_prefill_with_paged_kv_state,
            )

            self.prefill_state = create_prefill_state(device=device)
            self.prefill_with_paged_kv_state = create_prefill_with_paged_kv_state(
                device=device
            )

            self.decode_state = create_decode_state(
                device=device,
                num_heads=self.num_heads,
                num_kv_heads=self.num_kv_heads,
            )

        super().__init__(
            model_id=model_id,
            model=model,
            tokenizer=tokenizer,
            requires_padding=False,
            dtype=dtype,
            device=device,
            rank=rank,
            world_size=world_size,
            sliding_window=config.sliding_window,
        )

    @property
    def batch_type(self) -> Type[FlashCausalLMBatch]:
        return FlashCausalLMBatch

    def max_past(self) -> Optional[int]:
        return getattr(self.model, "max_past", None)

    def init_kv_cache(
        self,
        num_blocks: int,
        num_layers: int,
        num_heads: int,
        head_size: int,
        dtype: torch.dtype,
        device: torch.device,
    ):
        self.kv_cache = []
        empty_cache()

        element_size = torch.tensor([], dtype=dtype).element_size()

        if SYSTEM == "ipex" and device.type == "xpu":
            x = 1
        else:
            x = BLOCK_SIZE // element_size

        if ATTENTION in {"flashdecoding", "flashinfer"}:
            self.kv_cache = [
                (
                    torch.empty(
                        (num_blocks, BLOCK_SIZE, num_heads, head_size),
                        dtype=dtype,
                        device=device,
                    ),
                    torch.empty(
                        (num_blocks, BLOCK_SIZE, num_heads, head_size),
                        dtype=dtype,
                        device=device,
                    ),
                )
                for _ in range(num_layers)
            ]
        elif SYSTEM == "ipex" and device == torch.device("cpu"):
            self.kv_cache = [
                (
                    torch.empty(
                        (num_blocks, num_heads, BLOCK_SIZE, head_size),
                        dtype=dtype,
                        device=device,
                    ),
                    torch.empty(
                        (num_blocks, num_heads, BLOCK_SIZE, head_size),
                        dtype=dtype,
                        device=device,
                    ),
                )
                for _ in range(num_layers)
            ]
        else:
            self.kv_cache = [
                (
                    torch.empty(
                        (num_blocks, num_heads, head_size // x, BLOCK_SIZE, x),
                        dtype=dtype,
                        device=device,
                    ),
                    torch.empty(
                        (num_blocks, num_heads, head_size, BLOCK_SIZE),
                        dtype=dtype,
                        device=device,
                    ),
                )
                for _ in range(num_layers)
            ]
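
        # Shape summary (descriptive, not from the original): flashdecoding /
        # flashinfer use [num_blocks, BLOCK_SIZE, num_heads, head_size]; ipex
        # on CPU uses [num_blocks, num_heads, BLOCK_SIZE, head_size]; the
        # default paged layout splits keys into
        # [num_blocks, num_heads, head_size // x, BLOCK_SIZE, x] with
        # x = BLOCK_SIZE // element_size (x = 8 for float16), keeping each
        # 16-byte fragment of a key contiguous for the attention kernel.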

    def cuda_graph_warmup(self, bs: int, max_s: int, max_bt: int):
        input_ids = torch.zeros(bs, dtype=torch.int64, device=self.device)
        position_ids = torch.zeros(bs, dtype=torch.int32, device=self.device)
        slots = torch.arange(bs, dtype=torch.int64, device=self.device)
        input_lengths = [max_s] * bs
        prefix_lengths = [0] * bs
        input_lengths_tensor = (
            torch.ones(bs, dtype=torch.int32, device=self.device) * max_s
        )
        prefix_lengths_tensor = torch.zeros(bs, dtype=torch.int32, device=self.device)
        block_tables = torch.arange(
            max_bt, dtype=torch.int32, device=self.device
        ).repeat(bs)
        block_tables = block_tables.reshape((bs, max_bt))

        if ATTENTION == "flashinfer":
            block_tables = block_tables_to_ragged(
                block_tables=block_tables,
                input_lengths=input_lengths,
                prefix_lens=prefix_lengths,
            )
            from text_generation_server.layers.attention.flashinfer import (
                create_decode_state_cuda_graphs,
            )

            block_tables_ptr = torch.zeros(
                bs + 1, dtype=torch.int32, device=self.device
            )
            last_page_len = torch.ones(bs, dtype=torch.int32, device=self.device)
            state = create_decode_state_cuda_graphs(
                device=input_ids.device,
                block_tables=block_tables,
                block_tables_ptr=block_tables_ptr,
                last_page_len=last_page_len,
                num_heads=self.num_heads,
                num_kv_heads=self.num_kv_heads,
            )
        else:
            state = None

        graph = torch.cuda.CUDAGraph()
        self.cuda_graphs[bs] = {
            "input_ids": input_ids,
            "position_ids": position_ids,
            "kv_cache": self.kv_cache,
            "block_tables": block_tables,
            "slots": slots,
            "input_lengths": input_lengths_tensor,
            "prefix_lengths": prefix_lengths_tensor,
            "state": state,
            "graph": graph,
        }

        torch.cuda.synchronize()
        # Run once outside to warmup
        with self._forward_context(
            block_tables=block_tables,
            cu_seqlen_prefill=None,
            input_lengths_tensor=input_lengths_tensor,
            state=state,
            prefix_lens_tensor=prefix_lengths_tensor,
        ):
            seqlen = Seqlen(
                input_lengths=input_lengths_tensor,
                prefix_lengths=prefix_lengths_tensor,
                cu_seqlen_q=None,
                max_q=1,
                max_k=max_s,
            )
            self.model.forward(
                input_ids=input_ids,
                position_ids=position_ids,
                cu_seqlen_prefill=None,
                kv_cache=self.kv_cache,
                block_tables=block_tables,
                slots=slots,
                seqlen=seqlen,
                max_s=max_s,
                prefill_cache_indices=None,
                lm_head_indices=None,
            )
            del seqlen

            torch.cuda.synchronize()

            with torch.cuda.graph(graph, pool=MEM_POOL):
                seqlen = Seqlen(
                    input_lengths=input_lengths_tensor,
                    prefix_lengths=prefix_lengths_tensor,
                    cu_seqlen_q=None,
                    max_q=1,
                    max_k=max_s,
                )
                logits, speculative_logits = self.model.forward(
                    input_ids=input_ids,
                    position_ids=position_ids,
                    cu_seqlen_prefill=None,
                    kv_cache=self.kv_cache,
                    block_tables=block_tables,
                    slots=slots,
                    seqlen=seqlen,
                    max_s=max_s,
                    prefill_cache_indices=None,
                    lm_head_indices=None,
                )
                self.cuda_graphs[bs]["logits"] = logits
                self.cuda_graphs[bs]["speculative_logits"] = speculative_logits
        torch.cuda.synchronize()
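
        # Replay note (descriptive, not from the original): at decode time a
        # batch of size bs reuses these captured static buffers by copying its
        # live input_ids / position_ids / block_tables / slots into them and
        # calling graph.replay(), re-running the recorded kernels without
        # re-dispatch overhead.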

1251
    def warmup(self, batch: FlashCausalLMBatch):
1252
        # The warmup batch is the biggest batch we could ever receive
Nicolas Patry's avatar
Nicolas Patry committed
1253
1254
        empty_cache()

        try:
            self.init_kv_cache(
                batch.num_blocks,
                self.num_layers,
                self.num_kv_heads,
                self.head_size,
                self.dtype,
                self.device,
            )
            max_bt = batch.max_blocks
            max_s = max_bt * BLOCK_SIZE

            if SYSTEM == "rocm" and os.environ.get("PYTORCH_TUNABLEOP_ENABLED", False):
                torch.cuda.tunable.tuning_enable(False)
            _, batch, _ = self.generate_token(batch)
        except torch.cuda.OutOfMemoryError as e:
            raise RuntimeError(
                f"Not enough memory to handle {len(batch.input_ids)} prefill tokens. "
                f"You need to decrease `--max-batch-prefill-tokens`"
            ) from e

        synchronize(self.device)

        # Inspired by the original implementation in [vllm](https://github.com/vllm-project/vllm)
        # Calculate the number of blocks that can be allocated with the free memory
        dtype_size = torch.tensor([], dtype=self.dtype).element_size()
        cache_block_size = BLOCK_SIZE * self.num_kv_heads * self.head_size
        total_cache_size = self.num_layers * cache_block_size * 2 * dtype_size
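        # Worked example with hypothetical sizes: BLOCK_SIZE=16, 8 KV heads
        # and head_size=128 give 16 * 8 * 128 = 16384 elements per layer for
        # each of K and V; with 32 layers and 2-byte fp16 a block costs
        # 32 * 16384 * 2 * 2 = 2 MiB.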

        free_memory = get_free_memory(self.device, MEMORY_FRACTION)
        batch_num_blocks = batch.num_blocks if batch is not None else 0

        num_blocks = (
            # Leave 5% for some wiggle room
            int((free_memory * TGI_WIGGLE_ROOM) // total_cache_size)
            # Add batch.num_blocks as we allocated it above, so it is included in the peak memory.
            + batch_num_blocks
        )
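        # e.g. with 20 GiB free and the 2 MiB blocks from the example above,
        # assuming the default TGI_WIGGLE_ROOM of 0.95:
        # 0.95 * 20480 MiB // 2 MiB = 9728 blocks, plus the blocks already
        # allocated for the warmup batch.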

        del batch

        self.init_kv_cache(
            num_blocks,
            self.num_layers,
            self.num_kv_heads,
            self.head_size,
            self.dtype,
            self.device,
        )

        if SYSTEM == "rocm":
            if (
                os.environ.get("PYTORCH_TUNABLEOP_ENABLED") is None
                or os.environ.get("PYTORCH_TUNABLEOP_ENABLED") == "1"
            ):
                torch.cuda.tunable.enable()

                if os.environ.get("PYTORCH_TUNABLEOP_TUNING") != "0":
                    torch.cuda.tunable.tuning_enable(True)

                if os.environ.get("PYTORCH_TUNABLEOP_SEQLENS") is not None:
                    tuning_sequences = [
                        int(val)
                        for val in os.environ["PYTORCH_TUNABLEOP_SEQLENS"].split(",")
                    ]
                elif CUDA_GRAPHS is not None:
                    tuning_sequences = CUDA_GRAPHS
                else:
                    # For seqlen = 1, we dispatch to LLMM1 kernel.
                    tuning_sequences = [2, 3, 4, 5, 6, 7]
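                # e.g. launching with PYTORCH_TUNABLEOP_SEQLENS=2,4,8 would
                # restrict the tuning below to those three sequence lengths.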

                tunableop_filepath = os.path.join(
                    HUGGINGFACE_HUB_CACHE,
                    f"tunableop_{self.model_id.replace('/', '-')}_tp{self.world_size}_rank{self.rank}.csv",
                )

                log_master(
                    logger.info,
                    f"PyTorch TunableOp (https://github.com/fxmarty/pytorch/tree/2.3-patched/aten/src/ATen/cuda/tunable) is enabled. The warmup may take several minutes while the optimal ROCm matrix multiplication kernels are picked for the target lengths {', '.join([str(seqlen) for seqlen in tuning_sequences])}, typically bringing a 5-8% latency improvement for small sequence lengths. The picked GEMMs are saved in the file {tunableop_filepath}. To disable TunableOp, please launch TGI with `PYTORCH_TUNABLEOP_ENABLED=0`.",
                )

                if os.path.isfile(tunableop_filepath):
                    log_master(
                        logger.info,
                        f"The file {tunableop_filepath} already exists and will be reused.",
                    )
                    torch.cuda.tunable.read_file(tunableop_filepath)

                os.makedirs(HUGGINGFACE_HUB_CACHE, exist_ok=True)

                for seqlen in tuning_sequences:
                    log_master(logger.info, f"Warming up TunableOp for seqlen={seqlen}")
                    self.tunableop_warmup(seqlen)
                    torch.cuda.tunable.write_file(tunableop_filepath)
                torch.cuda.tunable.tuning_enable(False)
            else:
                log_master(
                    logger.info,
                    "PyTorch ROCm TunableOp (https://github.com/pytorch/pytorch/tree/main/aten/src/ATen/cuda/tunable) is disabled. TunableOp brings an additional 5-8% latency improvement for small sequence lengths but requires a warmup. To enable it, please launch TGI with the environment variable PYTORCH_TUNABLEOP_ENABLED=1.",
                )

        if CUDA_GRAPHS:
            try:
                log_master(
                    logger.info, f"Cuda Graphs are enabled for sizes {CUDA_GRAPHS}"
                )
                # Warmup cuda graphs
                for bs in CUDA_GRAPHS:
                    if self.speculate is None or self.speculate + 1 <= bs:
                        self.cuda_graph_warmup(bs, max_s, max_bt)
            except torch.cuda.OutOfMemoryError:
                logger.exception("Decode cuda graph warmup failed")
        else:
            log_master(
                logger.info, f"Cuda Graphs are disabled (CUDA_GRAPHS={CUDA_GRAPHS})."
            )

        return int(num_blocks * BLOCK_SIZE)

    def tunableop_warmup(self, seqlen: int):
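        """Run a dummy prefill of `seqlen` tokens so that TunableOp can
        benchmark and select GEMM kernels for this shape."""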
        input_ids = torch.zeros(seqlen, dtype=torch.int64, device=self.device)
        position_ids = torch.zeros(seqlen, dtype=torch.int32, device=self.device)
        slots = torch.arange(seqlen, dtype=torch.int64, device=self.device)

        # Dummy value, some models (starcoder2) don't accept `None`.
        input_lengths = torch.ones(seqlen, dtype=torch.int32, device=self.device)
        prefix_lens_tensor = torch.zeros(seqlen, dtype=torch.int32, device=self.device)
        cu_seqlen_prefill = torch.tensor(
            [0, seqlen], device=self.device, dtype=torch.int32
        )
        # Use a distinct name for the Seqlen object so the integer `seqlen`
        # argument is not shadowed; it is still needed for `max_s` below.
        seqlen_obj = Seqlen(
            input_lengths=input_lengths,
            prefix_lengths=prefix_lens_tensor,
            cu_seqlen_q=cu_seqlen_prefill,
            max_q=1,
            max_k=seqlen,
        )

        # We pass a `cu_seqlen_prefill` in order not to have to deal with paged attention cache allocation/deallocation.
        self.model.forward(
            input_ids=input_ids,
            position_ids=position_ids,
            cu_seqlen_prefill=cu_seqlen_prefill,
            kv_cache=self.kv_cache,
            block_tables=None,
            seqlen=seqlen_obj,
            slots=slots,
            max_s=seqlen,
            lm_head_indices=None,
            prefill_cache_indices=None,
        )

    def forward(
        self, batch: FlashCausalLMBatch, adapter_data: AdapterBatchData
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
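        """Run the model on `batch`, replaying a padded CUDA graph when one
        was captured for a compatible batch size and falling back to a
        regular eager forward otherwise.

        Returns `(logits, speculative_logits)`.
        """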
        # Model Forward
        if batch.speculative_ids is not None:
            input_ids = batch.input_ids
            position_ids = batch.position_ids
            cu_seqlen_prefill = batch.cu_seqlen_prefill
            kv_cache = self.kv_cache
            block_tables = batch.block_tables_tensor
            slots = batch.slots[batch.slot_indices]
            input_lengths = batch.input_lengths_tensor
            max_s = batch.max_seqlen
            lm_head_indices = batch.prefill_head_indices

            speculative_ids = batch.speculative_ids

            B, speculative_length = speculative_ids.shape
            new_length = speculative_length + 1
            new_input_ids = torch.cat(
                [input_ids.unsqueeze(-1), speculative_ids], dim=1
            ).reshape(-1)
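            # This interleaves each sampled token with its speculative
            # continuations, e.g. input_ids [a, b] and speculative_ids
            # [[x], [y]] become [a, x, b, y].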
            arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0)
            arange_int = arange.to(dtype=torch.int32)
            new_position_ids = (
                position_ids.unsqueeze(-1).expand(B, new_length) + arange
            ).view(-1)
            slots = (slots.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1)
            input_lengths = (
                input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int
            ).view(-1)
            prefix_lens_tensor = (
                batch.prefix_lens_tensor.unsqueeze(-1).expand(B, new_length)
            ).reshape(-1)

            # Copy the block tables for all members
            block_tables = (
                block_tables.unsqueeze(1)
                .expand(B, new_length, -1)
                .reshape(B * new_length, -1)
                .contiguous()
            )
            max_s = max_s + speculative_length

            input_ids = new_input_ids
            position_ids = new_position_ids
        else:
            input_ids = batch.input_ids
            position_ids = batch.position_ids
            cu_seqlen_prefill = batch.cu_seqlen_prefill
            kv_cache = self.kv_cache
            block_tables = batch.block_tables_tensor
            slots = batch.slots[batch.slot_indices]
            input_lengths = batch.input_lengths_tensor
            prefix_lens_tensor = batch.prefix_lens_tensor
            max_s = batch.max_seqlen
            lm_head_indices = batch.prefill_head_indices

        if cu_seqlen_prefill is None and self.max_past() is not None:
            # In decode, not prefill, we're actually overwriting the KV-cache
            # in a circular buffer mode.
            # This makes sure the max_s for the decode pass is correct.
            max_s = min(self.max_past(), max_s)
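            # e.g. with a 4096-token sliding window, a sequence that is 10k
            # tokens long still only has its last 4096 positions in the cache.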

        bs = input_ids.shape[0]
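        # Graphs are captured for fixed, padded batch sizes: pick the smallest
        # captured size that fits, e.g. with CUDA_GRAPHS=[1, 2, 4, 8] a batch
        # of 3 replays the size-4 graph.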
        sorted_padded_bs = sorted([k for k in self.cuda_graphs.keys() if k >= bs])
        if sorted_padded_bs:
            # Get associated cuda graph
            cuda_graph = self.cuda_graphs[sorted_padded_bs[0]]
        else:
            cuda_graph = None

        if cu_seqlen_prefill is not None or cuda_graph is None:
            if ATTENTION == "flashinfer":
                block_tables = block_tables_to_ragged(
                    block_tables=block_tables,
                    input_lengths=batch.input_lengths,
                    prefix_lens=batch.prefix_lens,
                )
            with self._forward_context(
                block_tables=block_tables,
                cu_seqlen_prefill=cu_seqlen_prefill,
                input_lengths_tensor=input_lengths,
                prefix_lens_tensor=prefix_lens_tensor,
            ):
                max_k = (input_lengths + prefix_lens_tensor).max().item()
                seqlen = Seqlen(
                    input_lengths=input_lengths,
                    prefix_lengths=prefix_lens_tensor,
                    cu_seqlen_q=cu_seqlen_prefill,
                    max_q=max_s,
                    max_k=max_k,
                )
                logits, speculative_logits = self.model.forward(
                    input_ids=input_ids,
                    position_ids=position_ids,
                    cu_seqlen_prefill=cu_seqlen_prefill,
                    kv_cache=kv_cache,
                    block_tables=block_tables,
                    slots=slots,
                    seqlen=seqlen,
                    max_s=max_s,
                    prefill_cache_indices=batch.prefill_cache_indices,
                    lm_head_indices=lm_head_indices,
                    adapter_data=adapter_data,
                )
                if batch.prefill_cache_indices is not None:
                    batch.prefill_cache_indices = None
                return logits, speculative_logits

        # Copy inputs to the static inputs of the cuda graph
        # Static inputs are potentially padded
        cuda_graph["input_ids"][: input_ids.shape[0]] = input_ids
        cuda_graph["position_ids"][: position_ids.shape[0]] = position_ids
        if ATTENTION == "flashinfer":
            block_tables = block_tables_to_ragged(
                block_tables=block_tables,
                input_lengths=batch.input_lengths,
                prefix_lens=batch.prefix_lens,
            )
            # assert block_tables.shape[0] >= slots.shape[0]
            cuda_graph["block_tables"][: block_tables.shape[0]] = block_tables
        else:
            cuda_graph["block_tables"][
                : block_tables.shape[0], : block_tables.shape[1]
            ] = block_tables

        # XXX: This is working only because block 0 is reserved for the healthcheck
        # so it doesn't matter if we override it with bogus values.
        cuda_graph["slots"].fill_(0)
        cuda_graph["slots"][: slots.shape[0]] = slots
        cuda_graph["input_lengths"].zero_()
        cuda_graph["input_lengths"][: input_lengths.shape[0]] = input_lengths
        cuda_graph["prefix_lengths"].zero_()
        cuda_graph["prefix_lengths"][: prefix_lens_tensor.shape[0]] = prefix_lens_tensor

        with self._forward_context(
            block_tables=cuda_graph["block_tables"],
            cu_seqlen_prefill=None,
            input_lengths_tensor=cuda_graph["input_lengths"],
            prefix_lens_tensor=cuda_graph["prefix_lengths"],
            state=cuda_graph["state"],
        ):
            # Replay the graph
            cuda_graph["graph"].replay()

        # Slice output to the correct shape
        speculative_logits = (
            cuda_graph["speculative_logits"][:bs]
            if cuda_graph["speculative_logits"] is not None
            else None
        )
        logits = cuda_graph["logits"][:bs]
        return logits, speculative_logits

    @tracer.start_as_current_span("generate_token")
    def generate_token(
        self, batch: FlashCausalLMBatch
    ) -> Tuple[List[Generation], Optional[FlashCausalLMBatch], Tuple[int, int]]:
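        """Run one generation step for the whole batch: forward pass,
        next-token selection (possibly accepting speculative tokens),
        detokenization and stopping-criteria checks.

        Returns the generations, the updated batch (or `None` when every
        request has finished) and the `(forward_ns, decode_ns)` timings.
        """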
        start = time.time_ns()
        prefill = batch.cu_seqlen_prefill is not None
        prefill_logprobs = batch.prefill_next_token_indices is not None

        # Update adapter indices for speculative tokens (if present)
        adapter_meta = batch.adapter_meta
        if batch.speculative_ids is not None:
            B, speculative_length = batch.speculative_ids.shape
            new_length = speculative_length + 1
            adapter_indices = (
                adapter_meta.adapter_indices.unsqueeze(-1)
                .expand(B, new_length)
                .reshape(-1)
            )
            adapter_segments = adapter_meta.adapter_segments * new_length
            adapter_meta = AdapterBatchMetadata(
                adapter_indices=adapter_indices,
                adapter_set=adapter_meta.adapter_set,
                adapter_segments=adapter_segments,
                segment_indices=adapter_meta.segment_indices,
            )

        # Assign pointers to adapter weights
        # TODO(travis): don't update this if indices haven't changed
        adapter_data = AdapterBatchData.from_meta(
            adapter_meta,
            self.layer_to_adapter_weights,
            prefill,
            batch.prefill_head_indices,
        )

        out, speculative_logits = self.forward(batch, adapter_data)

        if prefill:
            next_token_logits = (
                out[batch.prefill_next_token_indices] if prefill_logprobs else out
            )
            if speculative_logits is not None:
                speculative_logits = (
                    speculative_logits[batch.prefill_next_token_indices]
                    if prefill_logprobs
                    else speculative_logits
                )
            next_adapter_indices = batch.adapter_meta.adapter_indices.new_empty(
                len(batch)
            )

        else:
            next_token_logits = out
            next_adapter_indices = batch.adapter_meta.adapter_indices

        speculate = get_speculate()
        (
            next_input_ids,
            next_token_logprobs,
            logprobs,
            accepted_ids,
            speculative_ids,
        ) = batch.next_token_chooser(
            batch.all_input_ids_tensor[:, : batch.max_seqlen],
            next_token_logits,
            speculate,
            batch.speculative_ids,
            speculative_logits,
        )

        batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens(
            batch.top_n_tokens, batch.top_n_tokens_tensor, logprobs, accepted_ids
        )

        if prefill:
            if len(batch) > 1 and prefill_logprobs:
                # We create the prefill_tokens_indices tensor that will be used to gather prefill logprobs
                # When batch == 1, we will just use the batch.input_ids values directly
                prefill_tokens_indices = batch.input_ids.new_zeros(len(out))

            next_position_ids = batch.position_ids.new_empty(len(batch))
            batch.slot_indices = batch.slot_indices[batch.cu_seqlen_prefill[1:] - 1]
            # We do not need cu_seqlen_prefill anymore
            batch.cu_seqlen_prefill = None
        else:
            prefill_logprobs = None
            next_position_ids = batch.position_ids

        # Cumulative length
        cumulative_length = 0

        # Results
        generations: List[Generation] = []
        stopped = True

        # Zipped iterator
        iterator = zip(batch.input_lengths, batch.all_input_ids, accepted_ids)

        # We run two for loops: the first can run entirely asynchronously from
        # the GPU, while the second needs a GPU <-> CPU sync.
        # It is faster if we delay this sync for the maximum amount of time.

        # For each member of the batch
        index = 0
        for i, (input_length, all_input_ids, n_accepted_ids) in enumerate(iterator):
            # Indexing metadata
            start_index = cumulative_length
            end_index = cumulative_length + input_length

            if prefill:
                # Indexing metadata
                out_start_index = batch.prefill_cu_outlens[i]
                out_end_index = batch.prefill_cu_outlens[i + 1]
                out_length = out_end_index - out_start_index

                # Initialize position_ids
                # In decode, we do not need this as we can just increment position ids
                next_position_ids[i] = batch.position_ids[end_index - 1]

                # Initialize adapter indices
                # In decode, we only have one token per row in the batch, so grab last index
                next_adapter_indices[i] = batch.adapter_meta.adapter_indices[
                    end_index - 1
                ]

                # Used to gather prefill logprobs
                # Copy batch.input_ids to prefill_token_indices
                if prefill_logprobs:
                    if len(batch) > 1:
                        prefill_tokens_indices[out_start_index : out_end_index - 1] = (
                            batch.input_ids[start_index + 1 : start_index + out_length]
                        )
                    else:
                        # Set prefill_tokens_indices to the correct slice
                        prefill_tokens_indices = batch.input_ids[
                            start_index + 1 : start_index + out_length
                        ]

            for j in range(n_accepted_ids):
                batch.all_input_ids_tensor[i, input_length + j] = next_input_ids[index]
                index += 1

            cumulative_length += input_length

        # Update values
        batch.input_ids = next_input_ids[accepted_ids.cumsum(dim=-1) - 1]
        batch.speculative_ids = speculative_ids
        batch.position_ids = next_position_ids + accepted_ids
        batch.input_lengths_tensor += accepted_ids
        batch.slot_indices += accepted_ids
        batch.adapter_meta.adapter_indices = next_adapter_indices

        if prefill:
            # adjust segment lengths to account for all request lengths being 1 during decoding
            adapter_segments, _ = find_segments(batch.adapter_meta.adapter_indices)
            batch.adapter_meta.adapter_segments = torch.tensor(
                adapter_segments,
                dtype=torch.int32,
                device=batch.adapter_meta.adapter_segments.device,
            )

        if prefill and prefill_logprobs:
            # Get prefill logprobs
            prefill_logprobs_tensor = torch.log_softmax(out, -1)
            prefill_logprobs = torch.gather(
                prefill_logprobs_tensor, 1, prefill_tokens_indices.view(-1, 1)
            )
            # GPU <-> CPU sync
            prefill_logprobs = prefill_logprobs.view(-1).tolist()

        # GPU <-> CPU sync
        next_token_logprobs = next_token_logprobs.tolist()
        next_token_ids = next_input_ids.tolist()
        accepted_ids = accepted_ids.tolist()
        start_decode = time.time_ns()

        # Zipped iterator
        iterator = zip(
            batch.requests,
            batch.input_lengths,
            batch.prefix_offsets,
            batch.read_offsets,
            batch.stopping_criterias,
            batch.all_input_ids,
            batch.prefix_ids,
            batch.next_token_chooser.do_sample,
            batch.next_token_chooser.seeds,
            batch.top_n_tokens,
            accepted_ids,
            batch_top_token_ids,
            batch_top_token_logprobs,
        )

        # For each member of the batch
        index = 0
        for i, (
            request,
            input_length,
            prefix_offset,
            read_offset,
            stopping_criteria,
            all_input_ids,
            prefix_ids,
            do_sample,
            seed,
            top_n_tokens,
            n_accepted_ids,
            top_token_ids,
            top_token_logprobs,
        ) in enumerate(iterator):
            # Append next token to all tokens
            next_token_texts = []
            left = 0

            if n_accepted_ids > 1:
                log_master(logger.debug, f"speculated ids {n_accepted_ids - 1}")

            current_stopped = False
            for j in range(index, index + n_accepted_ids):
                # Generated token
                next_token_id = next_token_ids[j]
                all_input_ids.append(next_token_id)
                next_token_text, prefix_offset, read_offset = self.decode_token(
                    all_input_ids,
                    prefix_offset,
                    read_offset,
                )
                next_token_texts.append(next_token_text)

                stop, reason = stopping_criteria(
                    next_token_id,
                    next_token_text,
                )

                if stop:
                    left = index + n_accepted_ids - j - 1
                    current_stopped = True
                    break
                else:
                    current_stopped = False
            stopped = stopped and current_stopped

            _next_token_ids = next_token_ids[index : index + n_accepted_ids - left]
            _next_token_logprobs = next_token_logprobs[
                index : index + n_accepted_ids - left
            ]
            index += n_accepted_ids

            # Shard generations
            # All generations will be appended in the rust sharded client
            if i % self.world_size == self.rank:
                if stop:
                    # Decode generated tokens
                    output_text, _, _ = self.decode_token(
                        all_input_ids,
                        prefix_offset=len(all_input_ids)
                        - stopping_criteria.current_tokens
                        - 1,
                        read_offset=len(all_input_ids)
                        - stopping_criteria.current_tokens,
                        skip_special_tokens=True,
                    )
                    generated_text = GeneratedText(
                        output_text,
                        stopping_criteria.current_tokens,
                        reason,
                        seed if do_sample else None,
                    )
                else:
                    generated_text = None

                # Prefill
                if prefill and request.prefill_logprobs:
                    out_start_index = batch.prefill_cu_outlens[i]
                    out_end_index = batch.prefill_cu_outlens[i + 1]

                    # Remove generated token to only have prefill and add nan for first prompt token
                    request_prefill_logprobs = (
                        [float("nan")] * (len(prefix_ids) + 1)
                    ) + prefill_logprobs[out_start_index : out_end_index - 1]
                    prefill_token_ids = all_input_ids[:-1]
                    prefill_texts = self.tokenizer.batch_decode(
                        prefix_ids + prefill_token_ids,
                        clean_up_tokenization_spaces=False,
                        skip_special_tokens=False,
                    )

                    prefill_tokens = Tokens(
                        prefix_ids + prefill_token_ids,
                        request_prefill_logprobs,
                        prefill_texts,
                        is_special=[],
                    )
                else:
                    prefill_tokens = None

                if top_n_tokens > 0:
                    all_top_tokens = []
                    for top_token_ids, top_token_logprobs in zip(
                        top_token_ids, top_token_logprobs
                    ):
                        toptoken_texts = self.tokenizer.batch_decode(
                            top_token_ids,
                            clean_up_tokenization_spaces=False,
                            skip_special_tokens=False,
                        )
                        special_toptokens = [
                            token_id in self.all_special_ids
                            for token_id in top_token_ids
                        ]
                        top_tokens = Tokens(
                            top_token_ids,
                            top_token_logprobs,
                            toptoken_texts,
                            special_toptokens,
                        )
                        all_top_tokens.append(top_tokens)
                    top_tokens = all_top_tokens
                else:
                    top_tokens = None

                generation = Generation(
                    request.id,
                    prefill_tokens,
                    Tokens(
                        _next_token_ids,
                        _next_token_logprobs,
                        next_token_texts,
                        [nid in self.all_special_ids for nid in _next_token_ids],
                    ),
                    generated_text,
                    top_tokens,
                )

                generations.append(generation)

            # accept each new token for this specific request since we may
            # have more than one new token per request with speculative decoding
            for next_token_id in _next_token_ids:
                batch.next_token_chooser = (
                    batch.next_token_chooser.advance_grammar_single(i, next_token_id)
                )

            # Update values
            batch.input_lengths[i] = input_length + n_accepted_ids
            if batch.input_lengths[i] > batch.max_seqlen:
                batch.max_seqlen = batch.input_lengths[i]
            batch.prefix_offsets[i] = prefix_offset
            batch.read_offsets[i] = read_offset
            batch.all_input_ids[i] = all_input_ids

        if stopped:
            # No need to return a batch if we know that all requests stopped
            forward_ns = start_decode - start
            decode_ns = time.time_ns() - start_decode
            return generations, None, (forward_ns, decode_ns)

        batch.prefill_cu_outlens = None
        batch.prefill_head_indices = None
        batch.prefill_next_token_indices = None

        forward_ns = start_decode - start
        decode_ns = time.time_ns() - start_decode
        return generations, batch, (forward_ns, decode_ns)

    def _forward_context(
        self,
        *,
        block_tables: torch.Tensor,
        cu_seqlen_prefill: Optional[torch.Tensor],
        input_lengths_tensor: torch.Tensor,
        prefix_lens_tensor: torch.Tensor,
        state: Optional[Any] = None,
    ) -> ContextManager:
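        """Return the attention context for this forward pass: a `nullcontext`
        for non-flashinfer backends, otherwise the flashinfer prefill or
        decode state for this batch."""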
        if ATTENTION != "flashinfer":
            return nullcontext()

        from text_generation_server.layers.attention.flashinfer import (
            use_decode_state,
            use_prefill_with_paged_kv_state,
        )

        # has_prefix_lens = any(prefix_len > 0 for prefix_len in prefix_lens)

        if cu_seqlen_prefill is not None:
            return use_prefill_with_paged_kv_state(
                state=(
                    state if state is not None else self.prefill_with_paged_kv_state
                ),
                # block_tables=block_tables_to_ragged(
                #     block_tables=block_tables,
                #     input_lengths=input_lengths,
                #     prefix_lens=prefix_lens,
                # ),
                block_tables=block_tables,
                cu_seqlens=cu_seqlen_prefill,
                input_lengths=input_lengths_tensor + prefix_lens_tensor,
                num_heads=self.num_heads,
                num_kv_heads=self.num_kv_heads,
                head_size=self.head_size,
                page_size=BLOCK_SIZE,
                dtype=self.dtype,
                window_left=self.sliding_window,
            )
        else:
            assert input_lengths_tensor is not None
            return use_decode_state(
                state=state if state is not None else self.decode_state,
                input_lengths=input_lengths_tensor + prefix_lens_tensor,
                block_tables=block_tables,
                num_heads=self.num_heads,
                num_kv_heads=self.num_kv_heads,
                head_size=self.head_size,
                page_size=BLOCK_SIZE,
                dtype=self.dtype,
                window_left=self.sliding_window,
            )


def block_tables_to_ragged(
    *, block_tables: torch.Tensor, input_lengths: List[int], prefix_lens: List[int]
) -> torch.Tensor:
    """Convert block table to ragged format compatible with FlashInfer."""
    assert len(input_lengths) == len(prefix_lens)

    total_len = sum(input_lengths) + sum(prefix_lens)
    block_tables_ragged = torch.empty(
        total_len, dtype=torch.int32, device=block_tables.device
    )

    offset = 0
    for i, (input_length, prefix_len) in enumerate(zip(input_lengths, prefix_lens)):
        seq_len = prefix_len + input_length
        block_tables_ragged[offset : offset + seq_len] = block_tables[i][:seq_len]
        offset += seq_len

    return block_tables_ragged
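

# Example with hypothetical values: for block_tables [[0, 1, 2], [3, 4, 5]],
# input_lengths=[2, 1] and prefix_lens=[1, 1], each row is truncated to
# prefix_len + input_length entries, giving [0, 1, 2, 3, 4].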