import math
import os
import time
import itertools

import torch
import torch.distributed

import numpy as np

from loguru import logger
from dataclasses import dataclass
from opentelemetry import trace
from transformers import PreTrainedTokenizerBase
from typing import Optional, Tuple, List, Type, Dict

from text_generation_server.models import Model
from text_generation_server.utils.tokens import batch_top_tokens
from text_generation_server.utils.speculate import get_speculate
from text_generation_server.models.types import (
    Batch,
    Tokens,
    Generation,
    GeneratedText,
)
from text_generation_server.models.cache_manager import (
    get_cache_manager,
    set_cache_manager,
    BLOCK_SIZE,
)
from text_generation_server.pb import generate_pb2
from text_generation_server.models.globals import MEM_POOL, ENABLE_CUDA_GRAPHS
from text_generation_server.utils import StoppingCriteria, HeterogeneousNextTokenChooser
from text_generation_server.utils.dist import MEMORY_FRACTION

tracer = trace.get_tracer(__name__)


@dataclass
class FlashCausalLMBatch(Batch):
    batch_id: int
    requests: List[generate_pb2.Request]
    # request id -> idx in list mapping
    requests_idx_mapping: Dict[int, int]

    # Decoder values
    input_ids: torch.Tensor
    position_ids: torch.Tensor
    speculative_ids: Optional[torch.Tensor]

    # Flash Attention values

    # tensor of length b containing the cumulative sequence lengths of the sequences in the batch, only used in prefill
    cu_seqlen_prefill: Optional[torch.Tensor]

    # Paged Attention values

    # Set when creating the batch
    # CPU tensor of length b indicating the start of each sequence in slots
    start_slots: torch.Tensor
    # tensor of indices of the currently used slots, length = \sum_{i=0}^{b} s_i in prefill, length = b in decode
    slot_indices: torch.Tensor
    # List of tuple of ints representing the number of blocks and slots needed by each sequence
    needed_blocks_slots: Optional[List[Tuple[int, int]]]

    # Set in prefill by the CacheManager
    # list of length b of list of length s_i // block_size
    block_tables: Optional[List[List[int]]]
    # tensor of size [b, max_total_seqlen // block_size] holding the paged attention block tables for all sequences
    block_tables_tensor: Optional[torch.Tensor]
    # tensor of length \sum_{i=0}^{b} max_s_i holding the paged attention slots for all sequences
    slots: Optional[torch.Tensor]

    max_seqlen: int

    # Prefill metadata tensors to efficiently compute logprobs
    prefill_head_indices: Optional[torch.Tensor]
    prefill_next_token_indices: Optional[torch.Tensor]
    prefill_cu_outlens: Optional[List[int]]

    # All tokens
    all_input_ids: List[List[int]]
    all_input_ids_tensor: torch.Tensor

    # Lengths of all generations present in the batch
    input_lengths: List[int]
    input_lengths_tensor: torch.Tensor
    prefix_offsets: List[Optional[int]]
    read_offsets: List[Optional[int]]

    # Generation helpers
    next_token_chooser: HeterogeneousNextTokenChooser
    stopping_criterias: List[StoppingCriteria]
    top_n_tokens: List[int]
    top_n_tokens_tensor: torch.Tensor

    # Number of blocks in this batch
    blocks: int
    # Maximum number of blocks
    max_blocks: int

    def to_pb(self) -> generate_pb2.CachedBatch:
        return generate_pb2.CachedBatch(
            id=self.batch_id,
            request_ids=[r.id for r in self.requests],
            size=len(self),
            max_tokens=self.blocks * BLOCK_SIZE,
        )

    @classmethod
    def from_pb(
        cls,
        pb: generate_pb2.Batch,
        tokenizer: PreTrainedTokenizerBase,
        dtype: torch.dtype,
        device: torch.device,
    ) -> "FlashCausalLMBatch":
        batch_inputs = []
        max_truncation = 0
        for r in pb.requests:
            batch_inputs.append(r.inputs)
            max_truncation = max(max_truncation, r.truncate)

        batch_tokenized_inputs = tokenizer(
            batch_inputs, truncation=True, max_length=max_truncation
        )["input_ids"]

        position_ids = []
        speculative_ids = []
        cu_seqlen_prefill = [0]
        needed_blocks_slots = []
        start_slots = []
        slot_indices = []

        input_lengths = []
        prefix_offsets = []
        read_offsets = []
        all_input_ids = []
        requests_idx_mapping = {}
        all_prefill_logprobs = True
        no_prefill_logprobs = True
        prefill_head_indices = []
        prefill_next_token_indices = []
        prefill_cu_outlens = [0]

        next_token_chooser_parameters = []
        stopping_criterias = []
        top_n_tokens = []

        # Cumulative length
        cumulative_length = 0
        cumulative_max_length = 0
        prefill_out_cumulative_length = 0

        blocks = 0
        max_seqlen = 0
        max_length = 0
        max_blocks = 0

        # Parse batch
        for i, (r, tokenized_input) in enumerate(
            zip(pb.requests, batch_tokenized_inputs)
        ):
            # request id -> idx in list mapping
            requests_idx_mapping[r.id] = i

            tokenized_input = tokenized_input[-r.truncate :]
            input_length = len(tokenized_input)
            input_lengths.append(input_length)
            prefix_offsets.append(input_length - 5)
            read_offsets.append(input_length)
            all_input_ids.append(tokenized_input)
            # Position ids
            request_position_ids = torch.arange(0, input_length, dtype=torch.int32)
            position_ids.append(request_position_ids)

            # Add cumulative lengths of all previous inputs
            cu_seqlen_prefill.append(cumulative_length + input_length)

            next_token_chooser_parameters.append(r.parameters)
            stopping_criteria = StoppingCriteria.from_pb(
                r.stopping_parameters, tokenizer
            )
            max_new_tokens = stopping_criteria.max_new_tokens
            stopping_criterias.append(stopping_criteria)
            top_n_tokens.append(r.top_n_tokens)
            # Paged attention
            # Remove one as the first token does not have a past
            speculative_length = get_speculate()
            total_tokens = input_length + max_new_tokens - 1 + speculative_length
            needed_blocks = math.ceil(total_tokens / BLOCK_SIZE)
            blocks += needed_blocks
            needed_blocks_slots.append((needed_blocks, total_tokens))
            start_slots.append(cumulative_max_length)

            request_slot_indices = torch.arange(
                cumulative_max_length,
                cumulative_max_length + input_length,
                dtype=torch.int64,
            )
            slot_indices.append(request_slot_indices)
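            # Worked example, assuming BLOCK_SIZE = 16: input_length = 10,
            # max_new_tokens = 20 and speculative_length = 0 give
            # total_tokens = 10 + 20 - 1 = 29, so needed_blocks = ceil(29 / 16) = 2;
            # the prompt's slot indices are the first 10 of those 29 reserved slots.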

            all_prefill_logprobs = all_prefill_logprobs and r.prefill_logprobs
            no_prefill_logprobs = no_prefill_logprobs and not r.prefill_logprobs

            if r.prefill_logprobs:
                prefill_head_indices.append(request_position_ids + cumulative_length)
                prefill_next_token_indices.append(
                    prefill_out_cumulative_length + input_length - 1
                )
                prefill_cu_outlens.append(prefill_out_cumulative_length + input_length)
                prefill_out_cumulative_length += input_length
            else:
                prefill_head_indices.append(
                    torch.tensor(
                        [cumulative_length + input_length - 1], dtype=torch.int32
                    )
                )
                prefill_next_token_indices.append(prefill_out_cumulative_length)
                prefill_cu_outlens.append(prefill_out_cumulative_length + 1)
                prefill_out_cumulative_length += 1
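            # When prefill logprobs are requested, every prefill position of
            # this request is kept (input_length outputs); otherwise only the
            # last position is kept, since its logits produce the first new token.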

            # Update
            cumulative_length += input_length
            cumulative_max_length += total_tokens
            max_seqlen = max(max_seqlen, input_length)
            max_blocks = max(max_blocks, needed_blocks)
            max_length = max(
                max_length, input_length + max_new_tokens + speculative_length
            )

        next_token_chooser = HeterogeneousNextTokenChooser.from_pb(
            next_token_chooser_parameters, dtype, device, tokenizer
        )
        start_slots = torch.tensor(start_slots, dtype=torch.int64)

        # Padded all_input_ids_tensor
        all_input_ids_tensor = np.zeros(
            (len(all_input_ids), max_length), dtype=np.int64
        )
        for i, input_ids in enumerate(all_input_ids):
            all_input_ids_tensor[i, : len(input_ids)] = input_ids
        # Create tensors on device
        all_input_ids_tensor = torch.tensor(
            all_input_ids_tensor, dtype=torch.int64, device=device
        )

        if len(pb.requests) > 1:
            input_ids = np.concatenate(all_input_ids, dtype=np.int64)
            position_ids = torch.cat(position_ids)
            slot_indices = torch.cat(slot_indices)
        else:
            input_ids = all_input_ids[0]
            position_ids = position_ids[0]
            slot_indices = slot_indices[0]
        cu_seqlen_prefill = torch.tensor(
            cu_seqlen_prefill, device=device, dtype=torch.int32
        )
        position_ids = position_ids.to(device)
        slot_indices = slot_indices.to(device)
        input_ids = torch.tensor(input_ids, dtype=torch.int64, device=device)
        input_lengths_tensor = torch.tensor(
            input_lengths, dtype=torch.int32, device=device
        )
        if all_prefill_logprobs:
            prefill_head_indices = None
            prefill_next_token_indices = cu_seqlen_prefill[1:] - 1
        elif no_prefill_logprobs:
            prefill_head_indices = cu_seqlen_prefill[1:] - 1
            prefill_next_token_indices = None
        else:
            prefill_head_indices = torch.tensor(
                torch.cat(prefill_head_indices), dtype=torch.int64, device=device
            )
            prefill_next_token_indices = torch.tensor(
                prefill_next_token_indices, dtype=torch.int64, device=device
            )
        top_n_tokens_tensor = torch.tensor(
            top_n_tokens, device=device, dtype=torch.int64
        )
        return cls(
            batch_id=pb.id,
            requests=pb.requests,
            requests_idx_mapping=requests_idx_mapping,
            input_ids=input_ids,
            position_ids=position_ids,
            cu_seqlen_prefill=cu_seqlen_prefill,
            start_slots=start_slots,
            slot_indices=slot_indices,
            needed_blocks_slots=needed_blocks_slots,
            block_tables=None,
            block_tables_tensor=None,
            slots=None,
            max_seqlen=max_seqlen,
            prefill_head_indices=prefill_head_indices,
            prefill_next_token_indices=prefill_next_token_indices,
            prefill_cu_outlens=prefill_cu_outlens,
            input_lengths=input_lengths,
            input_lengths_tensor=input_lengths_tensor,
            prefix_offsets=prefix_offsets,
            read_offsets=read_offsets,
            all_input_ids=all_input_ids,
            all_input_ids_tensor=all_input_ids_tensor,
            next_token_chooser=next_token_chooser,
            stopping_criterias=stopping_criterias,
            top_n_tokens=top_n_tokens,
            top_n_tokens_tensor=top_n_tokens_tensor,
            blocks=blocks,
            max_blocks=max_blocks,
            speculative_ids=None,
        )

    @tracer.start_as_current_span("filter")
    def filter(self, request_ids: List[int]) -> "FlashCausalLMBatch":
        if len(request_ids) == 0:
            raise ValueError("Batch must have at least one request")
        # We assume that if len(requests) == len(self) then the requests are the same
        if len(request_ids) == len(self):
            return self

        device = self.input_ids.device
        # New values after filtering
        requests_idx_mapping = {}

        # Used to index into tensors
        indices = []

        # slots to keep after filtering
        slot_filtering_indices = torch.zeros(
            self.slots.shape[0], dtype=torch.bool, device=device
        )

        # Create on CPU to only move to GPU once instead of at every copy
        slot_indices = torch.empty(len(request_ids), dtype=torch.int64)
        max_seqlen = 0

        requests = []
        start_slots = []
        block_tables = []
        all_input_ids = []

        input_lengths = []
        prefix_offsets = []
        read_offsets = []
        stopping_criterias = []
        top_n_tokens = []
        blocks = 0
        max_blocks = 0
        # Cumulative length
        cumulative_max_length = 0

        for i, request_id in enumerate(request_ids):
            idx = self.requests_idx_mapping[request_id]
            indices.append(idx)
            requests_idx_mapping[request_id] = i

            requests.append(self.requests[idx])

            # Get length
            request_input_length = self.input_lengths[idx]
            max_seqlen = max(max_seqlen, request_input_length)
            all_input_ids.append(self.all_input_ids[idx])

            input_lengths.append(request_input_length)
            prefix_offsets.append(self.prefix_offsets[idx])
            read_offsets.append(self.read_offsets[idx])
            stopping_criteria = self.stopping_criterias[idx]
            stopping_criterias.append(stopping_criteria)
            top_n_tokens.append(self.top_n_tokens[idx])

            remaining_tokens = (
                stopping_criteria.max_new_tokens - stopping_criteria.current_tokens
            )
            request_block_table = self.block_tables[idx]
            blocks += len(request_block_table)
            block_tables.append(request_block_table)
            start_slots.append(cumulative_max_length)

            # Copy to tensor (CPU)
            slot_indices[i] = cumulative_max_length + request_input_length - 1

            # Set slice
            slot_filtering_indices[
                self.start_slots[idx] : self.start_slots[idx]
                + request_input_length
                + remaining_tokens
                - 1
            ] = True

            cumulative_max_length += request_input_length + remaining_tokens - 1
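            # Slots are compacted: each kept request retains its already-used
            # slots plus those it may still need (remaining_tokens - 1 more).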
            max_blocks = max(max_blocks, len(request_block_table))

        block_indices_to_free = []
        # Iterate on all requests
        for i, r in enumerate(self.requests):
            # Filter requests that are not part of the new batch
            if r.id not in requests_idx_mapping.keys():
                block_indices_to_free.extend(self.block_tables[i])
        # Free blocks
        get_cache_manager().free(block_indices_to_free)
        # Needed to avoid dropping blocks when this batch goes out of scope
        self.block_tables = None

        # Index into tensors
        input_ids = self.input_ids[indices]
        position_ids = self.position_ids[indices]
        all_input_ids_tensor = self.all_input_ids_tensor[indices]
        block_tables_tensor = self.block_tables_tensor[indices]
        input_lengths_tensor = self.input_lengths_tensor[indices]
        slots = self.slots[slot_filtering_indices]
        next_token_chooser = self.next_token_chooser.filter(indices)
        top_n_tokens_tensor = self.top_n_tokens_tensor[indices]
        speculative_ids = (
            self.speculative_ids[indices] if self.speculative_ids is not None else None
        )

        start_slots = torch.tensor(start_slots, dtype=torch.int64)
        # Move to GPU now that we have the whole tensor
        slot_indices = slot_indices.to(device)
        return type(self)(
            batch_id=self.batch_id,
            requests=requests,
            requests_idx_mapping=requests_idx_mapping,
            input_ids=input_ids,
            position_ids=position_ids,
            cu_seqlen_prefill=None,
            start_slots=start_slots,
            slot_indices=slot_indices,
            needed_blocks_slots=None,
            block_tables=block_tables,
            block_tables_tensor=block_tables_tensor,
            slots=slots,
            max_seqlen=max_seqlen,
            prefill_head_indices=None,
            prefill_next_token_indices=None,
            prefill_cu_outlens=None,
            input_lengths=input_lengths,
            input_lengths_tensor=input_lengths_tensor,
            prefix_offsets=prefix_offsets,
            read_offsets=read_offsets,
            all_input_ids=all_input_ids,
            all_input_ids_tensor=all_input_ids_tensor,
            next_token_chooser=next_token_chooser,
            stopping_criterias=stopping_criterias,
            top_n_tokens=top_n_tokens,
            top_n_tokens_tensor=top_n_tokens_tensor,
            blocks=blocks,
            max_blocks=max_blocks,
            speculative_ids=speculative_ids,
        )

    @classmethod
    @tracer.start_as_current_span("concatenate")
    def concatenate(cls, batches: List["FlashCausalLMBatch"]) -> "FlashCausalLMBatch":
        # Batch attributes
        requests = []
        requests_idx_mapping = {}

        blocks = 0
        total_batch_size = 0
        total_slots = 0
        max_blocks = 0
        max_length = 0
        max_seqlen = 0
        for b in batches:
            total_batch_size += len(b)
            total_slots += len(b.slots)
            blocks += b.blocks
            speculative_length = (
                b.speculative_ids.shape[1] if b.speculative_ids is not None else 0
            )
            max_blocks = max(max_blocks, b.max_blocks)
            max_seqlen = max(max_seqlen, b.max_seqlen)
            max_length = max(
                max_length,
                max(
                    input_length
                    + stopping_criteria.max_new_tokens
                    + speculative_length
                    - stopping_criteria.current_tokens
                    for input_length, stopping_criteria in zip(
                        b.input_lengths, b.stopping_criterias
                    )
                ),
            )
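            # max_length bounds the padded all_input_ids_tensor: the longest
            # current sequence plus the tokens it may still generate
            # (max_new_tokens - current_tokens), including speculative ones.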

        input_ids = batches[0].input_ids.new_empty(total_batch_size)
        position_ids = batches[0].position_ids.new_empty(total_batch_size)
        slots = batches[0].slots.new_empty(total_slots)
        slot_indices = batches[0].slot_indices.new_empty(total_batch_size)
        input_lengths_tensor = batches[0].input_lengths_tensor.new_empty(
            total_batch_size
        )
        block_tables_tensor = batches[0].block_tables_tensor.new_zeros(
            (total_batch_size, max_blocks)
        )
        all_input_ids_tensor = batches[0].all_input_ids_tensor.new_zeros(
            (total_batch_size, max_length)
        )
        top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros(
            total_batch_size,
        )
        start_slots = []
        block_tables = []
        all_input_ids = []

        input_lengths = []
        prefix_offsets = []
        read_offsets = []
        next_token_chooser_parameters = []
        stopping_criterias = []
        top_n_tokens = []
        # Cumulative length
        cumulative_batch_size = 0
        cumulative_slots = 0

        for i, batch in enumerate(batches):
            requests.extend(batch.requests)

            if i == 0:
                requests_idx_mapping = batch.requests_idx_mapping
            else:
                # We need to offset the mapping for each batch by the cumulative batch size
                for k, v in batch.requests_idx_mapping.items():
                    requests_idx_mapping[k] = v + cumulative_batch_size

            start_index = cumulative_batch_size
            end_index = cumulative_batch_size + len(batch)
            slots_start_index = cumulative_slots
            slots_end_index = cumulative_slots + len(batch.slots)

            # Copy tensors (GPU)
            input_ids[start_index:end_index] = batch.input_ids
            position_ids[start_index:end_index] = batch.position_ids
            slot_indices[start_index:end_index] = batch.slot_indices + cumulative_slots
            input_lengths_tensor[start_index:end_index] = batch.input_lengths_tensor
            top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor
            slots[slots_start_index:slots_end_index] = batch.slots
            all_input_ids_tensor[
                start_index:end_index, : batch.all_input_ids_tensor.shape[1]
            ] = batch.all_input_ids_tensor[:, :max_length]
            block_tables_tensor[
                start_index:end_index, : batch.block_tables_tensor.shape[1]
            ] = batch.block_tables_tensor[:, :max_blocks]
            start_slots.append(batch.start_slots + cumulative_slots)

            block_tables.extend(batch.block_tables)
            all_input_ids.extend(batch.all_input_ids)

            input_lengths.extend(batch.input_lengths)
            prefix_offsets.extend(batch.prefix_offsets)
            read_offsets.extend(batch.read_offsets)
            next_token_chooser_parameters.extend([r.parameters for r in batch.requests])
            stopping_criterias.extend(batch.stopping_criterias)

            top_n_tokens.extend(batch.top_n_tokens)

            # Update
            cumulative_batch_size += len(batch)
            cumulative_slots += len(batch.slots)
        start_slots = torch.concat(start_slots)
        next_token_chooser = HeterogeneousNextTokenChooser.from_pb(
            next_token_chooser_parameters,
            dtype=batches[0].next_token_chooser.dtype,
            device=batches[0].next_token_chooser.device,
            tokenizer=batches[0].next_token_chooser.tokenizer,
        )

        speculative_ids = (
            torch.cat([b.speculative_ids for b in batches], dim=0)
            if batches[0].speculative_ids is not None
            else None
        )
        # Needed to avoid dropping blocks when the batches go out of scope
        for b in batches:
            b.block_tables = None
            del b
        return cls(
            batch_id=batches[0].batch_id,
            requests=requests,
            requests_idx_mapping=requests_idx_mapping,
            input_ids=input_ids,
            position_ids=position_ids,
            cu_seqlen_prefill=None,
            start_slots=start_slots,
            slot_indices=slot_indices,
            needed_blocks_slots=None,
            block_tables=block_tables,
            block_tables_tensor=block_tables_tensor,
            slots=slots,
            max_seqlen=max_seqlen,
            prefill_head_indices=None,
            prefill_next_token_indices=None,
            prefill_cu_outlens=None,
            input_lengths=input_lengths,
            input_lengths_tensor=input_lengths_tensor,
            prefix_offsets=prefix_offsets,
            read_offsets=read_offsets,
            all_input_ids=all_input_ids,
            all_input_ids_tensor=all_input_ids_tensor,
            next_token_chooser=next_token_chooser,
            stopping_criterias=stopping_criterias,
            top_n_tokens=top_n_tokens,
            top_n_tokens_tensor=top_n_tokens_tensor,
            blocks=blocks,
            max_blocks=max_blocks,
            speculative_ids=speculative_ids,
        )

    def __del__(self):
        if self.block_tables is not None and self.block_tables:
            # Free blocks
            get_cache_manager().free(
                list(itertools.chain.from_iterable(self.block_tables))
            )

    def __len__(self):
        return len(self.requests)


class FlashCausalLM(Model):
    def __init__(
        self,
        model: torch.nn.Module,
        tokenizer: PreTrainedTokenizerBase,
        num_layers: int,
        num_kv_heads: int,
        head_size: int,
        dtype: torch.dtype,
        device: torch.device,
        rank: int = 0,
        world_size: int = 1,
        sliding_window: Optional[int] = None,
    ):
        self.num_layers = num_layers
        self.num_kv_heads = num_kv_heads
        self.head_size = head_size
        self.cuda_graphs = {}

        super(FlashCausalLM, self).__init__(
            model=model,
            tokenizer=tokenizer,
            requires_padding=False,
            dtype=dtype,
            device=device,
            rank=rank,
            world_size=world_size,
            sliding_window=sliding_window,
        )

    @property
    def batch_type(self) -> Type[FlashCausalLMBatch]:
        return FlashCausalLMBatch

    def cuda_graph_warmup(self, bs: int, max_s: int, max_bt: int):
        input_ids = torch.zeros(bs, dtype=torch.int64, device=self.device)
        position_ids = torch.zeros(bs, dtype=torch.int32, device=self.device)
        slots = torch.arange(bs, dtype=torch.int32, device=self.device)
        input_lengths = torch.ones(bs, dtype=torch.int32, device=self.device) * max_s
        block_tables = (
            torch.arange(max_bt, dtype=torch.int32, device=self.device)
            .repeat(bs)
            .reshape((bs, max_bt))
        )
        kv_cache = get_cache_manager().kv_cache

        self.cuda_graphs[bs] = {
            "input_ids": input_ids,
            "position_ids": position_ids,
            "kv_cache": kv_cache,
            "block_tables": block_tables,
            "slots": slots,
            "input_lengths": input_lengths,
        }
        graph = torch.cuda.CUDAGraph()
        self.cuda_graphs[bs]["graph"] = graph

        torch.cuda.synchronize()
        # Run once outside to warmup
        self.model.forward(
            input_ids=input_ids,
            position_ids=position_ids,
            cu_seqlen_prefill=None,
            kv_cache=kv_cache,
            block_tables=block_tables,
            slots=slots,
            input_lengths=input_lengths,
            max_s=max_s,
            lm_head_indices=None,
        )
        torch.cuda.synchronize()

        with torch.cuda.graph(graph, pool=MEM_POOL):
            logits, speculative_logits = self.model.forward(
                input_ids=input_ids,
                position_ids=position_ids,
                cu_seqlen_prefill=None,
                kv_cache=kv_cache,
                block_tables=block_tables,
                slots=slots,
                input_lengths=input_lengths,
                max_s=max_s,
                lm_head_indices=None,
            )
            self.cuda_graphs[bs]["logits"] = logits
            self.cuda_graphs[bs]["speculative_logits"] = speculative_logits
        torch.cuda.synchronize()

    def warmup(self, batch: FlashCausalLMBatch):
        # The warmup batch is the biggest batch we could ever receive
        torch.cuda.empty_cache()
        try:
            cache_manager = set_cache_manager(
                batch.blocks,
                self.num_layers,
                self.num_kv_heads,
                self.head_size,
                self.sliding_window is not None,
                self.dtype,
                self.device,
            )
            max_bt = batch.max_blocks
            max_s = max_bt * get_cache_manager().block_size
            _, batch, _ = self.generate_token(batch)
        except torch.cuda.OutOfMemoryError as e:
            raise RuntimeError(
                f"Not enough memory to handle {len(batch.input_ids)} prefill tokens. "
                f"You need to decrease `--max-batch-prefill-tokens`"
            ) from e

        torch.cuda.synchronize(self.device)

        # Inspired by the original implementation in [vllm](https://github.com/vllm-project/vllm)
        # Calculate the number of blocks that can be allocated with the free memory
        dtype_size = torch.tensor([], dtype=self.dtype).element_size()
        cache_block_size = BLOCK_SIZE * self.num_kv_heads * self.head_size
        total_cache_size = self.num_layers * cache_block_size * 2 * dtype_size
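        # Rough sizing sketch (illustrative numbers, not from this config):
        # with BLOCK_SIZE = 16, num_kv_heads = 32, head_size = 128,
        # num_layers = 32 and a 2-byte dtype, one block costs
        # 32 * (16 * 32 * 128) * 2 * 2 bytes = 8 MiB of KV cache.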

        total_free_memory, _ = torch.cuda.mem_get_info(self.device)
        total_gpu_memory = torch.cuda.get_device_properties(self.device).total_memory

        free_memory = max(
            0, total_free_memory - (1 - MEMORY_FRACTION) * total_gpu_memory
        )
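        # MEMORY_FRACTION caps how much of the GPU this process may use;
        # the reserved (1 - MEMORY_FRACTION) share of total memory is
        # subtracted from what CUDA currently reports as free.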

        num_blocks = (
            # Leave 5% for some wiggle room
            int((free_memory * 0.95) // total_cache_size)
            # Add batch.blocks as we allocated it above, so it is included in the peak memory.
            + cache_manager.num_blocks
        )

        del batch
        del cache_manager
        set_cache_manager(
            num_blocks,
            self.num_layers,
            self.num_kv_heads,
            self.head_size,
            self.sliding_window is not None,
            self.dtype,
            self.device,
        )

        if ENABLE_CUDA_GRAPHS:
            try:
                logger.info("Experimental support for Cuda Graphs is enabled")
                # Warmup cuda graphs
                for bs in [1, 2, 4] + [8 * i for i in range(1, 9)]:
                    if self.speculate is None or self.speculate + 1 <= bs:
                        self.cuda_graph_warmup(bs, max_s, max_bt)
            except Exception:
                logger.exception("Decode cuda graph warmup failed")

        return int(num_blocks * BLOCK_SIZE)
    def forward(
        self, batch: FlashCausalLMBatch
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        # Model Forward
        if batch.speculative_ids is not None:
            input_ids = batch.input_ids
            position_ids = batch.position_ids
            cu_seqlen_prefill = batch.cu_seqlen_prefill
            kv_cache = get_cache_manager().kv_cache
            block_tables = batch.block_tables_tensor
            slots = batch.slots[batch.slot_indices]
            input_lengths = batch.input_lengths_tensor
            max_s = batch.max_seqlen
            lm_head_indices = batch.prefill_head_indices

            speculative_ids = batch.speculative_ids

            B, speculative_length = speculative_ids.shape
            new_length = speculative_length + 1
            new_input_ids = torch.cat(
                [input_ids.unsqueeze(-1), speculative_ids], dim=1
            ).reshape(-1)
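            # Each request's freshly sampled token is interleaved with its
            # speculative ids: [t, s_1, ..., s_n] per request, flattened to
            # B * new_length tokens.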
            arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0)
            arange_int = arange.to(dtype=torch.int32)
            new_position_ids = (
                position_ids.unsqueeze(-1).expand(B, new_length) + arange
            ).view(-1)
            slots = (slots.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1)
            input_lengths = (
                input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int
            ).view(-1)

            # Copy the block tables for all members
            block_tables = (
                block_tables.unsqueeze(1)
                .expand(B, new_length, -1)
                .reshape(B * new_length, -1)
                .contiguous()
            )
            max_s = max_s + speculative_length

            input_ids = new_input_ids
            position_ids = new_position_ids
        else:
            input_ids = batch.input_ids
            position_ids = batch.position_ids
            cu_seqlen_prefill = batch.cu_seqlen_prefill
            kv_cache = get_cache_manager().kv_cache
            block_tables = batch.block_tables_tensor
            slots = batch.slots[batch.slot_indices]
            input_lengths = batch.input_lengths_tensor
            max_s = batch.max_seqlen
            lm_head_indices = batch.prefill_head_indices

        bs = input_ids.shape[0]
        padded_bs = bs
        if bs == 3:
            padded_bs = 4
        elif 3 < bs <= 8:
            padded_bs = 8
        elif bs > 8:
            padded_bs = (bs + 7) // 8 * 8
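        # Pad to the nearest captured graph size, e.g. bs=3 -> 4, bs=5 -> 8,
        # bs=17 -> 24, so that a pre-recorded CUDA graph can be reused.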

        # Try to find an associated cuda graph
        cuda_graph = self.cuda_graphs.get(padded_bs, None)

        if (
            cu_seqlen_prefill is not None
            or cuda_graph is None
            or batch.speculative_ids is not None
        ):
            return self.model.forward(
                input_ids=input_ids,
                position_ids=position_ids,
                cu_seqlen_prefill=cu_seqlen_prefill,
                kv_cache=kv_cache,
                block_tables=block_tables,
                slots=slots,
                input_lengths=input_lengths,
                max_s=max_s,
                lm_head_indices=lm_head_indices,
            )

        # Copy inputs to the static inputs of the cuda graph
        # Static inputs are potentially padded
        cuda_graph["input_ids"][: input_ids.shape[0]] = input_ids
        cuda_graph["position_ids"][: position_ids.shape[0]] = position_ids
        cuda_graph["block_tables"][
            : block_tables.shape[0], : block_tables.shape[1]
        ] = block_tables
        cuda_graph["slots"].fill_(-1)
        cuda_graph["slots"][: slots.shape[0]] = slots
        cuda_graph["input_lengths"].zero_()
        cuda_graph["input_lengths"][: input_lengths.shape[0]] = input_lengths

        # Replay the graph
        cuda_graph["graph"].replay()
        # Slice output to the correct shape
        speculative_logits = (
            cuda_graph["speculative_logits"][:bs]
            if cuda_graph["speculative_logits"] is not None
            else None
        )
        logits = cuda_graph["logits"][:bs]
        return logits, speculative_logits

    @tracer.start_as_current_span("generate_token")
    def generate_token(
        self, batch: FlashCausalLMBatch
    ) -> Tuple[List[Generation], Optional[FlashCausalLMBatch], Tuple[int, int]]:
        start = time.time_ns()
        prefill = batch.cu_seqlen_prefill is not None
        prefill_logprobs = batch.prefill_next_token_indices is not None
        if batch.needed_blocks_slots:
            # Allocate blocks to this batch
            block_tables, block_tables_tensor, slots = get_cache_manager().allocate(
                batch.needed_blocks_slots,
                batch.blocks,
                batch.max_blocks,
                batch.input_ids.device,
            )
            batch.needed_blocks_slots = None
            batch.block_tables = block_tables
            batch.block_tables_tensor = block_tables_tensor
            batch.slots = slots
        try:
938
            out, speculative_logits = self.forward(batch)
        except Exception as e:
            del batch
            raise e
        if prefill:
            next_token_logits = (
                out[batch.prefill_next_token_indices] if prefill_logprobs else out
            )
            if speculative_logits is not None:
                speculative_logits = (
                    speculative_logits[batch.prefill_next_token_indices]
                    if prefill_logprobs
                    else speculative_logits
                )
        else:
            next_token_logits = out

        speculate = get_speculate()
        (
            next_input_ids,
            next_token_logprobs,
            logprobs,
            accepted_ids,
            speculative_ids,
        ) = batch.next_token_chooser(
            batch.all_input_ids_tensor[:, : batch.max_seqlen],
            next_token_logits,
            speculate,
            batch.speculative_ids,
            speculative_logits,
        )

        batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens(
            batch.top_n_tokens, batch.top_n_tokens_tensor, logprobs, accepted_ids
        )

        if prefill:
            if len(batch) > 1 and prefill_logprobs:
                # We create the prefill_tokens_indices tensor that will be used to gather prefill logprobs
                # When batch == 1, we will just use the batch.input_ids values directly
                prefill_tokens_indices = batch.input_ids.new_zeros(len(out))

            next_position_ids = batch.position_ids.new_empty(len(batch))
            batch.slot_indices = batch.slot_indices[batch.cu_seqlen_prefill[1:] - 1]
            # We do not need cu_seqlen_prefill anymore
            batch.cu_seqlen_prefill = None
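            # After prefill only one slot index per request is kept (the slot
            # of its last prompt token); decode advances it by the number of
            # accepted tokens each step.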
        else:
            prefill_logprobs = None
            next_position_ids = batch.position_ids

        # Cumulative length
        cumulative_length = 0

        # Results
        generations: List[Generation] = []
        stopped = True

        # Zipped iterator
        iterator = zip(batch.input_lengths, batch.all_input_ids, accepted_ids)
        # We do two for loops as the first one can run completely asynchronously from the GPU while for the second
        # one, we need to first do a GPU <-> CPU sync
        # It is faster if we delay this sync for the maximum amount of time

        # For each member of the batch
        index = 0
        for i, (input_length, all_input_ids, n_accepted_ids) in enumerate(iterator):
            # Indexing metadata
            start_index = cumulative_length
            end_index = cumulative_length + input_length

            if prefill:
                # Indexing metadata
                out_start_index = batch.prefill_cu_outlens[i]
                out_end_index = batch.prefill_cu_outlens[i + 1]
                out_length = out_end_index - out_start_index

                # Initialize position_ids
                # In decode, we do not need this as we can just increment position ids
                next_position_ids[i] = batch.position_ids[end_index - 1]

                # Used to gather prefill logprobs
                # Copy batch.input_ids to prefill_token_indices
                if prefill_logprobs:
                    if len(batch) > 1:
                        prefill_tokens_indices[out_start_index : out_end_index - 1] = (
                            batch.input_ids[start_index + 1 : start_index + out_length]
                        )
                    else:
                        # Set prefill_tokens_indices to the correct slice
                        prefill_tokens_indices = batch.input_ids[
                            start_index + 1 : start_index + out_length
                        ]

            for j in range(n_accepted_ids):
                batch.all_input_ids_tensor[i, input_length + j] = next_input_ids[index]
                index += 1

            cumulative_length += input_length

        # Update values
        batch.input_ids = next_input_ids[accepted_ids.cumsum(dim=-1) - 1]
        batch.speculative_ids = speculative_ids
        batch.position_ids = next_position_ids + accepted_ids
        batch.input_lengths_tensor += accepted_ids
        batch.slot_indices += accepted_ids
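        # accepted_ids holds, per request, how many tokens were kept this step
        # (at least 1; more when speculated tokens were accepted), so position
        # ids, input lengths and slot indices all advance by that amount.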
        if prefill and prefill_logprobs:
            # Get prefill logprobs
            prefill_logprobs_tensor = torch.log_softmax(out, -1)
            prefill_logprobs = torch.gather(
                prefill_logprobs_tensor, 1, prefill_tokens_indices.view(-1, 1)
            )
            # GPU <-> CPU sync
            prefill_logprobs = prefill_logprobs.view(-1).tolist()

        # GPU <-> CPU sync
        next_token_logprobs = next_token_logprobs.tolist()
        next_token_ids = next_input_ids.tolist()
        accepted_ids = accepted_ids.tolist()
        start_decode = time.time_ns()

        # Zipped iterator
        iterator = zip(
            batch.requests,
            batch.input_lengths,
            batch.prefix_offsets,
            batch.read_offsets,
            batch.stopping_criterias,
            batch.all_input_ids,
            batch.next_token_chooser.do_sample,
            batch.next_token_chooser.seeds,
            batch.top_n_tokens,
            accepted_ids,
            batch_top_token_ids,
            batch_top_token_logprobs,
        )

        # For each member of the batch
        index = 0
        for i, (
            request,
            input_length,
            prefix_offset,
            read_offset,
            stopping_criteria,
            all_input_ids,
            do_sample,
            seed,
            top_n_tokens,
            n_accepted_ids,
            top_token_ids,
            top_token_logprobs,
        ) in enumerate(iterator):
            # Append next token to all tokens
            next_token_texts = []
            left = 0

            current_stopped = False
            for j in range(index, index + n_accepted_ids):
                # Generated token
                next_token_id = next_token_ids[j]
                all_input_ids.append(next_token_id)
                next_token_text, prefix_offset, read_offset = self.decode_token(
                    all_input_ids,
                    prefix_offset,
                    read_offset,
                )
                next_token_texts.append(next_token_text)

                stop, reason = stopping_criteria(
                    next_token_id,
                    next_token_text,
                )

                if stop:
                    left = index + n_accepted_ids - j - 1
                    current_stopped = True
                    break
                else:
                    current_stopped = False
            stopped = stopped and current_stopped

            _next_token_ids = next_token_ids[index : index + n_accepted_ids - left]
            _next_token_logprobs = next_token_logprobs[
                index : index + n_accepted_ids - left
            ]
            index += n_accepted_ids
            # Shard generations
            # All generations will be appended in the rust sharded client
            if i % self.world_size == self.rank:
                if stop:
                    # Decode generated tokens
                    output_text, _, _ = self.decode_token(
                        all_input_ids,
                        prefix_offset=len(all_input_ids)
                        - stopping_criteria.current_tokens
                        - 1,
                        read_offset=len(all_input_ids)
                        - stopping_criteria.current_tokens,
                        skip_special_tokens=True,
                    )
                    generated_text = GeneratedText(
                        output_text,
                        stopping_criteria.current_tokens,
                        reason,
                        seed if do_sample else None,
                    )
                else:
                    generated_text = None

                # Prefill
                if prefill and request.prefill_logprobs:
                    out_start_index = batch.prefill_cu_outlens[i]
                    out_end_index = batch.prefill_cu_outlens[i + 1]

                    # Remove generated token to only have prefill and add nan for first prompt token
                    request_prefill_logprobs = [float("nan")] + prefill_logprobs[
                        out_start_index : out_end_index - 1
                    ]
                    prefill_token_ids = all_input_ids[:-1]
                    prefill_texts = self.tokenizer.batch_decode(
                        prefill_token_ids,
                        clean_up_tokenization_spaces=False,
                        skip_special_tokens=False,
                    )

                    prefill_tokens = Tokens(
                        prefill_token_ids,
                        request_prefill_logprobs,
                        prefill_texts,
                        is_special=[],
                    )
                else:
                    prefill_tokens = None

                if top_n_tokens > 0:
                    all_top_tokens = []
                    for top_token_ids, top_token_logprobs in zip(
                        top_token_ids, top_token_logprobs
                    ):
                        toptoken_texts = self.tokenizer.batch_decode(
                            top_token_ids,
                            clean_up_tokenization_spaces=False,
                            skip_special_tokens=False,
                        )
                        special_toptokens = [
                            token_id in self.all_special_ids
                            for token_id in top_token_ids
                        ]
                        top_tokens = Tokens(
                            top_token_ids,
                            top_token_logprobs,
                            toptoken_texts,
                            special_toptokens,
                        )
                        all_top_tokens.append(top_tokens)
                    top_tokens = all_top_tokens
                else:
                    top_tokens = None

                generation = Generation(
                    request.id,
                    prefill_tokens,
                    Tokens(
                        _next_token_ids,
                        _next_token_logprobs,
                        next_token_texts,
                        [nid in self.all_special_ids for nid in _next_token_ids],
                    ),
                    generated_text,
                    top_tokens,
                )

                generations.append(generation)

            # accept each new token for this specific request since we may
            # have more than one new token per request with speculative decoding
            for next_token_id in _next_token_ids:
                batch.next_token_chooser = (
                    batch.next_token_chooser.advance_grammar_single(i, next_token_id)
                )

            # Update values
            batch.input_lengths[i] = input_length + n_accepted_ids
            if batch.input_lengths[i] > batch.max_seqlen:
                batch.max_seqlen = batch.input_lengths[i]
            batch.prefix_offsets[i] = prefix_offset
            batch.read_offsets[i] = read_offset
            batch.all_input_ids[i] = all_input_ids

        if stopped:
            del batch
            # No need to return a batch if we know that all requests stopped
            forward_ns = start_decode - start
            decode_ns = time.time_ns() - start_decode
            return generations, None, (forward_ns, decode_ns)

        batch.prefill_cu_outlens = None
        batch.prefill_head_indices = None
        batch.prefill_next_token_indices = None

        forward_ns = start_decode - start
        decode_ns = time.time_ns() - start_decode
        return generations, batch, (forward_ns, decode_ns)