use crate::infer::InferError;
use crate::infer::InferStreamResponse;
use crate::validation::ValidGenerateRequest;
use nohash_hasher::{BuildNoHashHasher, IntMap};
use std::cmp::min;
use std::collections::VecDeque;
use text_generation_client::ChunksToString;
use text_generation_client::Input;
use text_generation_client::{Batch, Request};
use tokio::sync::{mpsc, oneshot};
use tokio::time::Instant;
use tracing::{info_span, instrument, Span};

/// Queue entry
#[derive(Debug)]
pub(crate) struct Entry {
    /// Request
    pub request: ValidGenerateRequest,
    /// Response sender to communicate between the Infer struct and the batching_task
    pub response_tx: mpsc::UnboundedSender<Result<InferStreamResponse, InferError>>,
    /// Span that will live as long as entry
    pub span: Span,
    /// Temporary span used as a guard when logging inference, wait times...
    pub temp_span: Option<Span>,
    /// Instant when this entry was queued
    pub queue_time: Instant,
    /// Instant when this entry was added to a batch
    pub batch_time: Option<Instant>,
}

/// Request Queue
#[derive(Debug, Clone)]
pub(crate) struct Queue {
    /// Channel to communicate with the background queue task
    queue_sender: mpsc::UnboundedSender<QueueCommand>,
}

impl Queue {
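    /// Create a new `Queue` and spawn its background task.
    ///
    /// A minimal usage sketch (marked `ignore` since building an `Entry` is elided):
    ///
    /// ```ignore
    /// let queue = Queue::new(false, 1, None, 0);
    /// queue.append(entry);
    /// let next = queue.next_batch(None, None, 16, 16).await;
    /// ```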
    pub(crate) fn new(
        requires_padding: bool,
        block_size: u32,
        window_size: Option<u32>,
        speculate: u32,
    ) -> Self {
        // Create channel
        let (queue_sender, queue_receiver) = mpsc::unbounded_channel();

        // Launch background queue task
        tokio::spawn(queue_task(
            requires_padding,
            block_size,
            window_size,
            speculate,
            queue_receiver,
        ));

        Self { queue_sender }
    }

    /// Append an entry to the queue
    #[instrument(skip_all)]
    pub(crate) fn append(&self, entry: Entry) {
        // Send append command to the background task managing the state
        // Unwrap is safe here
        self.queue_sender
            .send(QueueCommand::Append(Box::new(entry), Span::current()))
            .unwrap();
    }

    /// Get the next batch
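    /// Returns `None` when no batch can be built under the given size and token constraints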
    #[instrument(skip(self))]
    pub(crate) async fn next_batch(
        &self,
        min_size: Option<usize>,
        max_size: Option<usize>,
        prefill_token_budget: u32,
        token_budget: u32,
    ) -> Option<NextBatch> {
        // Create response channel
        let (response_sender, response_receiver) = oneshot::channel();
        // Send next batch command to the background task managing the state
        // Unwrap is safe here
        self.queue_sender
            .send(QueueCommand::NextBatch {
                min_size,
                max_size,
                prefill_token_budget,
                token_budget,
                response_sender,
                span: Span::current(),
            })
            .unwrap();
        // Await on response channel
        // Unwrap is safe here
        response_receiver.await.unwrap()
    }
}

// Background task responsible for the queue state
async fn queue_task(
    requires_padding: bool,
    block_size: u32,
    window_size: Option<u32>,
    speculate: u32,
    mut receiver: mpsc::UnboundedReceiver<QueueCommand>,
) {
    let mut state = State::new(requires_padding, block_size, window_size, speculate);

    while let Some(cmd) = receiver.recv().await {
        match cmd {
            QueueCommand::Append(entry, span) => {
                span.in_scope(|| state.append(*entry));
                metrics::increment_gauge!("tgi_queue_size", 1.0);
            }
            QueueCommand::NextBatch {
                min_size,
                max_size,
                prefill_token_budget,
                token_budget,
                response_sender,
                span,
            } => span.in_scope(|| {
                let next_batch =
                    state.next_batch(min_size, max_size, prefill_token_budget, token_budget);
                response_sender.send(next_batch).unwrap();
                metrics::gauge!("tgi_queue_size", state.entries.len() as f64);
            }),
        }
    }
}

/// Queue State
#[derive(Debug)]
struct State {
    /// Queue entries organized in a VecDeque
    entries: VecDeque<(u64, Entry)>,

    /// Id of the next entry
    next_id: u64,

    /// Id of the next batch
    next_batch_id: u64,

    /// Whether the model is using padding
    requires_padding: bool,

    /// Paged Attention block size
    block_size: u32,

    /// Sliding window
    window_size: Option<u32>,

    /// Speculation amount
    speculate: u32,
}

impl State {
    fn new(
        requires_padding: bool,
        block_size: u32,
        window_size: Option<u32>,
        speculate: u32,
    ) -> Self {
        Self {
            entries: VecDeque::with_capacity(128),
            next_id: 0,
            next_batch_id: 0,
            requires_padding,
            block_size,
            window_size,
            speculate,
        }
    }

    /// Append an entry to the queue
    fn append(&mut self, mut entry: Entry) {
        // Create a span that will live as long as the entry is in the queue waiting to be batched
        let queue_span = info_span!(parent: &entry.span, "queued");
        entry.temp_span = Some(queue_span);

        // Push entry in the queue
        self.entries.push_back((self.next_id, entry));
        self.next_id += 1;
    }

    /// Get the next batch
    fn next_batch(
        &mut self,
        min_size: Option<usize>,
        max_size: Option<usize>,
        prefill_token_budget: u32,
        token_budget: u32,
    ) -> Option<NextBatch> {
        if self.entries.is_empty() {
            tracing::debug!("No queue");
            return None;
        }

        // Check if we have enough entries
        if let Some(min_size) = min_size {
            if self.entries.len() < min_size {
                tracing::debug!("Not enough entries");
                return None;
            }
        }

        // Pad prefill_token_budget to be a multiple of block size
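        // `((x + b - 1) / b) * b` rounds `x` up to the next multiple of `b` using integer math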
        let prefill_token_budget =
            ((prefill_token_budget + self.block_size - 1) / self.block_size) * self.block_size;

        // Create span for this batch to add context to inference calls
        let next_batch_span = info_span!(parent: None, "batch", batch_size = tracing::field::Empty);
        next_batch_span.follows_from(&Span::current());

        let mut batch_requests = Vec::with_capacity(self.entries.len());
        let mut batch_entries =
            IntMap::with_capacity_and_hasher(self.entries.len(), BuildNoHashHasher::default());

        let mut max_input_length = 0;
        let mut prefill_tokens: u32 = 0;
        let mut decode_tokens: u32 = 0;

        // Pop entries starting from the front of the queue
        while let Some((id, mut entry)) = self.entries.pop_front() {
            // Filter entries where the response receiver was dropped (== entries where the request
            // was dropped by the client)
            if entry.response_tx.is_closed() {
                metrics::increment_counter!("tgi_request_failure", "err" => "dropped");
                tracing::debug!("Dropping entry");
                continue;
            }

            if self.requires_padding {
                // We pad to max input length in the Python shards
                // We need to take these padding tokens into the equation
                max_input_length = max_input_length.max(entry.request.input_length);
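                // Worst case: every request in the batch is padded to the longest input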
                prefill_tokens = (batch_requests.len() + 1) as u32 * max_input_length
            } else {
                // pad to block size
                prefill_tokens += ((entry.request.input_length + self.block_size - 1)
                    / self.block_size)
                    * self.block_size;
            }

            if self.requires_padding {
                decode_tokens += entry.request.stopping_parameters.max_new_tokens;
            } else {
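                // With a sliding window, cap the budgeted new tokens at what
                // remains of the window after the input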
                let max_new_tokens = match self.window_size {
                    None => entry.request.stopping_parameters.max_new_tokens,
                    Some(window_size) => min(
                        window_size.saturating_sub(entry.request.input_length),
                        entry.request.stopping_parameters.max_new_tokens,
                    ),
                };

                // pad to block size
                decode_tokens +=
                    ((max_new_tokens + self.block_size - 1) / self.block_size) * self.block_size;
            }

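            // Reserve room in the token budget for `speculate` speculative-decoding tokens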
            if prefill_tokens > prefill_token_budget
                || (prefill_tokens + decode_tokens + self.speculate) > token_budget
            {
                // Entry is over budget
                // Add it back to the front
                tracing::debug!("Over budget: prefill_tokens={prefill_tokens} > {prefill_token_budget} || {prefill_tokens} + {decode_tokens} + {} > {token_budget}", self.speculate);
                self.entries.push_front((id, entry));
                break;
            }

            tracing::debug!("Accepting entry");
            // Create a new span to link the batch back to this entry
            let entry_batch_span = info_span!(parent: &entry.span, "infer");
            // Add relationships
            next_batch_span.follows_from(&entry_batch_span);
            entry_batch_span.follows_from(&next_batch_span);
            // Update entry
            entry.temp_span = Some(entry_batch_span);

            batch_requests.push(Request {
                id,
                prefill_logprobs: entry.request.decoder_input_details,
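                // Send both the structured input chunks and their flattened string form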
                input_chunks: Some(Input {
                    chunks: entry.request.inputs.clone(),
                }),
                inputs: entry.request.inputs.chunks_to_string(),
                truncate: entry.request.truncate,
                parameters: Some(entry.request.parameters.clone()),
                stopping_parameters: Some(entry.request.stopping_parameters.clone()),
                top_n_tokens: entry.request.top_n_tokens,
            });
            // Set batch_time
            entry.batch_time = Some(Instant::now());
            // Insert in batch_entries IntMap
            batch_entries.insert(id, entry);

            // Stop if we reached max_size
            if Some(batch_requests.len()) == max_size {
                break;
            }
        }

        // Empty batch
        if batch_requests.is_empty() {
            tracing::debug!("Filtered out all entries");
            return None;
        }

        // Check if our batch is big enough
        if let Some(min_size) = min_size {
            // Batch is too small
            if batch_requests.len() < min_size {
                // Add back entries to the queue in the correct order
                for r in batch_requests.into_iter().rev() {
                    let id = r.id;
                    let entry = batch_entries.remove(&id).unwrap();
                    self.entries.push_front((id, entry));
                }

                return None;
            }
        }

        // Final batch size
        let size = batch_requests.len() as u32;
        next_batch_span.record("batch_size", size);

        let batch = Batch {
            id: self.next_batch_id,
            requests: batch_requests,
            size,
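            // Worst-case number of tokens this batch can occupy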
            max_tokens: (prefill_tokens + decode_tokens),
        };
        // Increment batch id
        self.next_batch_id += 1;

        metrics::histogram!("tgi_batch_next_size", batch.size as f64);

        Some((batch_entries, batch, next_batch_span))
    }
}

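// Batch entries keyed by request id, the protobuf `Batch`, and the span that ties them together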
type NextBatch = (IntMap<u64, Entry>, Batch, Span);

#[derive(Debug)]
enum QueueCommand {
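    // `Entry` is boxed to keep the command enum small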
    Append(Box<Entry>, Span),
    NextBatch {
        min_size: Option<usize>,
        max_size: Option<usize>,
        prefill_token_budget: u32,
        token_budget: u32,
        response_sender: oneshot::Sender<Option<NextBatch>>,
        span: Span,
    },
}

#[cfg(test)]
mod tests {
    use super::*;
    use text_generation_client::{
        GrammarType as ProtoGrammarType, NextTokenChooserParameters, StoppingCriteriaParameters,
    };
    use tracing::info_span;

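    // Returns the entry plus its response receiver; tests keep the receiver alive
    // (the `_guard` bindings) so the entry is not filtered out as a dropped request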
    fn default_entry() -> (
        Entry,
        mpsc::UnboundedReceiver<Result<InferStreamResponse, InferError>>,
    ) {
        let (response_tx, response_rx) = mpsc::unbounded_channel();

        let entry = Entry {
            request: ValidGenerateRequest {
                inputs: vec![],
                input_length: 0,
                truncate: 0,
                decoder_input_details: false,
                parameters: NextTokenChooserParameters {
                    temperature: 0.0,
                    top_k: 0,
                    top_p: 0.0,
                    typical_p: 0.0,
                    do_sample: false,
                    seed: 0,
                    repetition_penalty: 0.0,
                    frequency_penalty: 0.0,
                    watermark: false,
                    grammar: String::new(),
                    grammar_type: ProtoGrammarType::None as i32,
                },
                stopping_parameters: StoppingCriteriaParameters {
                    ignore_eos_token: false,
                    max_new_tokens: 1,
                    stop_sequences: vec![],
                },
                top_n_tokens: 0,
            },
            response_tx,
            span: info_span!("entry"),
            temp_span: None,
            queue_time: Instant::now(),
            batch_time: None,
        };
        (entry, response_rx)
    }

    #[test]
    fn test_append() {
        let mut state = State::new(false, 1, None, 0);
        let (entry, _guard) = default_entry();

        assert_eq!(state.next_id, 0);
        assert_eq!(state.entries.len(), 0);

        state.append(entry);

        assert_eq!(state.next_id, 1);
        assert_eq!(state.entries.len(), 1);
        let (id, _) = state.entries.remove(0).unwrap();
        assert_eq!(id, 0);
    }

    #[test]
    fn test_next_batch_empty() {
        let mut state = State::new(false, 1, None, 0);

        assert!(state.next_batch(None, None, 1, 1).is_none());
        assert!(state.next_batch(Some(1), None, 1, 1).is_none());
    }

    #[test]
    fn test_next_batch_min_size() {
        let mut state = State::new(false, 1, None, 0);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        state.append(entry1);
        state.append(entry2);

        let (entries, batch, _) = state.next_batch(None, None, 2, 2).unwrap();
        assert_eq!(entries.len(), 2);
        assert!(entries.contains_key(&0));
        assert!(entries.contains_key(&1));
        assert!(entries.get(&0).unwrap().batch_time.is_some());
        assert!(entries.get(&1).unwrap().batch_time.is_some());
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 2);

        assert_eq!(state.next_id, 2);
        assert_eq!(state.entries.len(), 0);
        assert_eq!(state.next_batch_id, 1);

        let (entry3, _guard3) = default_entry();
        state.append(entry3);

        assert!(state.next_batch(Some(2), None, 2, 2).is_none());

        assert_eq!(state.next_id, 3);
        assert_eq!(state.entries.len(), 1);
        let (id, _) = state.entries.remove(0).unwrap();
        assert_eq!(id, 2);
    }

    #[test]
    fn test_next_batch_max_size() {
        let mut state = State::new(false, 1, None, 0);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        state.append(entry1);
        state.append(entry2);

        let (entries, batch, _) = state.next_batch(None, Some(1), 2, 2).unwrap();
        assert_eq!(entries.len(), 1);
        assert!(entries.contains_key(&0));
        assert!(entries.get(&0).unwrap().batch_time.is_some());
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 1);

        assert_eq!(state.next_id, 2);
        assert_eq!(state.entries.len(), 1);
        assert_eq!(state.next_batch_id, 1);
    }

    #[test]
    fn test_next_batch_token_budget() {
        let mut state = State::new(false, 1, None, 0);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        state.append(entry1);
        state.append(entry2);

        let (entries, batch, _) = state.next_batch(None, None, 1, 1).unwrap();
        assert_eq!(entries.len(), 1);
        assert!(entries.contains_key(&0));
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 1);

        assert_eq!(state.next_id, 2);
        assert_eq!(state.entries.len(), 1);
        assert_eq!(state.next_batch_id, 1);

        let (entry3, _guard3) = default_entry();
        state.append(entry3);

        let (entries, batch, _) = state.next_batch(None, None, 3, 3).unwrap();
        assert_eq!(entries.len(), 2);
        assert!(entries.contains_key(&1));
        assert!(entries.contains_key(&2));
        assert_eq!(batch.id, 1);
        assert_eq!(batch.size, 2);

        assert_eq!(state.next_id, 3);
        assert_eq!(state.entries.len(), 0);
        assert_eq!(state.next_batch_id, 2);
    }

    #[tokio::test]
    async fn test_queue_append() {
        let queue = Queue::new(false, 1, None, 0);
        let (entry, _guard) = default_entry();
        queue.append(entry);
    }

    #[tokio::test]
    async fn test_queue_next_batch_empty() {
        let queue = Queue::new(false, 1, None, 0);

        assert!(queue.next_batch(None, None, 1, 1).await.is_none());
        assert!(queue.next_batch(Some(1), None, 1, 1).await.is_none());
    }

    #[tokio::test]
    async fn test_queue_next_batch_min_size() {
        let queue = Queue::new(false, 1, None, 0);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        queue.append(entry1);
        queue.append(entry2);

        let (entries, batch, _) = queue.next_batch(None, None, 2, 2).await.unwrap();
        assert_eq!(entries.len(), 2);
        assert!(entries.contains_key(&0));
        assert!(entries.contains_key(&1));
        assert!(entries.get(&0).unwrap().batch_time.is_some());
        assert!(entries.get(&1).unwrap().batch_time.is_some());
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 2);

        let (entry3, _guard3) = default_entry();
        queue.append(entry3);

        // Not enough requests pending
        assert!(queue.next_batch(Some(2), None, 2, 2).await.is_none());
        // Not enough token budget
        assert!(queue.next_batch(Some(1), None, 0, 0).await.is_none());
        // Ok
        let (entries2, batch2, _) = queue.next_batch(Some(1), None, 2, 2).await.unwrap();
        assert_eq!(entries2.len(), 1);
        assert!(entries2.contains_key(&2));
        assert!(entries2.get(&2).unwrap().batch_time.is_some());
        assert_eq!(batch2.id, 1);
        assert_eq!(batch2.size, 1);
    }

    #[tokio::test]
    async fn test_queue_next_batch_max_size() {
        let queue = Queue::new(false, 1, None, 0);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        queue.append(entry1);
        queue.append(entry2);

        let (entries, batch, _) = queue.next_batch(None, Some(1), 2, 2).await.unwrap();
        assert_eq!(entries.len(), 1);
        assert!(entries.contains_key(&0));
        assert!(entries.get(&0).unwrap().batch_time.is_some());
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 1);
    }

    #[tokio::test]
    async fn test_queue_next_batch_token_budget() {
        let queue = Queue::new(false, 1, None, 0);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        queue.append(entry1);
        queue.append(entry2);

        let (entries, batch, _) = queue.next_batch(None, None, 1, 1).await.unwrap();
        assert_eq!(entries.len(), 1);
        assert!(entries.contains_key(&0));
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 1);

        let (entry3, _guard3) = default_entry();
        queue.append(entry3);

        let (entries, batch, _) = queue.next_batch(None, None, 3, 3).await.unwrap();
        assert_eq!(entries.len(), 2);
        assert!(entries.contains_key(&1));
        assert!(entries.contains_key(&2));
        assert_eq!(batch.id, 1);
        assert_eq!(batch.size, 2);
    }

    #[tokio::test]
    async fn test_queue_next_batch_token_speculate() {
        let queue = Queue::new(false, 1, None, 2);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        queue.append(entry1);
        queue.append(entry2);

        // Budget of 1 is not enough
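        // (1 decode token + 2 speculated tokens = 3 > 1)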
        assert!(queue.next_batch(None, None, 1, 1).await.is_none());

        let (entries, batch, _) = queue.next_batch(None, None, 6, 6).await.unwrap();
        assert_eq!(entries.len(), 2);
        assert!(entries.contains_key(&0));
        assert!(entries.contains_key(&1));
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 2);
    }

    #[tokio::test]
    async fn test_queue_next_batch_dropped_receiver() {
        let queue = Queue::new(false, 1, None, 0);
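        // Bind the receiver to `_` so it drops immediately: the entry must be filtered out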
        let (entry, _) = default_entry();
        queue.append(entry);

        assert!(queue.next_batch(None, None, 1, 1).await.is_none());
    }
}