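//! Request queue shared between the `Infer` handlers and the batching task:
//! entries are appended as they arrive and drained in FIFO order through
//! `Queue::next_batch`.
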
use crate::infer::InferError;
use crate::infer::InferStreamResponse;
use crate::validation::ValidGenerateRequest;
use nohash_hasher::{BuildNoHashHasher, IntMap};
use std::collections::VecDeque;
use text_generation_client::{Batch, Request};
use tokio::sync::oneshot;
use tokio::time::Instant;
use tracing::{info_span, instrument, Span};

/// Queue entry
#[derive(Debug)]
pub(crate) struct Entry {
    /// Request
    pub request: ValidGenerateRequest,
    /// Response sender to communicate between the Infer struct and the batching_task
    pub response_tx: flume::Sender<Result<InferStreamResponse, InferError>>,
    /// Span that will live as long as entry
    pub span: Span,
    /// Temporary span used as a guard when logging inference, wait times...
    pub temp_span: Option<Span>,
    /// Instant when this entry was queued
    pub queue_time: Instant,
    /// Instant when this entry was added to a batch
    pub batch_time: Option<Instant>,
}

/// Request Queue
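///
/// Cheap to clone: every clone shares the channel to the same background task.
///
/// A minimal usage sketch (hypothetical budgets; building an `Entry` omitted):
///
/// ```ignore
/// let queue = Queue::new(false, 1);
/// queue.append(entry);
/// if let Some((entries, batch, _span)) = queue.next_batch(None, 16, 32).await {
///     // ship `batch` to the model server and track `entries` by request id
/// }
/// ```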
#[derive(Debug, Clone)]
pub(crate) struct Queue {
    /// Channel to communicate with the background queue task
    queue_sender: flume::Sender<QueueCommand>,
}

impl Queue {
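    /// Create a queue and spawn the background task that owns its state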
    pub(crate) fn new(requires_padding: bool, block_size: u32) -> Self {
        // Create channel
        let (queue_sender, queue_receiver) = flume::unbounded();

        // Launch background queue task
        tokio::spawn(queue_task(requires_padding, block_size, queue_receiver));

        Self { queue_sender }
    }

    /// Append an entry to the queue
    #[instrument(skip_all)]
    pub(crate) fn append(&self, entry: Entry) {
        // Send append command to the background task managing the state
        // Unwrap is safe here
        self.queue_sender
            .send(QueueCommand::Append(Box::new(entry), Span::current()))
            .unwrap();
    }

    /// Get the next batch
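    ///
    /// Returns `None` when no batch can be formed: the queue is empty, holds
    /// fewer than `min_size` entries, or the entries at its front do not fit
    /// within `prefill_token_budget` / `token_budget`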
    #[instrument(skip(self))]
    pub(crate) async fn next_batch(
        &self,
        min_size: Option<usize>,
        prefill_token_budget: u32,
        token_budget: u32,
    ) -> Option<NextBatch> {
        // Create response channel
        let (response_sender, response_receiver) = oneshot::channel();
        // Send next batch command to the background task managing the state
        // Unwrap is safe here
        self.queue_sender
            .send(QueueCommand::NextBatch {
                min_size,
                prefill_token_budget,
                token_budget,
                response_sender,
                span: Span::current(),
            })
            .unwrap();
        // Await on response channel
        // Unwrap is safe here
        response_receiver.await.unwrap()
    }
}

// Background task responsible for the queue state
async fn queue_task(
    requires_padding: bool,
    block_size: u32,
    receiver: flume::Receiver<QueueCommand>,
) {
    let mut state = State::new(requires_padding, block_size);

    while let Ok(cmd) = receiver.recv_async().await {
        match cmd {
            QueueCommand::Append(entry, span) => {
                span.in_scope(|| state.append(*entry));
                metrics::increment_gauge!("tgi_queue_size", 1.0);
            }
            QueueCommand::NextBatch {
                min_size,
                prefill_token_budget,
                token_budget,
                response_sender,
                span,
            } => span.in_scope(|| {
                let next_batch = state.next_batch(min_size, prefill_token_budget, token_budget);
                response_sender.send(next_batch).unwrap();
                metrics::gauge!("tgi_queue_size", state.entries.len() as f64);
            }),
        }
    }
}

/// Queue State
#[derive(Debug)]
struct State {
    /// Queue entries organized in a VecDeque
    entries: VecDeque<(u64, Entry)>,

    /// Id of the next entry
    next_id: u64,

    /// Id of the next batch
    next_batch_id: u64,

    /// Whether the model is using padding
    requires_padding: bool,

    /// Paged Attention block size
    block_size: u32,
}

impl State {
    fn new(requires_padding: bool, block_size: u32) -> Self {
        Self {
            entries: VecDeque::with_capacity(128),
            next_id: 0,
            next_batch_id: 0,
            requires_padding,
            block_size,
        }
    }

    /// Append an entry to the queue
    fn append(&mut self, mut entry: Entry) {
        // Create a span that will live as long as the entry is in the queue waiting to be batched
        let queue_span = info_span!(parent: &entry.span, "queued");
        entry.temp_span = Some(queue_span);

        // Push entry in the queue
        self.entries.push_back((self.next_id, entry));
        self.next_id += 1;
    }

    /// Get the next batch
    fn next_batch(
        &mut self,
        min_size: Option<usize>,
        prefill_token_budget: u32,
        token_budget: u32,
    ) -> Option<NextBatch> {
        if self.entries.is_empty() {
            return None;
        }

        // Check if we have enough entries
        if let Some(min_size) = min_size {
            if self.entries.len() < min_size {
                return None;
            }
        }

        // Create span for this batch to add context to inference calls
        let next_batch_span = info_span!(parent: None, "batch", batch_size = tracing::field::Empty);
        next_batch_span.follows_from(&Span::current());

        let mut batch_requests = Vec::with_capacity(self.entries.len());
        let mut batch_entries =
            IntMap::with_capacity_and_hasher(self.entries.len(), BuildNoHashHasher::default());

        let mut max_input_length = 0;
        let mut prefill_tokens: u32 = 0;
        let mut decode_tokens: u32 = 0;

        // Pop entries starting from the front of the queue
        while let Some((id, mut entry)) = self.entries.pop_front() {
            // Filter entries where the response receiver was dropped (== entries where the request
            // was dropped by the client)
            if entry.response_tx.is_disconnected() {
                metrics::increment_counter!("tgi_request_failure", "err" => "dropped");
                continue;
            }

            if self.requires_padding {
                // We pad to max input length in the Python shards
                // We need to take these padding tokens into the equation
                max_input_length = max_input_length.max(entry.request.input_length);
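                // With padding, every request in the batch costs as much as the
                // longest one: (entries already accepted + this one) * max_input_length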
                prefill_tokens = (batch_requests.len() + 1) as u32 * max_input_length;
            } else {
                // pad to block size
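                // (x + block_size - 1) / block_size is ceiling division, e.g.
                // 5 input tokens with block_size 4 occupy 2 blocks = 8 slots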
                prefill_tokens += ((entry.request.input_length + self.block_size - 1)
                    / self.block_size)
                    * self.block_size;
            }

            if self.requires_padding {
                decode_tokens += entry.request.stopping_parameters.max_new_tokens;
            } else {
                // pad to block size
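                // same ceiling round-up, applied to the worst case of
                // max_new_tokens generated tokens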
                decode_tokens +=
                    ((entry.request.stopping_parameters.max_new_tokens + self.block_size - 1)
                        / self.block_size)
                        * self.block_size;
            }

            if prefill_tokens > prefill_token_budget
                || (prefill_tokens + decode_tokens) > token_budget
            {
                // Entry is over budget
                // Add it back to the front
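                // and stop scanning: the queue is FIFO, entries behind this one
                // are not considered for the current batch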
                self.entries.push_front((id, entry));
                break;
            }

            // Create a new span to link the batch back to this entry
            let entry_batch_span = info_span!(parent: &entry.span, "infer");
            // Add relationships
            next_batch_span.follows_from(&entry_batch_span);
            entry_batch_span.follows_from(&next_batch_span);
            // Update entry
            entry.temp_span = Some(entry_batch_span);

            batch_requests.push(Request {
                id,
                prefill_logprobs: entry.request.decoder_input_details,
                inputs: entry.request.inputs.clone(),
                truncate: entry.request.truncate,
                parameters: Some(entry.request.parameters.clone()),
                stopping_parameters: Some(entry.request.stopping_parameters.clone()),
                top_n_tokens: entry.request.top_n_tokens,
            });
            // Set batch_time
            entry.batch_time = Some(Instant::now());
            // Insert in batch_entries IntMap
            batch_entries.insert(id, entry);
        }

        // Empty batch
        if batch_requests.is_empty() {
            return None;
        }

        // Check if our batch is big enough
        if let Some(min_size) = min_size {
            // Batch is too small
            if batch_requests.len() < min_size {
                // Add back entries to the queue in the correct order
                for r in batch_requests.into_iter().rev() {
                    let id = r.id;
                    let entry = batch_entries.remove(&id).unwrap();
                    self.entries.push_front((id, entry));
                }

                return None;
            }
        }

        // Final batch size
        let size = batch_requests.len() as u32;
        next_batch_span.record("batch_size", size);

        let batch = Batch {
            id: self.next_batch_id,
            requests: batch_requests,
            size,
            max_tokens: (prefill_tokens + decode_tokens),
        };
        // Increment batch id
        self.next_batch_id += 1;

        metrics::histogram!("tgi_batch_next_size", batch.size as f64);

        Some((batch_entries, batch, next_batch_span))
    }
}

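/// Batched entries keyed by request id, the batch to send to the model, and
/// the span tracking the batch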
type NextBatch = (IntMap<u64, Entry>, Batch, Span);

#[derive(Debug)]
enum QueueCommand {
    Append(Box<Entry>, Span),
    NextBatch {
        min_size: Option<usize>,
        prefill_token_budget: u32,
        token_budget: u32,
        response_sender: oneshot::Sender<Option<NextBatch>>,
        span: Span,
    },
}

#[cfg(test)]
mod tests {
    use super::*;
    use text_generation_client::{NextTokenChooserParameters, StoppingCriteriaParameters};
    use tracing::info_span;

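    /// Build a minimal valid entry; callers must keep the returned receiver
    /// alive, otherwise the entry is filtered out as a disconnected client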
    fn default_entry() -> (
        Entry,
        flume::Receiver<Result<InferStreamResponse, InferError>>,
    ) {
        let (response_tx, receiver_tx) = flume::unbounded();

        let entry = Entry {
            request: ValidGenerateRequest {
                inputs: "".to_string(),
                input_length: 0,
                truncate: 0,
                decoder_input_details: false,
                parameters: NextTokenChooserParameters {
                    temperature: 0.0,
                    top_k: 0,
                    top_p: 0.0,
                    typical_p: 0.0,
                    do_sample: false,
                    seed: 0,
                    repetition_penalty: 0.0,
                    watermark: false,
                },
                stopping_parameters: StoppingCriteriaParameters {
                    ignore_eos_token: false,
                    max_new_tokens: 1,
                    stop_sequences: vec![],
                },
                top_n_tokens: 0,
            },
            response_tx,
            span: info_span!("entry"),
            temp_span: None,
            queue_time: Instant::now(),
            batch_time: None,
        };
        (entry, receiver_tx)
    }

    #[test]
    fn test_append() {
        let mut state = State::new(false, 1);
        let (entry, _guard) = default_entry();

        assert_eq!(state.next_id, 0);
        assert_eq!(state.entries.len(), 0);

        state.append(entry);

        assert_eq!(state.next_id, 1);
        assert_eq!(state.entries.len(), 1);
        let (id, _) = state.entries.remove(0).unwrap();
        assert_eq!(id, 0);
    }

    #[test]
    fn test_next_batch_empty() {
        let mut state = State::new(false, 1);

        assert!(state.next_batch(None, 1, 1).is_none());
        assert!(state.next_batch(Some(1), 1, 1).is_none());
365
366
367
368
    }

    #[test]
    fn test_next_batch_min_size() {
        let mut state = State::new(false, 1);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        state.append(entry1);
        state.append(entry2);

        let (entries, batch, _) = state.next_batch(None, 2, 2).unwrap();
        assert_eq!(entries.len(), 2);
        assert!(entries.contains_key(&0));
        assert!(entries.contains_key(&1));
        assert!(entries.get(&0).unwrap().batch_time.is_some());
        assert!(entries.get(&1).unwrap().batch_time.is_some());
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 2);

        assert_eq!(state.next_id, 2);
        assert_eq!(state.entries.len(), 0);
        assert_eq!(state.next_batch_id, 1);

        let (entry3, _guard3) = default_entry();
        state.append(entry3);

        assert!(state.next_batch(Some(2), 2, 2).is_none());

        assert_eq!(state.next_id, 3);
        assert_eq!(state.entries.len(), 1);
        let (id, _) = state.entries.remove(0).unwrap();
        assert_eq!(id, 2);
    }

    #[test]
    fn test_next_batch_token_budget() {
        let mut state = State::new(false, 1);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        state.append(entry1);
        state.append(entry2);

        let (entries, batch, _) = state.next_batch(None, 1, 1).unwrap();
        assert_eq!(entries.len(), 1);
        assert!(entries.contains_key(&0));
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 1);

        assert_eq!(state.next_id, 2);
        assert_eq!(state.entries.len(), 1);
        assert_eq!(state.next_batch_id, 1);

        let (entry3, _guard3) = default_entry();
        state.append(entry3);

        let (entries, batch, _) = state.next_batch(None, 3, 3).unwrap();
        assert_eq!(entries.len(), 2);
        assert!(entries.contains_key(&1));
        assert!(entries.contains_key(&2));
        assert_eq!(batch.id, 1);
        assert_eq!(batch.size, 2);

        assert_eq!(state.next_id, 3);
        assert_eq!(state.entries.len(), 0);
        assert_eq!(state.next_batch_id, 2);
    }

    #[tokio::test]
    async fn test_queue_append() {
        let queue = Queue::new(false, 1);
        let (entry, _guard) = default_entry();
        queue.append(entry);
    }

    #[tokio::test]
    async fn test_queue_next_batch_empty() {
        let queue = Queue::new(false, 1);

        assert!(queue.next_batch(None, 1, 1).await.is_none());
        assert!(queue.next_batch(Some(1), 1, 1).await.is_none());
    }

    #[tokio::test]
    async fn test_queue_next_batch_min_size() {
        let queue = Queue::new(false, 1);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        queue.append(entry1);
        queue.append(entry2);

        let (entries, batch, _) = queue.next_batch(None, 2, 2).await.unwrap();
        assert_eq!(entries.len(), 2);
        assert!(entries.contains_key(&0));
        assert!(entries.contains_key(&1));
        assert!(entries.get(&0).unwrap().batch_time.is_some());
        assert!(entries.get(&1).unwrap().batch_time.is_some());
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 2);

        let (entry3, _guard3) = default_entry();
        queue.append(entry3);

        // Not enough requests pending
        assert!(queue.next_batch(Some(2), 2, 2).await.is_none());
        // Not enough token budget
        assert!(queue.next_batch(Some(1), 0, 0).await.is_none());
        // Ok
        let (entries2, batch2, _) = queue.next_batch(Some(1), 2, 2).await.unwrap();
        assert_eq!(entries2.len(), 1);
        assert!(entries2.contains_key(&2));
        assert!(entries2.get(&2).unwrap().batch_time.is_some());
        assert_eq!(batch2.id, 1);
        assert_eq!(batch2.size, 1);
    }

    #[tokio::test]
    async fn test_queue_next_batch_token_budget() {
        let queue = Queue::new(false, 1);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        queue.append(entry1);
        queue.append(entry2);

        let (entries, batch, _) = queue.next_batch(None, 1, 1).await.unwrap();
        assert_eq!(entries.len(), 1);
        assert!(entries.contains_key(&0));
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 1);

        let (entry3, _guard3) = default_entry();
        queue.append(entry3);

        let (entries, batch, _) = queue.next_batch(None, 3, 3).await.unwrap();
        assert_eq!(entries.len(), 2);
        assert!(entries.contains_key(&1));
        assert!(entries.contains_key(&2));
        assert_eq!(batch.id, 1);
        assert_eq!(batch.size, 2);
    }

    #[tokio::test]
    async fn test_queue_next_batch_dropped_receiver() {
        let queue = Queue::new(false, 1);
        let (entry, _) = default_entry();
        queue.append(entry);

        assert!(queue.next_batch(None, 1, 1).await.is_none());
    }
}