/// Batching and inference logic
use crate::validation::{Validation, ValidationError};
use crate::{Entry, Queue, Token};
use crate::{GenerateRequest, PrefillToken};
use flume::r#async::RecvStream;
use flume::SendTimeoutError;
use futures::future::try_join_all;
use futures::stream::StreamExt;
use nohash_hasher::IntMap;
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};
use std::time::Duration;
use text_generation_client::{
    Batch, CachedBatch, ClientError, GeneratedText, Generation, PrefillTokens, ShardedClient,
};
use thiserror::Error;
use tokio::sync::{Notify, OwnedSemaphorePermit, Semaphore, TryAcquireError};
use tokio::time::Instant;
use tracing::{info_span, instrument, Instrument, Span};

/// Inference struct
#[derive(Clone)]
pub struct Infer {
    /// Validation
    validation: Validation,
    /// Request queue
    queue: Queue,
    /// Shared state
    shared: Arc<Shared>,
    /// Inference limit
    limit_concurrent_requests: Arc<Semaphore>,
}

/// Infer shared state
struct Shared {
    /// Batching background Tokio task notifier
    batching_task: Notify,
}
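
// How the pieces fit together (illustrative): the server builds one `Infer`, clones it
// cheaply per request handler (all clones share the same `Queue`, `Shared` notifier and
// concurrency semaphore through `Arc`s), and calls `generate` / `generate_stream` on it.
// A hypothetical handler could look like:
//
//     // `infer` built once via `Infer::new(...)`, `request` is a `GenerateRequest`
//     let response = infer.generate(request).await?;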

impl Infer {
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn new(
        client: ShardedClient,
        validation: Validation,
        waiting_served_ratio: f32,
        max_batch_prefill_tokens: u32,
        max_batch_total_tokens: u32,
        max_waiting_tokens: usize,
        max_concurrent_requests: usize,
        requires_padding: bool,
        generation_health: Arc<AtomicBool>,
    ) -> Self {
        // Infer shared state
        let queue = Queue::new(requires_padding, 16);
        let shared = Arc::new(Shared {
            batching_task: Notify::new(),
        });

        // Spawn batching background task that contains all the inference logic
        tokio::spawn(batching_task(
            client,
            waiting_served_ratio,
            max_batch_prefill_tokens,
            max_batch_total_tokens,
            max_waiting_tokens,
            queue.clone(),
            shared.clone(),
            generation_health,
        ));

        // Inference limit with a semaphore
        let semaphore = Arc::new(Semaphore::new(max_concurrent_requests));

        Self {
            validation,
            queue,
            shared,
            limit_concurrent_requests: semaphore,
        }
    }

    /// Add a new request to the queue and return a stream of InferStreamResponse
    #[instrument(skip(self))]
    pub(crate) async fn generate_stream(
        &self,
        request: GenerateRequest,
    ) -> Result<
        (
            OwnedSemaphorePermit,
            RecvStream<Result<InferStreamResponse, InferError>>,
        ),
        InferError,
    > {
        // Limit concurrent requests by acquiring a permit from the semaphore
        let permit = self
            .clone()
            .limit_concurrent_requests
            .try_acquire_owned()
            .map_err(|err| {
                metrics::increment_counter!("tgi_request_failure", "err" => "overloaded");
                tracing::error!("{err}");
                err
            })?;

        // Validate request
        let valid_request = self.validation.validate(request).await.map_err(|err| {
            metrics::increment_counter!("tgi_request_failure", "err" => "validation");
            tracing::error!("{err}");
            err
        })?;

        // MPSC channel to communicate with the background batching task
        let (response_tx, response_rx) = flume::unbounded();

        // Append the request to the queue
        self.queue.append(Entry {
            request: valid_request,
            response_tx,
            span: Span::current(),
            temp_span: None,
            queue_time: Instant::now(),
            batch_time: None,
        });

        // Notify the background task that we have a new entry in the queue that needs
        // to be batched
        self.shared.batching_task.notify_one();

        // Return stream
        Ok((permit, response_rx.into_stream()))
    }

    /// Add a new request to the queue and return an InferResponse
    #[instrument(skip(self))]
    pub(crate) async fn generate(
        &self,
        request: GenerateRequest,
    ) -> Result<InferResponse, InferError> {
        let use_top_tokens = request.parameters.top_n_tokens.is_some_and(|x| x > 0);

        // Create stream and keep semaphore permit as long as generate lives
        let (_permit, mut stream) = self.generate_stream(request).await?;

        // Return values
        let mut result_prefill = Vec::new();
        let mut result_tokens = Vec::new();
        let mut result_top_tokens = Vec::new();
        let mut result_generated_text = None;
        let mut result_start = None;
        let mut result_queued = None;

        // Iterate on stream
        while let Some(response) = stream.next().await {
            match response? {
                // Add prefill tokens
                InferStreamResponse::Prefill(tokens) => {
                    // Create Token objects
                    // We do that here instead of in the Python code as Rust for loops are faster
                    result_prefill = tokens
                        .ids
                        .into_iter()
                        .zip(tokens.logprobs.into_iter())
                        .zip(tokens.texts.into_iter())
                        .map(|((id, logprob), text)| PrefillToken { id, text, logprob })
                        .collect();
                }
                // Push last token
                InferStreamResponse::Intermediate { token, top_tokens } => {
                    result_tokens.push(token);
                    result_top_tokens.push(top_tokens);
                }
                // Final message
                // Set return values
                InferStreamResponse::End {
                    token,
                    generated_text,
                    start,
                    queued,
                    top_tokens,
                } => {
                    result_tokens.push(token);
                    result_top_tokens.push(top_tokens);
                    result_generated_text = Some(generated_text);
                    result_start = Some(start);
                    result_queued = Some(queued)
                }
            }
        }

        // Check that we received a `InferStreamResponse::End` message
        if let (Some(generated_text), Some(queued), Some(start)) =
            (result_generated_text, result_queued, result_start)
        {
            Ok(InferResponse {
                prefill: result_prefill,
                tokens: result_tokens,
                generated_text,
                queued,
                start,
                top_tokens: if use_top_tokens {
                    result_top_tokens
                } else {
                    Vec::new()
                },
            })
        } else {
            let err = InferError::IncompleteGeneration;
            metrics::increment_counter!("tgi_request_failure", "err" => "incomplete");
            tracing::error!("{err}");
            Err(err)
        }
    }
    /// Add best_of new requests to the queue and return an InferResponse of the sequence with
    /// the highest log probability per token
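    /// (for example, per-token logprobs [-0.2, -0.4] average to -0.3 and beat
    /// [-0.1, -0.9], which average to -0.5)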
    #[instrument(skip(self))]
    pub(crate) async fn generate_best_of(
        &self,
        request: GenerateRequest,
        best_of: usize,
    ) -> Result<(InferResponse, Vec<InferResponse>), InferError> {
        // validate  best_of parameter separately
        let best_of = self.validation.validate_best_of(best_of)?;

        // create multiple generate requests
        let mut infer_responses: Vec<InferResponse> =
            try_join_all((0..best_of).map(|_| self.generate(request.clone()))).await?;

        // get the sequence with the highest log probability per token
        let mut max_index = 0;
        let mut max_logprob: f32 = f32::MIN;

        for (i, response) in infer_responses.iter().enumerate() {
            // mean logprobs of the generated tokens
            let sequence_logprob = response
                .tokens
                .iter()
                .map(|token| token.logprob)
                .sum::<f32>()
                / response.tokens.len() as f32;

            // set best sequence
            if sequence_logprob > max_logprob {
                max_index = i;
                max_logprob = sequence_logprob;
            }
        }
        let best_response = infer_responses.remove(max_index);
        Ok((best_response, infer_responses))
    }
}

/// Batching logic
/// Will be launched in a background Tokio task
///
/// Batches requests and sends them to the inference server
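///
/// Roughly: each pass prefills the next batch pulled from the queue, then keeps decoding it
/// while trying to concatenate newly queued requests into the running batch. A minimum size
/// derived from `waiting_served_ratio` sets a floor on how small such a follow-up batch may
/// be, the remaining `max_batch_total_tokens` budget caps it, and once `max_waiting_tokens`
/// decode steps pass without onboarding anything, even a small batch is accepted.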
#[allow(clippy::too_many_arguments)]
async fn batching_task(
    mut client: ShardedClient,
    waiting_served_ratio: f32,
    max_batch_prefill_tokens: u32,
    max_batch_total_tokens: u32,
    max_waiting_tokens: usize,
    queue: Queue,
    shared: Arc<Shared>,
    generation_health: Arc<AtomicBool>,
) {
    // Infinite loop
    loop {
        // Wait for a notification from the Infer struct
        shared.batching_task.notified().await;

        // Get the next batch from the queue
        // This batch might be smaller than the maximum batch size if there are not enough requests
        // waiting in the queue
        while let Some((mut entries, batch, span)) = queue
            .next_batch(None, max_batch_prefill_tokens, max_batch_total_tokens)
            .await
        {
            let mut cached_batch = prefill(&mut client, batch, &mut entries, &generation_health)
                .instrument(span)
                .await;
            let mut waiting_tokens = 1;

            // We loop until we do not receive any cached batch from the inference server (== until
            // all requests have met their stopping criteria)
            while let Some(batch) = cached_batch {
                // Get current batch info
                let batch_size = batch.size;
                let batch_max_tokens = batch.max_tokens;
                let mut batches = vec![batch];
                metrics::gauge!("tgi_batch_current_size", batch_size as f64);
                metrics::gauge!("tgi_batch_current_max_tokens", batch_max_tokens as f64);

                let min_size = if waiting_tokens >= max_waiting_tokens {
                    // If we didn't onboard any new requests since >= max_waiting_tokens, we try
                    // to add a new batch even though its size might be small
                    None
                } else {
                    // Minimum batch size
                    Some((batch_size as f32 * waiting_served_ratio).floor() as usize)
                };

                let token_budget = max_batch_total_tokens.saturating_sub(batch_max_tokens);

                // Try to get a new batch
                if let Some((mut new_entries, new_batch, span)) = queue
                    .next_batch(min_size, max_batch_prefill_tokens, token_budget)
                    .await
                {
                    // Tracking metrics
                    if min_size.is_some() {
                        metrics::increment_counter!("tgi_batch_concat", "reason" => "backpressure");
                    } else {
                        metrics::increment_counter!("tgi_batch_concat", "reason" => "wait_exceeded");
                    }

                    entries.iter_mut().for_each(|(_, entry)| {
                        // Create a new span to add the info that this entry is waiting
                        // because a new batch is being computed
                        let entry_waiting_span = info_span!(parent: &entry.span, "waiting");
                        // Add relationships
                        span.follows_from(&entry_waiting_span);
                        entry_waiting_span.follows_from(&span);
                        // Update entry
                        entry.temp_span = Some(entry_waiting_span);
                    });

                    // Generate one token for this new batch to have the attention past in cache
                    let new_cached_batch =
                        prefill(&mut client, new_batch, &mut new_entries, &generation_health)
                            .instrument(span)
                            .await;
                    // Reset waiting counter
                    waiting_tokens = 1;
                    // Extend current batch with the new batch
                    if let Some(new_cached_batch) = new_cached_batch {
                        entries.extend(new_entries);
                        batches.push(new_cached_batch);
                    }
                }

                // Create span for this batch to add context to inference calls
                let next_batch_size = entries.len();
                let next_batch_span =
                    info_span!(parent: None, "batch", batch_size = next_batch_size);
                entries.iter_mut().for_each(|(_, entry)| {
                    // Create a new span to link the batch back to this entry
                    let entry_batch_span = info_span!(parent: &entry.span, "infer");
                    // Add relationships
                    next_batch_span.follows_from(&entry_batch_span);
                    entry_batch_span.follows_from(&next_batch_span);
                    // Update entry
                    entry.temp_span = Some(entry_batch_span);
                });

                cached_batch = decode(&mut client, batches, &mut entries, &generation_health)
                    .instrument(next_batch_span)
                    .await;
                waiting_tokens += 1;
            }
            metrics::gauge!("tgi_batch_current_size", 0.0);
            metrics::gauge!("tgi_batch_current_max_tokens", 0.0);
        }
    }
}

#[instrument(skip_all)]
async fn prefill(
    client: &mut ShardedClient,
    batch: Batch,
    entries: &mut IntMap<u64, Entry>,
    generation_health: &Arc<AtomicBool>,
) -> Option<CachedBatch> {
    let start_time = Instant::now();
    let batch_id = batch.id;
    metrics::increment_counter!("tgi_batch_inference_count", "method" => "prefill");

    match client.prefill(batch).await {
        Ok((generations, next_batch)) => {
            // Update health
            generation_health.store(true, Ordering::SeqCst);
            // Send generated tokens and filter stopped entries
            filter_send_generations(generations, entries);

            // Filter next batch and remove requests that were stopped
            let next_batch = filter_batch(client, next_batch, entries).await;

            metrics::histogram!("tgi_batch_inference_duration", start_time.elapsed().as_secs_f64(), "method" => "prefill");
            metrics::increment_counter!("tgi_batch_inference_success", "method" => "prefill");
            next_batch
        }
        // If we have an error, we discard the whole batch
        Err(err) => {
            // Update health
            generation_health.store(false, Ordering::SeqCst);
            let _ = client.clear_cache(Some(batch_id)).await;
            send_errors(err, entries);
            metrics::increment_counter!("tgi_batch_inference_failure", "method" => "prefill");
            None
        }
    }
}

#[instrument(skip_all)]
async fn decode(
    client: &mut ShardedClient,
    batches: Vec<CachedBatch>,
    entries: &mut IntMap<u64, Entry>,
    generation_health: &Arc<AtomicBool>,
) -> Option<CachedBatch> {
    let start_time = Instant::now();
    let batch_ids: Vec<u64> = batches.iter().map(|b| b.id).collect();
    metrics::increment_counter!("tgi_batch_inference_count", "method" => "decode");

    match client.decode(batches).await {
        Ok((generations, next_batch)) => {
            // Update health
            generation_health.store(true, Ordering::SeqCst);
            // Send generated tokens and filter stopped entries
            filter_send_generations(generations, entries);

            // Filter next batch and remove requests that were stopped
            let next_batch = filter_batch(client, next_batch, entries).await;

            metrics::histogram!("tgi_batch_inference_duration", start_time.elapsed().as_secs_f64(), "method" => "decode");
            metrics::increment_counter!("tgi_batch_inference_success", "method" => "decode");
            next_batch
        }
        // If we have an error, we discard the whole batch
        Err(err) => {
            generation_health.store(false, Ordering::SeqCst);
            for id in batch_ids {
                let _ = client.clear_cache(Some(id)).await;
            }
            send_errors(err, entries);
            metrics::increment_counter!("tgi_batch_inference_failure", "method" => "decode");
            None
        }
    }
}

/// Filter a `batch` and remove all requests not present in `entries`
#[instrument(skip_all)]
async fn filter_batch(
    client: &mut ShardedClient,
    next_batch: Option<CachedBatch>,
    entries: &IntMap<u64, Entry>,
) -> Option<CachedBatch> {
    let mut batch = next_batch?;

    // No need to filter
    if batch.size as usize == entries.len() {
        return Some(batch);
    }

    let id = batch.id;

    // Retain only requests that are still in entries
    batch.request_ids.retain(|id| entries.contains_key(id));

    if batch.request_ids.is_empty() {
        // All requests have been filtered out
        // Next batch is now empty
        // Clear it from the Python shards cache
        // We unwrap here as we need to panic since we cannot recover if this method fails
        client.clear_cache(Some(id)).await.unwrap();
        None
    } else {
        // Filter Python shard cache
        // We unwrap here as we need to panic since we cannot recover if this method fails
        client.filter_batch(id, batch.request_ids).await.unwrap()
    }
}

/// Send one or multiple `InferStreamResponse` to Infer for all `entries`
/// and filter entries
#[instrument(skip_all)]
fn filter_send_generations(generations: Vec<Generation>, entries: &mut IntMap<u64, Entry>) {
    generations.into_iter().for_each(|generation| {
        let id = generation.request_id;
        // Get entry
        // We can `expect` here as the request id should always be in the entries
        let entry = entries
            .get(&id)
            .expect("ID not found in entries. This is a bug.");

        // Create and enter a span to link this function back to the entry
        let _span = info_span!(parent: entry.temp_span.as_ref().expect("batch_span is None. This is a bug."), "send_generation", generation = ?generation).entered();
        // Send generation responses back to the infer task
        // If we receive an error from the Flume channel, it means that the client dropped the
        // request and we need to stop generating hence why we unwrap_or(true)
        let stopped = send_responses(generation, entry).map_err(|err| {
            if let SendTimeoutError::Timeout(_) = *err {
                tracing::error!("Entry response channel timed out.")
            }

            metrics::increment_counter!("tgi_request_failure", "err" => "dropped");
            err
        }).unwrap_or(true);
        if stopped {
            entries.remove(&id).expect("ID not found in entries. This is a bug.");
        }
    });
}

/// Send responses through the `entry` response channel
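///
/// Note: each send uses a short `send_timeout` (10 ms) so a slow or disconnected client
/// cannot stall the batching loop; a timeout surfaces as an error that the caller treats
/// like a dropped request.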
fn send_responses(
    generation: Generation,
    entry: &Entry,
) -> Result<bool, Box<SendTimeoutError<Result<InferStreamResponse, InferError>>>> {
    // Return directly if the channel is disconnected
    if entry.response_tx.is_disconnected() {
        return Ok(true);
    }

    let mut stopped = false;

    if let Some(prefill_tokens) = generation.prefill_tokens {
        // Send message
        entry.response_tx.send_timeout(
            Ok(InferStreamResponse::Prefill(prefill_tokens)),
            Duration::from_millis(10),
        )?;
    }

    // Create last Token
    let token = Token {
        id: generation.token_id,
        text: generation.token_text,
        logprob: generation.token_logprob,
        special: generation.token_is_special,
    };

    // Build Token objects from generation.top_tokens, if any
    let mut top_tokens = Vec::new();
    if let Some(top_tokens_) = generation.top_tokens {
        top_tokens.extend(
            top_tokens_
                .ids
                .into_iter()
                .zip(top_tokens_.logprobs.into_iter())
                .zip(top_tokens_.texts.into_iter())
                .zip(top_tokens_.is_special.into_iter())
                .map(|(((id, logprob), text), special)| Token {
                    id,
                    text,
                    logprob,
                    special,
                }),
        )
    }

    if let Some(generated_text) = generation.generated_text {
        // Generation has ended
        stopped = true;
        // Send message
        entry.response_tx.send_timeout(
            Ok(InferStreamResponse::End {
                token,
                top_tokens,
                generated_text,
                queued: entry.queue_time,
                start: entry.batch_time.unwrap(),
            }),
            Duration::from_millis(10),
        )?;
    } else {
        // Send message
        entry.response_tx.send_timeout(
            Ok(InferStreamResponse::Intermediate { token, top_tokens }),
            Duration::from_millis(10),
        )?;
    }
    Ok(stopped)
}

/// Send errors to Infer for all `entries`
#[instrument(skip_all)]
fn send_errors(error: ClientError, entries: &mut IntMap<u64, Entry>) {
    entries.drain().for_each(|(_, entry)| {
        // Create and enter a span to link this function back to the entry
        let _send_error_span = info_span!(parent: entry.temp_span.as_ref().expect("batch_span is None. This is a bug."), "send_error").entered();
        let err = InferError::GenerationError(error.to_string());
        metrics::increment_counter!("tgi_request_failure", "err" => "generation");
        tracing::error!("{err}");

        // unwrap_or is valid here as we don't care if the receiver is gone.
        entry
            .response_tx
            .send_timeout(Err(err), Duration::from_millis(10))
            .unwrap_or(());
    });
}

#[derive(Debug)]
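/// Messages streamed back to `generate_stream` consumers: an optional `Prefill` message
/// first, then zero or more `Intermediate` tokens, and finally a single `End` message.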
pub(crate) enum InferStreamResponse {
    // Optional first message
    Prefill(PrefillTokens),
    // Intermediate messages
    Intermediate {
        token: Token,
        top_tokens: Vec<Token>,
    },
    // Last message
    End {
        token: Token,
        top_tokens: Vec<Token>,
        generated_text: GeneratedText,
        start: Instant,
        queued: Instant,
    },
}

#[derive(Debug)]
pub(crate) struct InferResponse {
    pub(crate) prefill: Vec<PrefillToken>,
    pub(crate) tokens: Vec<Token>,
    pub(crate) generated_text: GeneratedText,
    pub(crate) queued: Instant,
    pub(crate) start: Instant,
    pub(crate) top_tokens: Vec<Vec<Token>>,
}

#[derive(Debug, Error)]
pub enum InferError {
    #[error("Request failed during generation: {0}")]
    GenerationError(String),
    #[error("Model is overloaded")]
    Overloaded(#[from] TryAcquireError),
    #[error("Input validation error: {0}")]
    ValidationError(#[from] ValidationError),
    #[error("Incomplete generation")]
    IncompleteGeneration,
}

impl InferError {
    pub(crate) fn error_type(&self) -> &str {
        match self {
            InferError::GenerationError(_) => "generation",
            InferError::Overloaded(_) => "overloaded",
            InferError::ValidationError(_) => "validation",
            InferError::IncompleteGeneration => "incomplete_generation",
        }
    }
}