/// Batching and inference logic
use crate::validation::{Validation, ValidationError};
use crate::{Entry, Queue, Token};
use crate::{GenerateRequest, PrefillToken};
use flume::r#async::RecvStream;
use flume::SendError;
use futures::future::try_join_all;
use futures::stream::StreamExt;
use nohash_hasher::IntMap;
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};
use text_generation_client::{
    Batch, ClientError, GeneratedText, Generation, PrefillTokens, ShardedClient,
};
use thiserror::Error;
use tokio::sync::{Notify, OwnedSemaphorePermit, Semaphore, TryAcquireError};
use tokio::time::Instant;
use tracing::{info_span, instrument, Instrument, Span};

/// Inference struct
#[derive(Clone)]
pub struct Infer {
    /// Validation
    validation: Validation,
    /// Request queue
    queue: Queue,
    /// Shared state
    shared: Arc<Shared>,
    /// Inference limit
    limit_concurrent_requests: Arc<Semaphore>,
}

/// Infer shared state
struct Shared {
    /// Batching background Tokio task notifier
    batching_task: Notify,
}

impl Infer {
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn new(
        client: ShardedClient,
        validation: Validation,
        waiting_served_ratio: f32,
        max_batch_total_tokens: u32,
        max_waiting_tokens: usize,
        max_concurrent_requests: usize,
        requires_padding: bool,
        generation_health: Arc<AtomicBool>,
    ) -> Self {
        // Infer shared state
        let queue = Queue::new(requires_padding);
        let shared = Arc::new(Shared {
            batching_task: Notify::new(),
        });

        // Spawn batching background task that contains all the inference logic
        tokio::spawn(batching_task(
            client,
            waiting_served_ratio,
            max_batch_total_tokens,
            max_waiting_tokens,
            queue.clone(),
            shared.clone(),
            generation_health,
        ));

        // Inference limit with a semaphore
        let semaphore = Arc::new(Semaphore::new(max_concurrent_requests));

        Self {
            validation,
            queue,
            shared,
            limit_concurrent_requests: semaphore,
        }
    }
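
    // A minimal construction sketch, illustrative only: the numeric values are
    // hypothetical, and `sharded_client` / `validation` are assumed to be built elsewhere.
    //
    //     let infer = Infer::new(
    //         sharded_client,
    //         validation,
    //         1.2,    // waiting_served_ratio
    //         32_000, // max_batch_total_tokens
    //         20,     // max_waiting_tokens
    //         512,    // max_concurrent_requests
    //         false,  // requires_padding
    //         Arc::new(AtomicBool::new(false)), // generation_health
    //     );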

    /// Add a new request to the queue and return a stream of InferStreamResponse
    #[instrument(skip(self))]
    pub(crate) async fn generate_stream(
        &self,
        request: GenerateRequest,
    ) -> Result<
        (
            OwnedSemaphorePermit,
            RecvStream<Result<InferStreamResponse, InferError>>,
        ),
        InferError,
    > {
        // Limit concurrent requests by acquiring a permit from the semaphore
        let permit = self
            .clone()
            .limit_concurrent_requests
            .try_acquire_owned()
            .map_err(|err| {
                metrics::increment_counter!("tgi_request_failure", "err" => "overloaded");
                tracing::error!("{err}");
                err
            })?;

        // Validate request
        let valid_request = self.validation.validate(request).await.map_err(|err| {
            metrics::increment_counter!("tgi_request_failure", "err" => "validation");
            tracing::error!("{err}");
            err
        })?;

        // MPSC channel to communicate with the background batching task
        let (response_tx, response_rx) = flume::unbounded();

        // Append the request to the queue
        self.queue.append(Entry {
            request: valid_request,
            response_tx,
            span: Span::current(),
            temp_span: None,
            queue_time: Instant::now(),
            batch_time: None,
        });

        // Notify the background task that we have a new entry in the queue that needs
        // to be batched
        self.shared.batching_task.notify_one();

        // Return stream
        Ok((permit, response_rx.into_stream()))
    }
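
    // A hedged call-site sketch: the permit must be kept alive while the stream is
    // consumed, or the concurrency slot is freed early. `infer` and `request` are
    // hypothetical values built elsewhere.
    //
    //     let (_permit, mut stream) = infer.generate_stream(request).await?;
    //     while let Some(response) = stream.next().await {
    //         match response? {
    //             InferStreamResponse::Prefill(_) => { /* prompt tokens */ }
    //             InferStreamResponse::Token(token) => print!("{}", token.text),
    //             InferStreamResponse::End { .. } => break,
    //         }
    //     }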

    /// Add a new request to the queue and return an InferResponse
    #[instrument(skip(self))]
    pub(crate) async fn generate(
        &self,
        request: GenerateRequest,
    ) -> Result<InferResponse, InferError> {
        // Create stream and keep semaphore permit as long as generate lives
        let (_permit, mut stream) = self.generate_stream(request).await?;

        // Return values
        let mut result_prefill = Vec::new();
        let mut result_tokens = Vec::new();
        let mut result_generated_text = None;
        let mut result_start = None;
        let mut result_queued = None;

        // Iterate on stream
        while let Some(response) = stream.next().await {
            match response? {
                // Add prefill tokens
                InferStreamResponse::Prefill(tokens) => {
                    // Create Token objects
                    // We do that here instead of in the Python code as Rust for loops are faster
                    result_prefill = tokens
                        .ids
                        .into_iter()
                        .zip(tokens.logprobs.into_iter())
                        .zip(tokens.texts.into_iter())
                        .map(|((id, logprob), text)| PrefillToken { id, text, logprob })
                        .collect();
                }
                // Push last token
                InferStreamResponse::Token(token) => result_tokens.push(token),
                // Final message
                // Set return values
                InferStreamResponse::End {
                    token,
                    generated_text,
                    start,
                    queued,
                } => {
                    result_tokens.push(token);
                    result_generated_text = Some(generated_text);
                    result_start = Some(start);
                    result_queued = Some(queued)
                }
            }
        }

        // Check that we received a `InferStreamResponse::End` message
        if let (Some(generated_text), Some(queued), Some(start)) =
            (result_generated_text, result_queued, result_start)
        {
            Ok(InferResponse {
                prefill: result_prefill,
                tokens: result_tokens,
                generated_text,
                queued,
                start,
            })
        } else {
            let err = InferError::IncompleteGeneration;
            metrics::increment_counter!("tgi_request_failure", "err" => "incomplete");
            tracing::error!("{err}");
            Err(err)
        }
    }
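
    // Call-site sketch (hypothetical `infer` and `request`; the `text` field name on
    // `GeneratedText` is assumed): the whole generation is awaited and collapsed into
    // a single response.
    //
    //     let response = infer.generate(request).await?;
    //     println!("{}", response.generated_text.text);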
    /// Add best_of new requests to the queue and return an InferResponse of the sequence with
    /// the highest log probability per token
    #[instrument(skip(self))]
    pub(crate) async fn generate_best_of(
        &self,
        request: GenerateRequest,
        best_of: usize,
    ) -> Result<(InferResponse, Vec<InferResponse>), InferError> {
        // validate best_of parameter separately
        let best_of = self.validation.validate_best_of(best_of)?;

        // create multiple generate requests
        let mut infer_responses: Vec<InferResponse> =
            try_join_all((0..best_of).map(|_| self.generate(request.clone()))).await?;

        // get the sequence with the highest log probability per token
        let mut max_index = 0;
        let mut max_logprob: f32 = f32::MIN;

        for (i, response) in infer_responses.iter().enumerate() {
            // mean logprobs of the generated tokens
            let sequence_logprob = response
                .tokens
                .iter()
                .map(|token| token.logprob)
                .sum::<f32>()
                / response.tokens.len() as f32;

            // set best sequence
            if sequence_logprob > max_logprob {
                max_index = i;
                max_logprob = sequence_logprob;
            }
        }
        let best_response = infer_responses.remove(max_index);
        Ok((best_response, infer_responses))
    }
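
    // Worked example of the selection rule above, with hypothetical logprobs: a 2-token
    // sequence scoring [-0.1, -0.3] averages -0.2, and a 3-token sequence scoring
    // [-0.2, -0.2, -0.2] also averages -0.2; since the comparison is strictly
    // greater-than, the tie keeps the earlier response.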
}

/// Batching logic
/// Will be launched in a background Tokio task
///
/// Batches requests and sends them to the inference server
async fn batching_task(
    mut client: ShardedClient,
    waiting_served_ratio: f32,
    max_batch_total_tokens: u32,
    max_waiting_tokens: usize,
    queue: Queue,
    shared: Arc<Shared>,
    generation_health: Arc<AtomicBool>,
) {
    // Infinite loop
    loop {
        // Wait for a notification from the Infer struct
        shared.batching_task.notified().await;

        // Get the next batch from the queue
        // This batch might be smaller than the maximum batch size if there are not enough requests
        // waiting in the queue
        while let Some((mut entries, batch, span)) =
            queue.next_batch(None, max_batch_total_tokens).await
        {
            let mut cached_batch = prefill(&mut client, batch, &mut entries, &generation_health)
                .instrument(span)
                .await;
            let mut waiting_tokens = 1;

            // We loop until we do not receive any cached batch from the inference server (== until
            // all requests have met their stopping criteria)
            while let Some(batch) = cached_batch {
                // Get current batch info
                let batch_size = batch.size;
                let batch_max_tokens = batch.max_tokens;
                let mut batches = vec![batch];
                metrics::gauge!("tgi_batch_current_size", batch_size as f64);
                metrics::gauge!("tgi_batch_current_max_tokens", batch_max_tokens as f64);

                let min_size = if waiting_tokens >= max_waiting_tokens {
                    // If we didn't onboard any new requests for at least max_waiting_tokens
                    // decode steps, we try to add a new batch even though its size might be small
                    None
                } else {
                    // Minimum batch size
                    Some((batch_size as f32 * waiting_served_ratio).floor() as usize)
                };

                let token_budget = max_batch_total_tokens - batch_max_tokens;
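
                // Worked example with hypothetical numbers: for a running batch of
                // batch_size = 10 and waiting_served_ratio = 1.2, the queue must supply at
                // least floor(10 * 1.2) = 12 new requests, and with
                // max_batch_total_tokens = 32_000 and batch_max_tokens = 12_000 they must
                // fit in a 32_000 - 12_000 = 20_000 token budget.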

                // Try to get a new batch
                if let Some((mut new_entries, new_batch, span)) =
                    queue.next_batch(min_size, token_budget).await
                {
                    // Tracking metrics
                    if min_size.is_some() {
                        metrics::increment_counter!("tgi_batch_concat", "reason" => "backpressure");
                    } else {
                        metrics::increment_counter!("tgi_batch_concat", "reason" => "wait_exceeded");
                    }

                    entries.iter_mut().for_each(|(_, entry)| {
                        // Create a new span to add the info that this entry is waiting
                        // because a new batch is being computed
                        let entry_waiting_span = info_span!(parent: &entry.span, "waiting");
                        // Add relationships
                        span.follows_from(&entry_waiting_span);
                        entry_waiting_span.follows_from(&span);
                        // Update entry
                        entry.temp_span = Some(entry_waiting_span);
                    });

                    // Generate one token for this new batch to have the attention past in cache
                    let new_cached_batch =
                        prefill(&mut client, new_batch, &mut new_entries, &generation_health)
                            .instrument(span)
                            .await;
                    // Reset waiting counter
                    waiting_tokens = 1;
                    // Extend current batch with the new batch
                    if let Some(new_cached_batch) = new_cached_batch {
                        entries.extend(new_entries);
                        batches.push(new_cached_batch);
                    }
                }

                // Create span for this batch to add context to inference calls
                let next_batch_size = entries.len();
                let next_batch_span =
                    info_span!(parent: None, "batch", batch_size = next_batch_size);
                entries.iter_mut().for_each(|(_, entry)| {
                    // Create a new span to link the batch back to this entry
                    let entry_batch_span = info_span!(parent: &entry.span, "infer");
                    // Add relationships
                    next_batch_span.follows_from(&entry_batch_span);
                    entry_batch_span.follows_from(&next_batch_span);
                    // Update entry
                    entry.temp_span = Some(entry_batch_span);
                });

                cached_batch = decode(&mut client, batches, &mut entries, &generation_health)
                    .instrument(next_batch_span)
                    .await;
                waiting_tokens += 1;
            }
            metrics::gauge!("tgi_batch_current_size", 0.0);
            metrics::gauge!("tgi_batch_current_max_tokens", 0.0);
        }
    }
}

#[instrument(skip_all)]
async fn prefill(
    client: &mut ShardedClient,
    batch: Batch,
    entries: &mut IntMap<u64, Entry>,
    generation_health: &Arc<AtomicBool>,
) -> Option<Batch> {
    let start_time = Instant::now();
    let batch_id = batch.id;
    metrics::increment_counter!("tgi_batch_inference_count", "method" => "prefill");

    match client.prefill(batch).await {
        Ok((generations, next_batch)) => {
            // Update health
            generation_health.store(true, Ordering::SeqCst);
            // Send generated tokens and filter stopped entries
            filter_send_generations(generations, entries);

            // Filter next batch and remove requests that were stopped
            let next_batch = filter_batch(client, next_batch, entries).await;

            metrics::histogram!("tgi_batch_inference_duration", start_time.elapsed().as_secs_f64(), "method" => "prefill");
            metrics::increment_counter!("tgi_batch_inference_success", "method" => "prefill");
            next_batch
        }
        // If we have an error, we discard the whole batch
        Err(err) => {
            // Update health
            generation_health.store(false, Ordering::SeqCst);
            let _ = client.clear_cache(Some(batch_id)).await;
            send_errors(err, entries);
            metrics::increment_counter!("tgi_batch_inference_failure", "method" => "prefill");
            None
        }
    }
}

#[instrument(skip_all)]
async fn decode(
    client: &mut ShardedClient,
    batches: Vec<Batch>,
    entries: &mut IntMap<u64, Entry>,
    generation_health: &Arc<AtomicBool>,
) -> Option<Batch> {
    let start_time = Instant::now();
    let batch_ids: Vec<u64> = batches.iter().map(|b| b.id).collect();
    metrics::increment_counter!("tgi_batch_inference_count", "method" => "decode");

    match client.decode(batches).await {
        Ok((generations, next_batch)) => {
            // Update health
            generation_health.store(true, Ordering::SeqCst);
            // Send generated tokens and filter stopped entries
            filter_send_generations(generations, entries);

            // Filter next batch and remove requests that were stopped
            let next_batch = filter_batch(client, next_batch, entries).await;

            metrics::histogram!("tgi_batch_inference_duration", start_time.elapsed().as_secs_f64(), "method" => "decode");
            metrics::increment_counter!("tgi_batch_inference_success", "method" => "decode");
            next_batch
        }
        // If we have an error, we discard the whole batch
        Err(err) => {
            generation_health.store(false, Ordering::SeqCst);
            for id in batch_ids {
                let _ = client.clear_cache(Some(id)).await;
            }
            send_errors(err, entries);
            metrics::increment_counter!("tgi_batch_inference_failure", "method" => "decode");
            None
        }
    }
}

/// Filter a `batch` and remove all requests not present in `entries`
#[instrument(skip_all)]
async fn filter_batch(
    client: &mut ShardedClient,
    next_batch: Option<Batch>,
    entries: &IntMap<u64, Entry>,
) -> Option<Batch> {
    let mut batch = next_batch?;

    // No need to filter
    if batch.size as usize == entries.len() {
        return Some(batch);
    }

    let id = batch.id;

    // Retain only requests that are still in entries
    batch.requests.retain(|r| entries.contains_key(&r.id));

    if batch.requests.is_empty() {
        // All requests have been filtered out
        // Next batch is now empty
        // Clear it from the Python shards cache
        // We unwrap here as we need to panic since we cannot recover if this method fails
        client.clear_cache(Some(id)).await.unwrap();
        None
    } else {
        // Filter Python shard cache
        // We unwrap here as we need to panic since we cannot recover if this method fails
        client.filter_batch(id, batch.requests).await.unwrap()
    }
}
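
// Illustration with hypothetical request ids: if the cached batch held requests
// {1, 2, 3} but only entry 2 is still live, the batch is filtered down to {2} and the
// shards' cache is updated accordingly; if no entries remain, the batch is cleared
// from the cache entirely.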

/// Send one or multiple `InferStreamResponse` to Infer for all `entries`
/// and filter entries
#[instrument(skip_all)]
fn filter_send_generations(generations: Vec<Generation>, entries: &mut IntMap<u64, Entry>) {
    generations.into_iter().for_each(|generation| {
        let id = generation.request_id;
        // Get entry
        // We can `expect` here as the request id should always be in the entries
        let entry = entries
            .get(&id)
            .expect("ID not found in entries. This is a bug.");

        // Create and enter a span to link this function back to the entry
        let _span = info_span!(parent: entry.temp_span.as_ref().expect("batch_span is None. This is a bug."), "send_generation", generation = ?generation).entered();
        // Send generation responses back to the infer task
        // If we receive an error from the Flume channel, it means that the client dropped the
        // request and we need to stop generating, hence the unwrap_or(true)
        let stopped = send_responses(generation, entry).map_err(|err| {
            metrics::increment_counter!("tgi_request_failure", "err" => "dropped");
            err
        }).unwrap_or(true);
        if stopped {
            entries.remove(&id).expect("ID not found in entries. This is a bug.");
        }
    });
}

/// Send responses through the `entry` response channel
fn send_responses(
    generation: Generation,
    entry: &Entry,
) -> Result<bool, SendError<Result<InferStreamResponse, InferError>>> {
    let mut stopped = false;

    if let Some(prefill_tokens) = generation.prefill_tokens {
        // Send message
        entry
            .response_tx
            .send(Ok(InferStreamResponse::Prefill(prefill_tokens)))?;
    }

    // Create last Token
    let token = Token {
        id: generation.token_id,
        text: generation.token_text,
        logprob: generation.token_logprob,
        special: generation.token_is_special,
    };

    if let Some(generated_text) = generation.generated_text {
        // Generation has ended
        stopped = true;
        // Send message
        entry.response_tx.send(Ok(InferStreamResponse::End {
            token,
            generated_text,
            queued: entry.queue_time,
            start: entry.batch_time.unwrap(),
        }))?;
    } else {
        // Send message
        entry
            .response_tx
            .send(Ok(InferStreamResponse::Token(token)))?;
    }
    Ok(stopped)
}

/// Send errors to Infer for all `entries`
#[instrument(skip_all)]
fn send_errors(error: ClientError, entries: &mut IntMap<u64, Entry>) {
    entries.drain().for_each(|(_, entry)| {
        // Create and enter a span to link this function back to the entry
        let _send_error_span = info_span!(parent: entry.temp_span.as_ref().expect("batch_span is None. This is a bug."), "send_error").entered();
        let err = InferError::GenerationError(error.to_string());
        metrics::increment_counter!("tgi_request_failure", "err" => "generation");
        tracing::error!("{err}");

        // unwrap_or is valid here as we don't care if the receiver is gone.
        entry
            .response_tx
            .send(Err(err))
            .unwrap_or(());
    });
}

#[derive(Debug)]
pub(crate) enum InferStreamResponse {
    // Optional first message
    Prefill(PrefillTokens),
    // Intermediate messages
    Token(Token),
    // Last message
    End {
        token: Token,
        generated_text: GeneratedText,
        start: Instant,
        queued: Instant,
    },
}
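
// Message protocol sketch, as implemented by `send_responses` above: a request that
// generates N tokens yields an optional `Prefill`, then N - 1 `Token` messages, then a
// single `End` carrying the final token.
//
//     Prefill(prompt) -> Token x (N - 1) -> End { token, generated_text, .. }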

#[derive(Debug)]
pub(crate) struct InferResponse {
    pub(crate) prefill: Vec<PrefillToken>,
    pub(crate) tokens: Vec<Token>,
    pub(crate) generated_text: GeneratedText,
    pub(crate) queued: Instant,
    pub(crate) start: Instant,
}

#[derive(Debug, Error)]
pub enum InferError {
    #[error("Request failed during generation: {0}")]
    GenerationError(String),
    #[error("Model is overloaded")]
    Overloaded(#[from] TryAcquireError),
    #[error("Input validation error: {0}")]
    ValidationError(#[from] ValidationError),
    #[error("Incomplete generation")]
    IncompleteGeneration,
}

impl InferError {
    pub(crate) fn error_type(&self) -> &str {
        match self {
            InferError::GenerationError(_) => "generation",
            InferError::Overloaded(_) => "overloaded",
            InferError::ValidationError(_) => "validation",
            InferError::IncompleteGeneration => "incomplete_generation",
        }
    }
}