/// Batching and inference logic
use crate::validation::{Validation, ValidationError};
use crate::{Entry, Queue, Token};
use crate::{GenerateRequest, PrefillToken};
use flume::r#async::RecvStream;
use futures::future::try_join_all;
use futures::stream::StreamExt;
use nohash_hasher::IntMap;
use std::sync::Arc;
use text_generation_client::{
    Batch, ClientError, GeneratedText, Generation, PrefillTokens, ShardedClient,
};
use thiserror::Error;
use tokio::sync::{Notify, Semaphore, TryAcquireError};
use tokio::time::Instant;
use tracing::{info_span, instrument, Instrument, Span};

/// Inference struct
#[derive(Clone)]
pub struct Infer {
    /// Validation
    validation: Validation,
    /// Request queue
    queue: Queue,
    /// Shared state
    shared: Arc<Shared>,
    /// Inference limit
    limit_concurrent_requests: Arc<Semaphore>,
}

/// Infer shared state
struct Shared {
    /// Batching background Tokio task notifier
    batching_task: Notify,
}

impl Infer {
    pub(crate) fn new(
        client: ShardedClient,
        validation: Validation,
        max_batch_size: usize,
        max_waiting_tokens: usize,
        max_concurrent_requests: usize,
    ) -> Self {
        // Infer shared state
        let queue = Queue::new();
        let shared = Arc::new(Shared {
            batching_task: Notify::new(),
        });

        // Spawn batching background task that contains all the inference logic
        tokio::spawn(batching_task(
            client,
            max_batch_size,
            max_waiting_tokens,
            queue.clone(),
            shared.clone(),
        ));

        // Inference limit with a semaphore
        let semaphore = Arc::new(Semaphore::new(max_concurrent_requests));

        Self {
            validation,
            queue,
            shared,
            limit_concurrent_requests: semaphore,
        }
    }
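
    // Usage sketch (illustrative; not part of the original source). The HTTP router
    // would typically build a single `Infer` at startup and clone it into each handler.
    // The numeric limits below are hypothetical example values:
    //
    //     let infer = Infer::new(client, validation, 32, 20, 512);
    //     let response = infer.generate(request).await?;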

    /// Add a new request to the queue and return a stream of InferStreamResponse
    #[instrument(skip(self))]
    pub(crate) async fn generate_stream(
        &self,
        request: GenerateRequest,
    ) -> Result<RecvStream<Result<InferStreamResponse, InferError>>, InferError> {
        // Limit concurrent requests by acquiring a permit from the semaphore
        // This permit will live as long as Entry
        let permit = self
            .clone()
            .limit_concurrent_requests
            .try_acquire_owned()
            .map_err(|err| {
                metrics::increment_counter!("tgi_request_failure", "err" => "overloaded");
                tracing::error!("{err}");
                err
            })?;

        // Validate request
        let valid_request = self.validation.validate(request).await.map_err(|err| {
            metrics::increment_counter!("tgi_request_failure", "err" => "validation");
            tracing::error!("{err}");
            err
        })?;

        // MPSC channel to communicate with the background batching task
        let (response_tx, response_rx) = flume::unbounded();

        // Append the request to the queue
        self.queue.append(Entry {
            request: valid_request,
            response_tx,
            span: Span::current(),
            temp_span: None,
            queue_time: Instant::now(),
            batch_time: None,
            _permit: permit,
        });

        // Notify the background task that we have a new entry in the queue that needs
        // to be batched
        self.shared.batching_task.notify_one();

        // Return stream
        Ok(response_rx.into_stream())
    }

    /// Add a new request to the queue and return an InferResponse
    #[instrument(skip(self))]
    pub(crate) async fn generate(
        &self,
        request: GenerateRequest,
    ) -> Result<InferResponse, InferError> {
        // Create stream
        let mut stream = self.generate_stream(request).await?;

        // Return values
        let mut result_prefill = Vec::new();
        let mut result_tokens = Vec::new();
        let mut result_generated_text = None;
        let mut result_start = None;
        let mut result_queued = None;

        // Iterate on stream
        while let Some(response) = stream.next().await {
            match response? {
                // Add prefill tokens
                InferStreamResponse::Prefill(tokens) => {
                    // Create Token objects
                    // We do that here instead of in the Python code as Rust for loops are faster
                    result_prefill = tokens
                        .ids
                        .into_iter()
                        .zip(tokens.logprobs.into_iter())
                        .zip(tokens.texts.into_iter())
                        .map(|((id, logprob), text)| PrefillToken { id, text, logprob })
                        .collect();
                }
                // Push last token
                InferStreamResponse::Token(token) => result_tokens.push(token),
                // Final message
                // Set return values
                InferStreamResponse::End {
                    token,
                    generated_text,
                    start,
                    queued,
                } => {
                    result_tokens.push(token);
                    result_generated_text = Some(generated_text);
                    result_start = Some(start);
                    result_queued = Some(queued)
                }
            }
        }

        // Check that we received an `InferStreamResponse::End` message
        if let (Some(generated_text), Some(queued), Some(start)) =
            (result_generated_text, result_queued, result_start)
        {
            Ok(InferResponse {
                prefill: result_prefill,
                tokens: result_tokens,
                generated_text,
                queued,
                start,
            })
        } else {
            let err = InferError::IncompleteGeneration;
            metrics::increment_counter!("tgi_request_failure", "err" => "incomplete");
            tracing::error!("{err}");
            Err(err)
        }
    }

    /// Add best_of new requests to the queue and return an InferResponse of the sequence with
    /// the highest log probability per token
    #[instrument(skip(self))]
    pub(crate) async fn generate_best_of(
        &self,
        request: GenerateRequest,
        best_of: usize,
    ) -> Result<(InferResponse, Vec<InferResponse>), InferError> {
        // validate best_of parameter separately
        let best_of = self.validation.validate_best_of(best_of)?;

        // create multiple generate requests
        let mut infer_responses: Vec<InferResponse> =
            try_join_all((0..best_of).map(|_| self.generate(request.clone()))).await?;

        // get the sequence with the highest log probability per token
        let mut max_index = 0;
        let mut max_logprob: f32 = f32::MIN;

        for (i, response) in infer_responses.iter().enumerate() {
            // mean logprobs of the generated tokens
            let sequence_logprob = response
                .tokens
                .iter()
                .map(|token| token.logprob)
                .sum::<f32>()
                / response.tokens.len() as f32;

            // set best sequence
            if sequence_logprob > max_logprob {
                max_index = i;
                max_logprob = sequence_logprob;
            }
        }
        let best_response = infer_responses.remove(max_index);
        Ok((best_response, infer_responses))
    }
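
    // Worked example of the selection rule above (illustrative numbers, not from the source):
    //   sequence A: token logprobs [-0.2, -0.4, -0.6] -> mean -0.4
    //   sequence B: token logprobs [-0.1, -0.9]       -> mean -0.5
    // A has the higher mean logprob per token, so it is returned as `best_response`
    // while B stays in the vector of remaining responses.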
}

/// Batching logic
/// Will be launched in a background Tokio task
///
/// Batches requests and sends them to the inference server
async fn batching_task(
    mut client: ShardedClient,
    max_batch_size: usize,
    max_waiting_tokens: usize,
    queue: Queue,
    shared: Arc<Shared>,
) {
    // Minimum batch size threshold: while the running batch is at or below this size,
    // we try to add more requests to it
    let limit_min_batch_size = if max_batch_size > 1 {
        (max_batch_size / 2) as u32
    } else {
        0
    };
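    // Illustrative example (values not from the source): with max_batch_size = 32,
    // limit_min_batch_size = 16, so the loop below only tries to fold new requests
    // into the running batch once it has drained to 16 or fewer entries.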

    // Infinite loop
    loop {
        // Wait for a notification from the Infer struct
        shared.batching_task.notified().await;

        // Get the next batch from the queue
        // This batch might be smaller than the maximum batch size if there are not enough requests
        // waiting in the queue
        while let Some((mut entries, batch, span)) = queue.next_batch(None, max_batch_size).await {
            let mut cached_batch = prefill(&mut client, batch, &mut entries)
                .instrument(span)
                .await;
            let mut waiting_tokens = 1;

            // We loop until we do not receive any cached batch from the inference server (== until
            // all requests have met their stopping criteria)
            while let Some(batch) = cached_batch {
                // Get current batch info
                let batch_size = batch.size;
                let mut batches = vec![batch];
                metrics::gauge!("tgi_batch_current_size", batch_size as f64);

                // If the current batch is too small, we try to add more requests to it
                if batch_size <= limit_min_batch_size {
                    let min_size = match waiting_tokens {
                        // If we haven't onboarded any new requests for at least max_waiting_tokens
                        // decode steps, we try to add a new batch even though its size might be small
                        _ if waiting_tokens >= max_waiting_tokens => None,
                        // Minimum size criteria
                        _ => Some(limit_min_batch_size as usize),
                    };

                    // Try to get a new batch
                    if let Some((mut new_entries, new_batch, span)) = queue
                        .next_batch(min_size, max_batch_size - batch_size as usize)
                        .await
                    {
                        let new_batch_size = new_batch.size;
                        entries.iter_mut().for_each(|(_, entry)| {
                            // Create a new span to add the info that this entry is waiting
                            // because a new batch is being computed
                            let entry_waiting_span =
                                info_span!(parent: &entry.span, "waiting", batch_size = new_batch_size);
                            // Add relationships
                            span.follows_from(&entry_waiting_span);
                            entry_waiting_span.follows_from(&span);
                            // Update entry
                            entry.temp_span = Some(entry_waiting_span);
                        });

                        // Generate one token for this new batch so its attention past is stored in the cache
                        let new_cached_batch = prefill(&mut client, new_batch, &mut new_entries)
                            .instrument(span)
                            .await;
                        // Reset waiting counter
                        waiting_tokens = 1;
                        // Extend current batch with the new batch
                        if let Some(new_cached_batch) = new_cached_batch {
                            entries.extend(new_entries);
                            batches.push(new_cached_batch);
                        }
                    }
                }
                // Create span for this batch to add context to inference calls
                let next_batch_size = entries.len();
                let next_batch_span =
                    info_span!(parent: None, "batch", batch_size = next_batch_size);
                entries.iter_mut().for_each(|(_, entry)| {
                    // Create a new span to link the batch back to this entry
                    let entry_batch_span =
                        info_span!(parent: &entry.span, "infer", batch_size = next_batch_size);
                    // Add relationships
                    next_batch_span.follows_from(&entry_batch_span);
                    entry_batch_span.follows_from(&next_batch_span);
                    // Update entry
                    entry.temp_span = Some(entry_batch_span);
                });

                cached_batch = decode(&mut client, batches, &mut entries)
                    .instrument(next_batch_span)
                    .await;
                waiting_tokens += 1;
            }
            metrics::gauge!("tgi_batch_current_size", 0.0);
        }
    }
}

#[instrument(skip_all)]
async fn prefill(
    client: &mut ShardedClient,
    batch: Batch,
    entries: &mut IntMap<u64, Entry>,
) -> Option<Batch> {
    let start_time = Instant::now();
    let batch_id = batch.id;
    metrics::increment_counter!("tgi_batch_inference_count", "method" => "prefill");

    match client.prefill(batch).await {
        Ok((generations, next_batch)) => {
            send_generations(generations, entries);
            metrics::histogram!("tgi_batch_inference_duration", start_time.elapsed().as_secs_f64(), "method" => "prefill");
            metrics::increment_counter!("tgi_batch_inference_success", "method" => "prefill");
            next_batch
        }
        // If we have an error, we discard the whole batch
        Err(err) => {
            let _ = client.clear_cache(Some(batch_id)).await;
            send_errors(err, entries);
            metrics::increment_counter!("tgi_batch_inference_failure", "method" => "prefill");
            None
        }
    }
}
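
// Note added for clarity (inferred from the calls above and below): `prefill` runs the
// first forward pass for a brand-new batch so its attention past gets cached, while
// `decode` advances already-cached batches by one token; that is why `decode` takes a
// `Vec<Batch>` that the batching task may have extended with a freshly prefilled batch.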

#[instrument(skip_all)]
async fn decode(
    client: &mut ShardedClient,
    batches: Vec<Batch>,
    entries: &mut IntMap<u64, Entry>,
) -> Option<Batch> {
    let start_time = Instant::now();
    metrics::increment_counter!("tgi_batch_inference_count", "method" => "decode");

    match client.decode(batches).await {
        Ok((generations, next_batch)) => {
            send_generations(generations, entries);
            metrics::histogram!("tgi_batch_inference_duration", start_time.elapsed().as_secs_f64(), "method" => "decode");
            metrics::increment_counter!("tgi_batch_inference_success", "method" => "decode");
            next_batch
        }
        // If we have an error, we discard the whole batch
        Err(err) => {
            send_errors(err, entries);
            metrics::increment_counter!("tgi_batch_inference_failure", "method" => "decode");
            None
        }
    }
}

/// Send errors to Infer for all `entries`
#[instrument(skip_all)]
fn send_errors(error: ClientError, entries: &mut IntMap<u64, Entry>) {
    entries.drain().for_each(|(_, entry)| {
        // Create and enter a span to link this function back to the entry
        let _send_error_span = info_span!(parent: entry.temp_span.as_ref().expect("batch_span is None. This is a bug."), "send_error").entered();
        let err = InferError::GenerationError(error.to_string());
        metrics::increment_counter!("tgi_request_failure", "err" => "generation");
        tracing::error!("{err}");

        // unwrap_or is valid here as we don't care if the receiver is gone.
        entry
            .response_tx
            .send(Err(err))
            .unwrap_or(());
    });
}

/// Send one or multiple `InferStreamResponse` to Infer for all `entries`
#[instrument(skip_all)]
fn send_generations(generations: Vec<Generation>, entries: &mut IntMap<u64, Entry>) {
    generations.into_iter().for_each(|generation| {
        // Get entry
        // We can `expect` here as the request id should always be in the entries
        let entry = entries
            .get(&generation.request_id)
            .expect("ID not found in entries. This is a bug.");

        // Create and enter a span to link this function back to the entry
        let _generation_span = info_span!(parent: entry.temp_span.as_ref().expect("batch_span is None. This is a bug."), "send_generation", generation = ?generation).entered();

        if let Some(prefill_tokens) = generation.prefill_tokens {
            // Send message
            // unwrap_or is valid here as we don't care if the receiver is gone.
            entry
                .response_tx
                .send(Ok(InferStreamResponse::Prefill(prefill_tokens)))
                .unwrap_or(());
        }

        // Create last Token
        let token = Token {
            id: generation.token_id,
            text: generation.token_text,
            logprob: generation.token_logprob,
            special: generation.token_is_special,
        };

        if let Some(generated_text) = generation.generated_text {
            // Remove entry as this is the last message
            // We can `expect` here as the request id should always be in the entries
            let entry = entries
                .remove(&generation.request_id)
                .expect("ID not found in entries. This is a bug.");

            // Send message
            // unwrap_or is valid here as we don't care if the receiver is gone.
            entry
                .response_tx
                .send(Ok(InferStreamResponse::End {
                    token,
                    generated_text,
                    queued: entry.queue_time,
                    start: entry.batch_time.unwrap(),
                }))
                .unwrap_or(());
        } else {
            // Send message
            // unwrap_or is valid here as we don't care if the receiver is gone.
            entry
                .response_tx
                .send(Ok(InferStreamResponse::Token(token)))
                .unwrap_or(());
        }
    });
}

#[derive(Debug)]
pub(crate) enum InferStreamResponse {
    // Optional first message
    Prefill(PrefillTokens),
    // Intermediate messages
    Token(Token),
    // Last message
    End {
        token: Token,
        generated_text: GeneratedText,
        start: Instant,
        queued: Instant,
    },
}
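
// For a single request the stream is therefore (illustrative ordering, inferred from
// `generate` and `send_generations` above):
//   [Prefill(..)]  ->  Token(..), Token(..), ...  ->  End { token, generated_text, start, queued }
// where the Prefill message is only sent when the server returns prefill tokens.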

#[derive(Debug)]
pub(crate) struct InferResponse {
    pub(crate) prefill: Vec<PrefillToken>,
    pub(crate) tokens: Vec<Token>,
    pub(crate) generated_text: GeneratedText,
    pub(crate) queued: Instant,
    pub(crate) start: Instant,
}

#[derive(Debug, Error)]
pub enum InferError {
    #[error("Request failed during generation: {0}")]
    GenerationError(String),
    #[error("Model is overloaded")]
    Overloaded(#[from] TryAcquireError),
    #[error("Input validation error: {0}")]
    ValidationError(#[from] ValidationError),
    #[error("Incomplete generation")]
    IncompleteGeneration,
}

impl InferError {
    pub(crate) fn error_type(&self) -> &str {
        match self {
            InferError::GenerationError(_) => "generation",
            InferError::Overloaded(_) => "overloaded",
            InferError::ValidationError(_) => "validation",
            InferError::IncompleteGeneration => "incomplete_generation",
        }
    }
}