// pub(crate) mod v2;
mod chat_template;
pub mod tool_grammar;

use crate::validation::{ValidGenerateRequest, Validation, ValidationError};
use crate::Tool;
use crate::{
    ChatTemplateVersions, FinishReason, GenerateRequest, HubProcessorConfig, HubTokenizerConfig,
    Message, PrefillToken, Token,
};
use async_stream::stream;
use async_trait::async_trait;
use axum::response::sse::Event;
use chat_template::ChatTemplate;
use futures::future::try_join_all;
use futures::Stream;
use minijinja::ErrorKind;
use serde::Serialize;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::{OwnedSemaphorePermit, Semaphore, TryAcquireError};
use tokio::time::Instant;
use tokio_stream::wrappers::UnboundedReceiverStream;
use tokio_stream::StreamExt;
use tracing::instrument;

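/// Abstraction over the model-serving backend: the router validates requests,
/// schedules them on a [`Backend`], and streams the resulting tokens back.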
#[async_trait]
pub trait Backend {
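    /// Queue a validated request, returning a stream of generation events or
    /// an error if scheduling fails.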
    fn schedule(
        &self,
        request: ValidGenerateRequest,
    ) -> Result<UnboundedReceiverStream<Result<InferStreamResponse, InferError>>, InferError>;

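    /// Probe backend health; `current_health` is the last known state.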
    async fn health(&self, current_health: bool) -> bool;
}
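
// A minimal sketch of a `Backend` implementation, kept behind `cfg(test)` and
// purely illustrative (this hypothetical backend is not used by the router):
// `schedule` emits a single canned error and `health` echoes the cached state.
#[cfg(test)]
#[allow(dead_code)]
struct EchoBackend;

#[cfg(test)]
#[async_trait]
impl Backend for EchoBackend {
    fn schedule(
        &self,
        _request: ValidGenerateRequest,
    ) -> Result<UnboundedReceiverStream<Result<InferStreamResponse, InferError>>, InferError> {
        let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
        // Send one error, then drop the sender to close the stream.
        let _ = tx.send(Err(InferError::GenerationError(
            "not implemented".to_string(),
        )));
        Ok(UnboundedReceiverStream::new(rx))
    }

    async fn health(&self, current_health: bool) -> bool {
        current_health
    }
}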

/// Inference struct
#[derive(Clone)]
pub struct Infer {
    /// Validation
    validation: Validation,
    /// Request backend
    backend: Arc<dyn Backend + Send + Sync>,
    /// Chat template
    chat_template: Option<ChatTemplate>,
    /// Inference limit
    limit_concurrent_requests: Arc<Semaphore>,
    /// Backend health
    backend_health: Arc<AtomicBool>,
}

impl Infer {
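    /// Build a new `Infer`, resolving the chat template from the tokenizer
    /// config first and falling back to the processor config.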
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn new(
        backend: impl Backend + Send + Sync + 'static,
        validation: Validation,
        max_concurrent_requests: usize,
        tokenizer_config: HubTokenizerConfig,
        processor_config: HubProcessorConfig,
    ) -> Self {
        let chat_template = tokenizer_config
            .chat_template
            .or(processor_config.chat_template)
            .and_then(|t| match t {
                ChatTemplateVersions::Single(template) => Some(template),
                ChatTemplateVersions::Multiple(templates) => templates
                    .into_iter()
                    .find(|t| t.name == "default")
                    .map(|t| t.template),
            })
            .map(|t| ChatTemplate::new(t, tokenizer_config.bos_token, tokenizer_config.eos_token));

        // Inference limit with a semaphore
        let semaphore = Arc::new(Semaphore::new(max_concurrent_requests));

        // Backend health
        let backend_health = Arc::new(AtomicBool::new(false));

        Self {
            validation,
            backend: Arc::new(backend),
            chat_template,
            limit_concurrent_requests: semaphore,
            backend_health,
        }
    }

    /// Add a new request to the queue and return a stream of InferStreamResponse
    #[instrument(skip_all)]
    pub(crate) async fn generate_stream<'a>(
        &'a self,
        request: GenerateRequest,
    ) -> Result<
        (
            OwnedSemaphorePermit,
            u32, // input_length
            impl Stream<Item = Result<InferStreamResponse, InferError>> + 'a,
        ),
        InferError,
    > {
        // Limit concurrent requests by acquiring a permit from the semaphore
        let permit = self
            .clone()
            .limit_concurrent_requests
            .try_acquire_owned()
            .map_err(|err| {
                metrics::counter!("tgi_request_failure", "err" => "overloaded").increment(1);
                tracing::error!("{err}");
                err
            })?;

        // Validate request
        let mut local_request = request.clone();
        let valid_request = self.validation.validate(request).await.map_err(|err| {
            metrics::counter!("tgi_request_failure", "err" => "validation").increment(1);
            tracing::error!("{err}");
            err
        })?;

        let seed = valid_request.parameters.seed;
        local_request.parameters.seed = Some(seed);
        let input_length = valid_request.input_length;
        let max_total_new_tokens = valid_request.stopping_parameters.max_total_new_tokens;
        let mut generation_stream = self.backend.schedule(valid_request)?;

        // Wrap generation stream to update the backend health if the stream contains an error
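        // It also transparently re-schedules the request when generation stops on
        // `FinishReason::Length` while the `max_total_new_tokens` budget is not
        // exhausted, stitching the partial generations into a single response.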
        let final_stream = stream! {
            let mut total_generated_tokens = 0;
            let mut first_start = None;
            let mut first_queued = None;
            let mut all_generated_text: Option<GeneratedText> = None;

            while let Some(response) = generation_stream.next().await {
                let response = response.inspect_err(|_err| {
                    self.backend_health.store(false, Ordering::SeqCst);
                })?;

                match response {
                    InferStreamResponse::Prefill(_) => yield Ok(response),
                    InferStreamResponse::Intermediate { .. } => {
                        total_generated_tokens += 1;
                        yield Ok(response);
                    }
                    InferStreamResponse::End {
                        token,
                        top_tokens,
                        generated_text,
                        start,
                        queued,
                    } => {
                        total_generated_tokens += 1;
                        first_start = first_start.or(Some(start));
                        first_queued = first_queued.or(Some(queued));
                        if let Some(v) = all_generated_text.as_mut() {
                            v.text.push_str(&generated_text.text);
                            v.generated_tokens = total_generated_tokens;
                            v.finish_reason = generated_text.finish_reason.clone();
                        }

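                        // The backend stopped early because it hit its per-call token
                        // limit: if the client's total budget allows, feed the text
                        // generated so far back as input and schedule a continuation.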
                        if matches!(generated_text.finish_reason, FinishReason::Length)
                            && total_generated_tokens < max_total_new_tokens
                        {
                            local_request.inputs.push_str(&generated_text.text);
                            all_generated_text = all_generated_text.or(Some(generated_text));

                            let valid_request = match self.validation.validate(local_request.clone()).await {
                                Ok(valid_request) => valid_request,
                                Err(err) => {
                                    tracing::debug!("Failed to continue request: {err}");
                                    yield Ok(InferStreamResponse::End {
                                        token,
                                        top_tokens,
                                        generated_text: all_generated_text.unwrap(),
                                        start: first_start.unwrap(),
                                        queued: first_queued.unwrap(),
                                    });
                                    break;
                                }
                            };

                            generation_stream = match self.backend.schedule(valid_request) {
                                Ok(stream) => {
                                    tracing::debug!("Continue request");
                                    yield Ok(InferStreamResponse::Intermediate { token, top_tokens });
                                    stream
                                },
                                Err(err) => {
                                    tracing::debug!("Failed to continue request: {err}");
                                    yield Ok(InferStreamResponse::End {
                                        token,
                                        top_tokens,
                                        generated_text: all_generated_text.unwrap(),
                                        start: first_start.unwrap(),
                                        queued: first_queued.unwrap(),
                                    });
                                    break;
                                }
                            }
                        } else {
                            yield Ok(InferStreamResponse::End {
                                token,
                                top_tokens,
                                generated_text: all_generated_text.unwrap_or(generated_text),
                                start: first_start.unwrap(),
                                queued: first_queued.unwrap(),
                            });
                            break;
                        }
                    }
                }
            }
        };

        Ok((permit, input_length, final_stream))
    }

    /// Tokenize the input
    #[instrument(skip_all)]
    pub(crate) async fn tokenize(
        &self,
        request: GenerateRequest,
    ) -> Result<tokenizers::Encoding, InferError> {
        // Tokenize request
        let inputs = request.inputs;
        let add_special_tokens = request.add_special_tokens;
        let truncate = request.parameters.truncate;
        let encoding = self
            .validation
            .tokenize(inputs, add_special_tokens, truncate)
            .await
            .map_err(|err| {
                tracing::error!("Tokenization {err}");
                err
            })?;

        // Return Encoding
        Ok(encoding.0)
    }

    /// Apply the chat template to the chat request
    #[instrument(skip_all)]
    pub(crate) fn apply_chat_template(
        &self,
        messages: Vec<Message>,
        tools_and_prompt: Option<(Vec<Tool>, String)>,
    ) -> Result<String, InferError> {
        self.chat_template
            .as_ref()
            .ok_or_else(|| InferError::TemplateError(ErrorKind::TemplateNotFound.into()))?
            .apply(messages, tools_and_prompt)
            .map_err(|e| {
                metrics::counter!("tgi_request_failure", "err" => "template").increment(1);
                tracing::error!("{e}");
                e
            })
    }

    /// Add a new request to the queue and return an InferResponse
    #[instrument(skip_all)]
    pub(crate) async fn generate(
        &self,
        request: GenerateRequest,
    ) -> Result<InferResponse, InferError> {
        let use_top_tokens = request.parameters.top_n_tokens.is_some_and(|x| x > 0);

        // Create stream and keep semaphore permit as long as generate lives
        let (_permit, _input_length, stream) = self.generate_stream(request).await?;

        // Return values
        let mut result_prefill = Vec::new();
        let mut result_tokens = Vec::new();
        let mut result_top_tokens = Vec::new();
        let mut result_generated_text = None;
        let mut result_start = None;
        let mut result_queued = None;

        let mut stream = Box::pin(stream);

        // Iterate on stream
        while let Some(response) = stream.next().await {
            match response? {
                // Add prefill tokens
                InferStreamResponse::Prefill(prefill_tokens) => {
                    result_prefill = prefill_tokens;
                }
                // Push last token
                InferStreamResponse::Intermediate { token, top_tokens } => {
                    result_tokens.push(token);
                    result_top_tokens.push(top_tokens);
                }
                // Final message
                // Set return values
                InferStreamResponse::End {
                    token,
                    generated_text,
                    start,
                    queued,
                    top_tokens,
                } => {
                    result_tokens.push(token);
                    result_top_tokens.push(top_tokens);
                    result_generated_text = Some(generated_text);
                    result_start = Some(start);
                    result_queued = Some(queued)
                }
            }
        }

        // Check that we received an `InferStreamResponse::End` message
        if let (Some(generated_text), Some(queued), Some(start)) =
            (result_generated_text, result_queued, result_start)
        {
            Ok(InferResponse {
                prefill: result_prefill,
                _input_length,
                tokens: result_tokens,
                generated_text,
                queued,
                start,
                top_tokens: if use_top_tokens {
                    result_top_tokens
                } else {
                    Vec::new()
                },
            })
        } else {
            let err = InferError::IncompleteGeneration;
            metrics::counter!("tgi_request_failure", "err" => "incomplete").increment(1);
            tracing::error!("{err}");
            Err(err)
        }
    }
    /// Add best_of new requests to the queue and return an InferResponse of the sequence with
    /// the highest log probability per token
    #[instrument(skip(self, request))]
    pub(crate) async fn generate_best_of(
        &self,
        request: GenerateRequest,
        best_of: usize,
    ) -> Result<(InferResponse, Vec<InferResponse>), InferError> {
        // validate best_of parameter separately
        let best_of = self.validation.validate_best_of(best_of)?;

        // create multiple generate requests
        let mut infer_responses: Vec<InferResponse> =
            try_join_all((0..best_of).map(|_| self.generate(request.clone()))).await?;

        // get the sequence with the highest log probability per token
        let mut max_index = 0;
        let mut max_logprob: f32 = f32::MIN;

        for (i, response) in infer_responses.iter().enumerate() {
            // mean logprobs of the generated tokens
            let sequence_logprob = response
                .tokens
                .iter()
                .map(|token| token.logprob)
                .sum::<f32>()
                / response.tokens.len() as f32;

            // set best sequence
            if sequence_logprob > max_logprob {
                max_index = i;
                max_logprob = sequence_logprob;
            }
        }
        let best_response = infer_responses.remove(max_index);
        Ok((best_response, infer_responses))
    }

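    /// Query the backend health, passing the cached state, and update the cache.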
    #[instrument(skip(self))]
    pub(crate) async fn health(&self) -> bool {
        let health = self
            .backend
            .health(self.backend_health.load(Ordering::SeqCst))
            .await;
        self.backend_health.store(health, Ordering::SeqCst);
        health
    }
}

#[derive(Debug)]
pub struct GeneratedText {
    pub text: String,
    pub generated_tokens: u32,
    pub finish_reason: FinishReason,
    pub seed: Option<u64>,
}

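/// Events yielded over a generation stream: `Prefill` (optional), any number
/// of `Intermediate` messages, and a final `End` message.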
#[derive(Debug)]
pub enum InferStreamResponse {
    // Optional first message
    Prefill(Vec<PrefillToken>),
    // Intermediate messages
    Intermediate {
        token: Token,
        top_tokens: Vec<Token>,
    },
    // Last message
    End {
        token: Token,
        top_tokens: Vec<Token>,
        generated_text: GeneratedText,
        start: Instant,
        queued: Instant,
    },
}

#[derive(Debug)]
pub(crate) struct InferResponse {
    /// input_length is the input as perceived by the rust tokenizer in the
    /// validation pathway. It is redundant with prefill.len() but prefill
    /// has data only if the user asked for it. This will always be filled.
    pub(crate) _input_length: u32,
    pub(crate) prefill: Vec<PrefillToken>,
    pub(crate) tokens: Vec<Token>,
    pub(crate) generated_text: GeneratedText,
    pub(crate) queued: Instant,
    pub(crate) start: Instant,
    pub(crate) top_tokens: Vec<Vec<Token>>,
}

#[derive(Debug, Error)]
pub enum InferError {
    #[error("Request failed during generation: {0}")]
    GenerationError(String),
    #[error("Model is overloaded")]
    Overloaded(#[from] TryAcquireError),
    #[error("Input validation error: {0}")]
    ValidationError(#[from] ValidationError),
    #[error("Incomplete generation")]
    IncompleteGeneration,
    #[error("Incomplete generation stream")]
    IncompleteGenerationStream,
    #[error("Template error: {0}")]
    TemplateError(#[from] minijinja::Error),
    #[error("Missing template vatiable: {0}")]
    MissingTemplateVariable(String),
    #[error("Tool error: {0}")]
    ToolError(String),
    #[error("Stream event serialization error")]
    StreamSerializationError(String),
}

impl InferError {
    pub(crate) fn error_type(&self) -> &str {
        match self {
            InferError::GenerationError(_) => "generation",
            InferError::Overloaded(_) => "overloaded",
            InferError::ValidationError(_) => "validation",
            InferError::IncompleteGeneration => "incomplete_generation",
            InferError::IncompleteGenerationStream => "incomplete_generation_stream",
            InferError::TemplateError(_) => "template_error",
            InferError::MissingTemplateVariable(_) => "missing_template_variable",
            InferError::ToolError(_) => "tool_error",
            InferError::StreamSerializationError(_) => "stream_serialization_error",
        }
    }

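    /// Serialize the error as an OpenAI-compatible SSE error event.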
    pub(crate) fn into_openai_event(self) -> Event {
        Event::default()
            .json_data(OpenaiErrorEvent {
                error: APIError {
                    message: self.to_string(),
                    http_status_code: 422,
                },
            })
            .unwrap()
    }
}

#[derive(Serialize)]
pub struct APIError {
    message: String,
    http_status_code: usize,
}

#[derive(Serialize)]
pub struct OpenaiErrorEvent {
    error: APIError,
}