/// Payload validation logic
use crate::config::Config;
use crate::validation::ValidationError::{BestOfSampling, BestOfSeed, EmptyInput};
use crate::{
    GenerateParameters, GenerateRequest, GrammarType, HubPreprocessorConfig, Idefics2Preprocessor,
    TokenizerTrait,
};
use crate::{PyTokenizer, Tokenizer};
use base64::{engine::general_purpose::STANDARD, Engine};
use image::{ImageFormat, ImageReader};
use jsonschema::{Draft, JSONSchema};
use rand::{thread_rng, Rng};
use serde_json::Value;
use std::io::Cursor;
use std::iter;
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tracing::{instrument, Span};
use {once_cell::sync::Lazy, regex::Regex};

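// Typical flow (a sketch, not a contract; the parameter values below are
// illustrative only): build a `Validation` once at startup, then run every
// incoming request through `validate` before queueing it.
//
//     let validation = Validation::new(
//         2, tokenizer, None, None, 2, 4, 5, 1024, 2048, false,
//     );
//     let valid_request = validation.validate(request).await?;
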
/// Validation
#[derive(Debug, Clone)]
pub struct Validation {
    /// Validation parameters
    max_best_of: usize,
    max_stop_sequences: usize,
    max_top_n_tokens: u32,
    max_input_length: usize,
    max_total_tokens: usize,
    disable_grammar_support: bool,
    /// Channel to communicate with the background tokenization task
    sender: mpsc::UnboundedSender<TokenizerRequest>,
}

impl Validation {
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn new(
        workers: usize,
        tokenizer: Tokenizer,
        config: Option<Config>,
        preprocessor_config: Option<HubPreprocessorConfig>,
        max_best_of: usize,
        max_stop_sequences: usize,
        max_top_n_tokens: u32,
        max_input_length: usize,
        max_total_tokens: usize,
        disable_grammar_support: bool,
    ) -> Self {
        let workers = if let Tokenizer::Python { .. } = &tokenizer {
            1
        } else {
            workers
        };
        // Spawn the tokenizer workers behind a round-robin channel
        let sender = {
            // Create round robin channel
            let (validation_sender, validation_round_robin_receiver) = mpsc::unbounded_channel();
            let mut senders = Vec::with_capacity(workers);

            // Create workers
            for _ in 0..workers {
                let tokenizer_clone = tokenizer.clone();
                let config_clone = config.clone();
                let preprocessor_config_clone = preprocessor_config.clone();
                let (tokenizer_sender, tokenizer_receiver) = mpsc::unbounded_channel();
                senders.push(tokenizer_sender);

                // Spawn worker
                tokio::task::spawn_blocking(move || {
                    tokenizer_worker(
                        tokenizer_clone,
                        config_clone,
                        preprocessor_config_clone,
                        tokenizer_receiver,
                    )
                });
            }

            // Create tokenization round robin task
            tokio::spawn(round_robin_task(validation_round_robin_receiver, senders));

            validation_sender
        };

        Self {
            max_best_of,
            sender,
            max_stop_sequences,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        }
    }

    #[instrument(skip(self, inputs))]
    pub async fn tokenize(
        &self,
        inputs: String,
        add_special_tokens: bool,
        truncate: Option<usize>,
    ) -> Result<(tokenizers::Encoding, Vec<Chunk>), ValidationError> {
        // Create response channel
        let (response_sender, response_receiver) = oneshot::channel();
        // Send request to the background validation task
        // Unwrap is safe here
        let _ = &self
            .sender
            .send((
                (inputs, add_special_tokens, truncate),
                response_sender,
                Span::current(),
            ))
            .unwrap();

        // Await on response channel
        // Unwrap is safe here
        let encoding = response_receiver.await.unwrap()?;
        Ok(encoding)
    }

    #[allow(clippy::type_complexity)]
    #[instrument(skip(self, inputs))]
    async fn validate_input(
        &self,
        inputs: String,
        add_special_tokens: bool,
        truncate: Option<usize>,
        max_new_tokens: Option<u32>,
    ) -> Result<(Vec<Chunk>, Option<Vec<u32>>, usize, u32), ValidationError> {
        // Tokenize the inputs in the background workers
        let (encoding, inputs) = self
            .tokenize(inputs.clone(), add_special_tokens, truncate)
            .await?;
        // Compute the effective input length, accounting for truncation
        let input_length = if let Some(truncate) = truncate {
            std::cmp::min(encoding.len(), truncate)
        } else {
            encoding.len()
        };

        // Get total tokens
        let max_new_tokens: u32 = if let Some(max_new_tokens) = max_new_tokens {
            max_new_tokens
        } else {
            self.max_total_tokens.saturating_sub(input_length) as u32
        };
        let total_tokens = input_length + max_new_tokens as usize;

        // Validate MaxTotalTokens
        if total_tokens > self.max_total_tokens {
            return Err(ValidationError::MaxTotalTokens(
                self.max_total_tokens,
                input_length,
                max_new_tokens,
            ));
        }

        // Validate InputLength
        if input_length > self.max_input_length {
            return Err(ValidationError::InputLength(
                self.max_input_length,
                input_length,
            ));
        }

        let ids = encoding.get_ids();
        let input_ids = ids[ids.len().saturating_sub(input_length)..].to_owned();

        metrics::histogram!("tgi_request_input_length").record(input_length as f64);
        Ok((inputs, Some(input_ids), input_length, max_new_tokens))
    }

    /// Validate a payload and get the number of tokens in the input
    #[instrument(skip_all)]
    pub(crate) async fn validate(
        &self,
        request: GenerateRequest,
    ) -> Result<ValidGenerateRequest, ValidationError> {
        let GenerateParameters {
            best_of,
            temperature,
            repetition_penalty,
            frequency_penalty,
            top_k,
            top_p,
            typical_p,
            do_sample,
            max_new_tokens,
            stop: stop_sequences,
            truncate,
            seed,
            watermark,
            decoder_input_details,
            top_n_tokens,
            grammar,
            adapter_id,
            ..
        } = request.parameters;

        // sampling must be true when best_of > 1
        let best_of = best_of.unwrap_or(1);
        let sampling = do_sample
            || temperature.is_some()
            || top_k.is_some()
            || top_p.is_some()
            || typical_p.is_some();

        if best_of > 1 && !sampling {
            return Err(BestOfSampling);
        }

        let temperature = temperature.unwrap_or(1.0);
        if temperature <= 0.0 {
            return Err(ValidationError::Temperature);
        }

        let repetition_penalty = repetition_penalty.unwrap_or(1.0);
        if repetition_penalty <= 0.0 {
            return Err(ValidationError::RepetitionPenalty);
        }

        let frequency_penalty = frequency_penalty.unwrap_or(0.0);
        if !(-2.0..=2.0).contains(&frequency_penalty) {
            return Err(ValidationError::FrequencyPenalty);
        }

        // Different because the proto default value is not a valid value
        // for the user
        let top_p = top_p
            .map(|value| {
                if value <= 0.0 || value >= 1.0 {
                    return Err(ValidationError::TopP);
                }
                Ok(value)
            })
            .unwrap_or(Ok(1.0))?;

        let typical_p = typical_p
            .map(|value| {
                if value <= 0.0 || value >= 1.0 {
                    return Err(ValidationError::TypicalP);
                }
                Ok(value)
            })
            .unwrap_or(Ok(1.0))?;

        let top_k: u32 = top_k
            .map(|value| {
                if value <= 0 {
                    return Err(ValidationError::TopK);
                }
                Ok(value as u32)
            })
            .unwrap_or(Ok(0))?;

        if max_new_tokens == Some(0) {
            return Err(ValidationError::NegativeMaxNewTokens);
        }

        if stop_sequences.len() > self.max_stop_sequences {
            return Err(ValidationError::StopSequence(
                self.max_stop_sequences,
                stop_sequences.len(),
            ));
        }

        // If seed is None, assign a random one
        let seed = match seed {
            None => thread_rng().gen(),
            Some(seed) => {
                if best_of > 1 {
                    return Err(BestOfSeed);
                }
                seed
            }
        };

        let top_n_tokens = top_n_tokens
            .map(|value| {
                if value > self.max_top_n_tokens {
                    return Err(ValidationError::TopNTokens(self.max_top_n_tokens, value));
                }
                Ok(value)
            })
            .unwrap_or(Ok(0))?;

        // Check if inputs is empty
        if request.inputs.is_empty() {
            return Err(EmptyInput);
        }

        // Check if truncate is strictly positive and less than max_input_length
        let truncate = truncate
            .map(|value| {
                if value == 0 || value > self.max_input_length {
                    return Err(ValidationError::Truncate(self.max_input_length, value));
                }
                Ok(Some(value))
            })
            .unwrap_or(Ok(None))?;

        // Validate inputs
        let (inputs, input_ids, input_length, max_new_tokens) = self
            .validate_input(
                request.inputs,
                request.add_special_tokens,
                truncate,
                max_new_tokens,
            )
            .await?;

        // TODO: we should build the FSM here and pass the compiled FSM instead of the grammar
        // NOTE: this is currently difficult because we need the tokenizer in Python to build
        // the FSM and we'd have to load a copy of the tokenizer into our Pyo3 instance which
        // may be slow and memory intensive. Best case is to have a Rust implementation of the FSM
        // compiler and use that to build the FSM here.

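        // A minimal JSON grammar payload (illustrative) that passes the checks
        // below: it is a JSON object, it compiles as a Draft 2020-12 JSONSchema,
        // and it carries a `properties` field:
        //
        //     {"type": "object", "properties": {"name": {"type": "string"}}}
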
        // Validate grammar and unpack the grammar and type for the proto message
        let grammar = match grammar {
            Some(grammar) => {
                // Ensure that grammar is not set if it's not supported
                if self.disable_grammar_support {
                    return Err(ValidationError::Grammar);
                }
                let valid_grammar = match grammar {
                    GrammarType::Json(json) => {
                        let json = match json {
                            // if the value is a string, parse it again to make sure it is
                            // valid json
                            Value::String(s) => serde_json::from_str(&s)
                                .map_err(|e| ValidationError::InvalidGrammar(e.to_string())),
                            Value::Object(_) => Ok(json),
                            _ => Err(ValidationError::Grammar),
                        }?;

                        // Check if the json is a valid JSONSchema
                        JSONSchema::options()
                            .with_draft(Draft::Draft202012)
                            .compile(&json)
                            .map_err(|e| ValidationError::InvalidGrammar(e.to_string()))?;

                        // The schema can be valid but lack properties.
                        // We need properties for the grammar to be successfully parsed in Python.
                        // Therefore, we must check and throw an error if properties are missing.
                        json.get("properties")
                            .ok_or(ValidationError::InvalidGrammar(
                                "Grammar must have a 'properties' field".to_string(),
                            ))?;

                        // Serialize json to string
                        ValidGrammar::Json(
                            serde_json::to_string(&json)
                                .map_err(|e| ValidationError::InvalidGrammar(e.to_string()))?,
                        )
                    }
                    GrammarType::Regex(regex) => ValidGrammar::Regex(regex),
                };
                Some(valid_grammar)
            }
            None => None,
        };

        let parameters = ValidParameters {
            temperature,
            repetition_penalty,
            frequency_penalty,
            top_k,
            top_p,
            typical_p,
            do_sample,
            seed,
            watermark,
            grammar,
        };
        let stopping_parameters = ValidStoppingParameters {
            max_new_tokens,
            stop_sequences,
            ignore_eos_token: false,
        };

        metrics::histogram!("tgi_request_max_new_tokens").record(max_new_tokens as f64);

        Ok(ValidGenerateRequest {
            inputs,
            input_ids: input_ids.map(Arc::new),
            add_special_tokens: request.add_special_tokens,
            decoder_input_details,
            input_length: input_length as u32,
            truncate: truncate.unwrap_or(self.max_input_length) as u32,
            parameters,
            stopping_parameters,
            top_n_tokens,
            adapter_id,
        })
    }

    /// Validate the best_of parameter
    #[instrument(skip_all)]
    pub(crate) fn validate_best_of(&self, best_of: usize) -> Result<usize, ValidationError> {
        if self.max_best_of == 1 && best_of != 1 {
            return Err(ValidationError::BestOfDisabled);
        }

        if best_of > self.max_best_of {
            return Err(ValidationError::BestOf(self.max_best_of, best_of));
        }

        Ok(best_of)
    }
}

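// Dispatch model, for orientation: `Validation::tokenize` pushes requests into
// a single unbounded channel; the task below drains it and forwards each
// request to the worker channels in fixed rotation, spreading tokenization
// load evenly across the blocking workers spawned in `Validation::new`.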
/// Round robin tokenization task
async fn round_robin_task(
    mut receiver: mpsc::UnboundedReceiver<TokenizerRequest>,
    senders: Vec<mpsc::UnboundedSender<TokenizerRequest>>,
) {
    loop {
        for sender in &senders {
            match receiver.recv().await {
                None => return,
                Some(request) => sender.send(request).unwrap(),
            };
        }
    }
}

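// Note: for `Tokenizer::Python`, `Validation::new` forces a single worker,
// since the pyo3 path below has to take the Python GIL (`Python::with_gil`),
// which would serialize concurrent workers anyway.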
/// Start tokenization workers
fn tokenizer_worker(
    tokenizer: Tokenizer,
    config: Option<Config>,
    preprocessor_config: Option<HubPreprocessorConfig>,
    mut receiver: mpsc::UnboundedReceiver<TokenizerRequest>,
) {
    match tokenizer {
        Tokenizer::Python {
            tokenizer_name,
            revision,
        } => {
            pyo3::Python::with_gil(|py| -> pyo3::PyResult<()> {
                let tokenizer = PyTokenizer::from_py(py, tokenizer_name, revision)?;
                // Loop over requests
                while let Some(((inputs, add_special_tokens, truncate), response_tx, parent_span)) =
                    receiver.blocking_recv()
                {
                    parent_span.in_scope(|| {
                        response_tx
                            .send(prepare_input(
                                inputs,
                                truncate,
                                add_special_tokens,
                                &tokenizer,
                                config.as_ref(),
                                preprocessor_config.as_ref(),
                            ))
                            .unwrap_or(())
                    })
                }
                Ok(())
            })
            .expect("Failure in python tokenizer worker");
        }
        Tokenizer::Rust(tokenizer) => {
            while let Some(((inputs, add_special_tokens, truncate), response_tx, parent_span)) =
                receiver.blocking_recv()
            {
                parent_span.in_scope(|| {
                    response_tx
                        .send(prepare_input(
                            inputs,
                            truncate,
                            add_special_tokens,
                            &tokenizer,
                            config.as_ref(),
                            preprocessor_config.as_ref(),
                        ))
                        .unwrap_or(())
                })
            }
        }
    }
}

fn format_from_mimetype(mimetype: &str) -> Option<ImageFormat> {
    match mimetype {
        "image/png" => Some(ImageFormat::Png),
        "image/jpeg" => Some(ImageFormat::Jpeg),
        "image/jpg" => Some(ImageFormat::Jpeg),
        "image/gif" => Some(ImageFormat::Gif),
        "image/webp" => Some(ImageFormat::WebP),
        "image/tiff" => Some(ImageFormat::Tiff),
        // "image/pnm"=>Some(ImageFormat::Pnm),
        // "image/tga"=>Some(ImageFormat::Tga),
        // "image/dds"=>Some(ImageFormat::Dds),
        // "image/bmp"=>Some(ImageFormat::Bmp),
        // "image/ico"=>Some(ImageFormat::Ico),
        // "image/x-exr"=>Some(ImageFormat::OpenExr),
        _ => None,
    }
}

fn format_to_mimetype(format: ImageFormat) -> String {
    match format {
        ImageFormat::Png => "image/png",
        ImageFormat::Jpeg => "image/jpeg",
        ImageFormat::Gif => "image/gif",
        ImageFormat::WebP => "image/webp",
        ImageFormat::Tiff => "image/tiff",
        _ => "application/octet-stream",
    }
    .to_string()
}

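/// Fetch an image referenced with markdown image syntax and return its raw
/// bytes, mimetype and dimensions. Two shapes are accepted (the URLs below are
/// illustrative):
///
/// - remote: `![](https://example.com/cat.png)`
/// - inline: `![](data:image/png;base64,iVBORw0...)`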
fn fetch_image(input: &str) -> Result<(Vec<u8>, String, usize, usize), ValidationError> {
    if input.starts_with("![](http://") || input.starts_with("![](https://") {
        let url = &input["![](".len()..input.len() - 1];
        let data = reqwest::blocking::get(url)?.bytes()?;

        let format = image::guess_format(&data)?;
        // TODO Remove this clone
        let img = ImageReader::with_format(Cursor::new(data.clone()), format).decode()?;
        let height: usize = img.height().try_into()?;
        let width: usize = img.width().try_into()?;
        let mimetype = format_to_mimetype(format);
        Ok((data.to_vec(), mimetype, height, width))
    } else if input.starts_with("![](data:") {
        // Remove ![](....)
        let content = &input["![](data:".len()..input.len() - 1];
        let tokens: Vec<_> = content.split(';').collect();
        if tokens.len() != 2 {
            return Err(ValidationError::InvalidImageContent(content.to_string()));
        }
        let mimetype = tokens[0];
        let content = tokens[1];

        if !content.starts_with("base64,") {
            return Err(ValidationError::InvalidImageContent(content.to_string()));
        }

        let data = STANDARD.decode(content["base64,".len()..].as_bytes())?;
        let img = if let Some(format) = format_from_mimetype(mimetype) {
            ImageReader::with_format(Cursor::new(&data), format).decode()?
        } else {
            ImageReader::new(Cursor::new(&data))
                .with_guessed_format()
                .map_err(|_io_error| ValidationError::InvalidImageContent(content.to_string()))?
                .decode()?
        };

        let height: usize = img.height().try_into()?;
        let width: usize = img.width().try_into()?;
        Ok((data, mimetype.to_string(), height, width))
    } else {
        Err(ValidationError::InvalidImageContent(input.to_string()))
    }
}

fn image_tokens(
    config: &Config,
    preprocessor_config: Option<&HubPreprocessorConfig>,
    height: usize,
    width: usize,
) -> String {
    use Config::*;
    use HubPreprocessorConfig::*;
    match config {
        Idefics => "<image>".to_string(),
        Mllama => "<|image|>".to_string(),
        Idefics2(config) => {
            const FAKE: &str = "<fake_token_around_image>";
            const IMAGE: &str = "<image>";

            let slots = config.get_number_of_features(height, width);

            let mut image_string = String::with_capacity(2 * FAKE.len() + slots * IMAGE.len());
            image_string.push_str(FAKE);
            image_string.extend(iter::repeat(IMAGE).take(slots));
            image_string.push_str(FAKE);

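            // With image splitting enabled, the processor expands each image
            // into 5 sub-images, so the token string is repeated accordingly
            // (see `test_idefics2_correct_n_fake_tokens` below).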
            if matches!(
                preprocessor_config,
                Some(Idefics2Processor(Idefics2Preprocessor {
                    do_image_splitting: true,
                    ..
                }))
            ) {
                image_string = image_string.repeat(5);
            };

            image_string
        }
        Paligemma(config) => "<image>".repeat(config.get_number_of_features(height, width)),
        LlavaNext(config) => "<image>".repeat(config.get_number_of_features(height, width)),
        _ => unimplemented!("Images tokens are not supported for this model configuration"),
    }
}

fn image_tokens_fixup(config: &Config, text: String) -> String {
    match config {
        Config::Idefics2(_) => {
            const FAKE: &str = "<fake_token_around_image>";
            text.replace(&format!("{FAKE}{FAKE}"), FAKE)
        }
        _ => text,
    }
}

/// Get input length and optionally truncate it
fn prepare_input<T: TokenizerTrait>(
    inputs: String,
    _truncate: Option<usize>,
    add_special_tokens: bool,
    tokenizer: &T,
    config: Option<&Config>,
    preprocessor_config: Option<&HubPreprocessorConfig>,
) -> Result<(tokenizers::Encoding, Vec<Chunk>), ValidationError> {
    use Config::*;
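    // Matches markdown image references, i.e. `![](...)`; this is the only
    // form in which images can be embedded in the prompt string (see
    // `fetch_image` for the accepted contents of the parentheses).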
    static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"!\[\]\([^\)]*\)").unwrap());
    let (tokenizer_query, input_chunks) = match config {
        Some(config @ (Idefics | Mllama | Idefics2(_) | Paligemma(_) | LlavaNext(_))) => {
            let mut input_chunks = Vec::new();
            let mut tokenizer_query = String::with_capacity(inputs.len());
            let mut start = 0;
            for chunk in RE.find_iter(&inputs) {
                let chunk_start = chunk.start();
                let chunk_end = chunk.end();
                if chunk_start != start {
                    input_chunks.push(Chunk::Text(inputs[start..chunk_start].to_string()));
                    tokenizer_query.push_str(&inputs[start..chunk_start]);
                }
                let (data, mimetype, height, width) = fetch_image(&inputs[chunk_start..chunk_end])?;
                input_chunks.push(Chunk::Image(Image { data, mimetype }));
                tokenizer_query.push_str(&image_tokens(config, preprocessor_config, height, width));
                start = chunk_end;
            }
            if start != inputs.len() {
                input_chunks.push(Chunk::Text(inputs[start..].to_string()));
                tokenizer_query.push_str(&inputs[start..]);
            }

            tokenizer_query = image_tokens_fixup(config, tokenizer_query);

            (tokenizer_query, input_chunks)
        }
        _ => (inputs.clone(), vec![Chunk::Text(inputs)]),
    };

    // Get the number of tokens in the input
    let encoding = tokenizer
        .encode_trait(tokenizer_query, add_special_tokens)
        .map_err(|err| ValidationError::Tokenizer(err.to_string()))?;

    Ok((encoding, input_chunks))
}

type TokenizerRequest = (
    (String, bool, Option<usize>),
    oneshot::Sender<Result<(tokenizers::Encoding, Vec<Chunk>), ValidationError>>,
    Span,
);

#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Image {
    pub data: Vec<u8>,
    pub mimetype: String,
}

#[derive(Debug, Clone, Eq, PartialEq)]
pub enum Chunk {
    Text(String),
    Image(Image),
}

/// Convert input chunks to a stringly-typed input for backwards
/// compat for backends that haven't implemented chunked inputs.
pub trait ChunksToString {
    /// Convert chunks to string.
    fn chunks_to_string(&self) -> String;
}

impl ChunksToString for Vec<Chunk> {
    fn chunks_to_string(&self) -> String {
        let mut output = String::new();
        self.iter().for_each(|c| match &c {
            Chunk::Text(text) => output.push_str(text),
            Chunk::Image(Image { data, mimetype }) => {
                let encoded = STANDARD.encode(data);
                output.push_str(&format!("![](data:{};base64,{})", mimetype, encoded))
            }
        });
        output
    }
}
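
// Example round-trip (illustrative values): `Chunk::Text("hi")` followed by a
// PNG `Chunk::Image` serializes to `"hi![](data:image/png;base64,<encoded>)"`,
// the same markdown form that `fetch_image` parses on the way in.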

#[derive(Debug, Clone)]
pub enum ValidGrammar {
    Json(String),
    Regex(String),
}

#[derive(Debug, Clone)]
pub struct ValidParameters {
    /// exponential scaling output probability distribution
    pub temperature: f32,
    /// restricting to the k highest probability elements
    pub top_k: u32,
    /// restricting to the smallest set of tokens whose cumulative probability reaches top_p
    pub top_p: f32,
    /// restricting to tokens that are locally typical, with total mass typical_p
    pub typical_p: f32,
    /// apply sampling on the logits
    pub do_sample: bool,
    /// random seed for sampling
    pub seed: u64,
    /// repetition penalty
    pub repetition_penalty: f32,
    /// frequency penalty
    pub frequency_penalty: f32,
    /// token watermarking using "A Watermark for Large Language Models"
    pub watermark: bool,
    /// grammar (applied if not empty)
    pub grammar: Option<ValidGrammar>,
}

#[derive(Debug, Clone)]
pub struct ValidStoppingParameters {
    /// Maximum number of generated tokens
    pub max_new_tokens: u32,
    /// Optional stopping sequences
    pub stop_sequences: Vec<String>,
    /// Ignore end of sequence token
    /// used for benchmarking
    pub ignore_eos_token: bool,
}

#[derive(Debug, Clone)]
pub struct ValidGenerateRequest {
    pub inputs: Vec<Chunk>,
    pub input_ids: Option<Arc<Vec<u32>>>,
    pub input_length: u32,
    pub truncate: u32,
    pub add_special_tokens: bool,
    pub decoder_input_details: bool,
    pub parameters: ValidParameters,
    pub stopping_parameters: ValidStoppingParameters,
    pub top_n_tokens: u32,
    pub adapter_id: Option<String>,
}

#[derive(Error, Debug)]
pub enum ValidationError {
    #[error("`best_of` must be > 0 and <= {0}. Given: {1}")]
    BestOf(usize, usize),
    #[error("`best_of` != 1 is not allowed for this endpoint")]
    BestOfDisabled,
    #[error("you must use sampling when `best_of` is > 1")]
    BestOfSampling,
    #[error("`seed` must not be set when `best_of` > 1")]
    BestOfSeed,
    #[error("`best_of` != 1 is not supported when streaming tokens")]
    BestOfStream,
    #[error("`top_n_tokens` must be >= 0 and <= {0}. Given: {1}")]
    TopNTokens(u32, u32),
    #[error("`top_n_tokens` != 0 is not allowed for this endpoint")]
    TopNTokensDisabled,
    #[error("`decoder_input_details` == true is not supported when streaming tokens")]
    PrefillDetailsStream,
    #[error("`temperature` must be strictly positive")]
    Temperature,
    #[error("`repetition_penalty` must be strictly positive")]
    RepetitionPenalty,
    #[error("`frequency_penalty` must be >= -2.0 and <= 2.0")]
    FrequencyPenalty,
    #[error("`top_p` must be > 0.0 and < 1.0")]
    TopP,
    #[error("`top_k` must be strictly positive")]
    TopK,
    #[error("`truncate` must be strictly positive and less than {0}. Given: {1}")]
    Truncate(usize, usize),
    #[error("`typical_p` must be > 0.0 and < 1.0")]
    TypicalP,
    #[error("one of `max_new_tokens` or `truncate` must be set if a fast tokenizer is not in use")]
    UnsetMaxNewTokens,
    #[error("`max_new_tokens` must be strictly positive")]
    NegativeMaxNewTokens,
    #[error("`max_new_tokens` must be <= {0}. Given: {1}")]
    MaxNewTokens(usize, u32),
    #[error("`inputs` tokens + `max_new_tokens` must be <= {0}. Given: {1} `inputs` tokens and {2} `max_new_tokens`")]
    MaxTotalTokens(usize, usize, u32),
    #[error("`inputs` must have less than {0} tokens. Given: {1}")]
    InputLength(usize, usize),
    #[error("`inputs` cannot be empty")]
    EmptyInput,
    #[error("`stop` supports up to {0} stop sequences. Given: {1}")]
    StopSequence(usize, usize),
    #[error("tokenizer error: {0}")]
    Tokenizer(String),
    #[error("grammar is not supported")]
    Grammar,
    #[error("grammar is not valid: {0}")]
    InvalidGrammar(String),
    #[error("base64 encoding is invalid: {0}")]
    InvalidBase64(#[from] base64::DecodeError),
    #[error("invalid image: {0}")]
    InvalidImage(#[from] image::ImageError),
    #[error("invalid integer: {0}")]
    InvalidInt(#[from] core::num::TryFromIntError),
    #[error("invalid image content: {0}")]
    InvalidImageContent(String),
    #[error("could not fetch image: {0}")]
    FailedFetchImage(#[from] reqwest::Error),
    #[error("{0} modality is not supported")]
    UnsupportedModality(&'static str),
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::{Idefics2, PaliTextConfig, Paligemma};
    use crate::default_parameters;
    use crate::tests::get_tokenizer;

    #[tokio::test]
    async fn test_validation_max_new_tokens() {
        let tokenizer = get_tokenizer();
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let max_new_tokens = 10;
        match validation
            .validate_input("Hello".to_string(), true, None, Some(max_new_tokens))
            .await
        {
            Err(ValidationError::MaxTotalTokens(6, 1, 10)) => (),
            r => panic!("Unexpected result: {r:?}"),
        }
    }

    #[tokio::test]
    async fn test_validation_input_length() {
        let tokenizer = get_tokenizer();
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let disable_grammar_support = true;
        let workers = 1;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let max_new_tokens = 10;
        match validation
            .validate_input("Hello".to_string(), true, None, Some(max_new_tokens))
            .await
        {
            Err(ValidationError::MaxTotalTokens(6, 1, 10)) => (),
            _ => panic!("Unexpected result"),
        }
    }

    #[tokio::test]
    async fn test_validation_best_of_sampling() {
        let tokenizer = get_tokenizer();
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );
        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    best_of: Some(2),
                    do_sample: false,
                    ..default_parameters()
                },
            })
            .await
        {
            Err(ValidationError::BestOfSampling) => (),
            _ => panic!("Unexpected best_of result"),
        }
    }

    #[tokio::test]
    async fn test_validation_top_p() {
        let tokenizer = get_tokenizer();
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 106;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );
        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_p: Some(1.0),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
        {
            Err(ValidationError::TopP) => (),
            _ => panic!("Unexpected top_p"),
        }

        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_p: Some(0.99),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
        {
            Ok(_) => (),
            _ => panic!("Unexpected top_p error"),
        }

        let valid_request = validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_p: None,
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        // top_p == 1.0 is invalid for users to ask for but it's the default resolved value.
        assert_eq!(valid_request.parameters.top_p, 1.0);
    }

    #[tokio::test]
    async fn test_validation_top_n_tokens() {
        let tokenizer = get_tokenizer();
        let max_best_of = 2;
        let max_stop_sequences = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 106;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequences,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );
        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: Some(5),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
        {
            Err(ValidationError::TopNTokens(4, 5)) => (),
            _ => panic!("Unexpected top_n_tokens"),
        }

        validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: Some(4),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: Some(0),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        let valid_request = validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: None,
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        assert_eq!(valid_request.top_n_tokens, 0);
    }

    static PIXEL_GIF: &str = "R0lGODdhAQABAIEAAP///wAAAAAAAAAAACwAAAAAAQABAAAIBAABBAQAOw==";

    #[tokio::test]
    async fn test_prepare_input_chunks() {
        let pixel_data = STANDARD.decode(PIXEL_GIF).unwrap();

        let tokenizer = get_tokenizer();

        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let disable_grammar_support = true;
        let workers = 1;
        let config = Config::Paligemma(Paligemma {
            text_config: PaliTextConfig {
                num_image_tokens: 1,
            },
        });
        let validation = Validation::new(
            workers,
            tokenizer,
            Some(config),
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let chunks = match validation
            .tokenize(
                format!("test![](data:image/gif;base64,{})", PIXEL_GIF),
                true,
                None,
            )
            .await
        {
            Ok((_encoding, chunks)) => chunks,
            _ => panic!("Unexpected tokenization failure"),
        };

        assert!(
            chunks
                == vec![
                    Chunk::Text("test".to_string()).into(),
                    Chunk::Image(Image {
                        data: pixel_data.clone(),
                        mimetype: "image/gif".to_string()
                    })
                    .into()
                ],
            "Failed to process images",
        );
    }

    #[tokio::test]
    async fn test_idefics2_correct_n_fake_tokens() {
        let pixel_data = STANDARD.decode(PIXEL_GIF).unwrap();

        let tokenizer = get_tokenizer();

        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let disable_grammar_support = true;
        let workers = 1;
        let config = Config::Idefics2(Idefics2 {});
        let validation = Validation::new(
            workers,
            tokenizer,
            Some(config),
            Some(HubPreprocessorConfig::Idefics2Processor(
                Idefics2Preprocessor {
                    do_image_splitting: true,
                },
            )),
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let (encoding, chunks) = match validation
            .tokenize(
                format!(
                    "test![](data:image/gif;base64,{})![](data:image/gif;base64,{})",
                    PIXEL_GIF, PIXEL_GIF
                ),
                true,
                None,
            )
            .await
        {
            Ok((encoding, chunks)) => (encoding, chunks),
            _ => panic!("Unexpected tokenization failure"),
        };

        assert!(
            chunks
                == vec![
                    Chunk::Text("test".to_string()).into(),
                    Chunk::Image(Image {
                        data: pixel_data.clone(),
                        mimetype: "image/gif".to_string()
                    })
                    .into(),
                    Chunk::Image(Image {
                        data: pixel_data.clone(),
                        mimetype: "image/gif".to_string()
                    })
                    .into()
                ],
            "Failed to process images",
        );

        // Verify the number of fake tokens:
        //
        // - Two images surrounded/separated by a fake token = 3.
        // - Both are split in 5 subimages, separated by a fake token: 2 * 4
        //
        // Fake tokens get split up by the testing tokenizer, but we don't care.
        assert_eq!(
            encoding
                .get_tokens()
                .iter()
                .filter(|t| *t == "fake")
                .count(),
            11
        );
    }
}