// validation.rs
/// Payload validation logic
use crate::config::Config;
use crate::validation::ValidationError::{BestOfSampling, BestOfSeed, EmptyInput};
use crate::{
    GenerateParameters, GenerateRequest, GrammarType, HubPreprocessorConfig, Idefics2Preprocessor,
};
use base64::{engine::general_purpose::STANDARD, Engine};
use image::{ImageFormat, ImageReader};
use jsonschema::{Draft, JSONSchema};
use rand::{thread_rng, Rng};
use serde_json::Value;
use std::io::Cursor;
use std::iter;
use std::sync::Arc;
use thiserror::Error;
use tokenizers::tokenizer::Tokenizer;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tracing::{instrument, Span};
use {once_cell::sync::Lazy, regex::Regex};

/// Validation
#[derive(Debug, Clone)]
pub struct Validation {
    /// Validation parameters
    max_best_of: usize,
    max_stop_sequences: usize,
    max_top_n_tokens: u32,
    max_input_length: usize,
    max_total_tokens: usize,
    disable_grammar_support: bool,
    /// Channel to communicate with the background tokenization task
    sender: Option<mpsc::UnboundedSender<TokenizerRequest>>,
}

impl Validation {
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn new(
        workers: usize,
        tokenizer: Option<Tokenizer>,
        config: Option<Config>,
        preprocessor_config: Option<HubPreprocessorConfig>,
        max_best_of: usize,
        max_stop_sequences: usize,
        max_top_n_tokens: u32,
        max_input_length: usize,
        max_total_tokens: usize,
        disable_grammar_support: bool,
    ) -> Self {
        // If we have a fast tokenizer
        let sender = if let Some(tokenizer) = tokenizer {
            // Create round robin channel
            let (validation_sender, validation_round_robin_receiver) = mpsc::unbounded_channel();
            let mut senders = Vec::with_capacity(workers);

            // Create workers
            for _ in 0..workers {
                let tokenizer_clone = tokenizer.clone();
                let config_clone = config.clone();
                let preprocessor_config_clone = preprocessor_config.clone();
                let (tokenizer_sender, tokenizer_receiver) = mpsc::unbounded_channel();
                senders.push(tokenizer_sender);

                // Spawn worker
                tokio::task::spawn_blocking(move || {
                    tokenizer_worker(
                        tokenizer_clone,
                        config_clone,
                        preprocessor_config_clone,
                        tokenizer_receiver,
                    )
                });
            }

            // Create tokenization round robin task
            tokio::spawn(round_robin_task(validation_round_robin_receiver, senders));

            Some(validation_sender)
        } else {
            None
        };

        Self {
            max_best_of,
            sender,
            max_stop_sequences,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        }
    }

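    /// Encode the inputs using the background tokenization workers.
    ///
    /// Returns the encoding together with the inputs split into text/image chunks,
    /// or `None` when no tokenizer was provided to `Validation::new`.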
    #[instrument(skip(self, inputs))]
    pub async fn tokenize(
        &self,
        inputs: String,
        add_special_tokens: bool,
        truncate: Option<usize>,
    ) -> Result<Option<(tokenizers::Encoding, Vec<Chunk>)>, ValidationError> {
        // If we have a fast tokenizer
        if let Some(sender) = &self.sender {
            // Create response channel
            let (response_sender, response_receiver) = oneshot::channel();
            // Send request to the background validation task
            // Unwrap is safe here
            sender
                .send((
                    (inputs, add_special_tokens, truncate),
                    response_sender,
                    Span::current(),
                ))
                .unwrap();

            // Await on response channel
            // Unwrap is safe here
            let encoding = response_receiver.await.unwrap()?;
            Ok(Some(encoding))
        } else {
            Ok(None)
        }
    }

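    /// Check the size of the inputs and resolve the effective `max_new_tokens`.
    ///
    /// With a tokenizer available, the inputs are tokenized and checked against
    /// `max_input_length` and `max_total_tokens`; without one, only `truncate`
    /// can bound the input and the actual truncation is left to the Python server.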
    #[allow(clippy::type_complexity)]
    #[instrument(skip(self, inputs))]
    async fn validate_input(
        &self,
        inputs: String,
        add_special_tokens: bool,
        truncate: Option<usize>,
        max_new_tokens: Option<u32>,
    ) -> Result<(Vec<Chunk>, Option<Vec<u32>>, usize, u32), ValidationError> {
        // If we have a fast tokenizer
        if let Some((encoding, inputs)) = self
            .tokenize(inputs.clone(), add_special_tokens, truncate)
            .await?
        {
            // Compute the effective input length, honoring truncation if requested
            let input_length = if let Some(truncate) = truncate {
                std::cmp::min(encoding.len(), truncate)
            } else {
                encoding.len()
            };

            // Get total tokens
            let max_new_tokens: u32 = if let Some(max_new_tokens) = max_new_tokens {
                max_new_tokens
            } else {
                self.max_total_tokens.saturating_sub(input_length) as u32
            };
            let total_tokens = input_length + max_new_tokens as usize;

            // Validate MaxTotalTokens
            if total_tokens > self.max_total_tokens {
                return Err(ValidationError::MaxTotalTokens(
                    self.max_total_tokens,
                    input_length,
                    max_new_tokens,
                ));
            }

            // Validate InputLength
            if input_length > self.max_input_length {
                return Err(ValidationError::InputLength(
                    self.max_input_length,
                    input_length,
                ));
            }

            let ids = encoding.get_ids();
            let input_ids = ids[ids.len().saturating_sub(input_length)..].to_owned();

            metrics::histogram!("tgi_request_input_length").record(input_length as f64);
            Ok((inputs, Some(input_ids), input_length, max_new_tokens))
        }
        // Return inputs without validation
        else {
            // In this case, we don't know the real length in tokens of the inputs
            // However, the inputs will be truncated by the python servers
            // We make sure that truncate + max_new_tokens <= self.max_total_tokens
            let max_new_tokens: u32 = if let Some(max_new_tokens) = max_new_tokens {
                max_new_tokens
            } else if let Some(truncate) = truncate {
                self.max_total_tokens.saturating_sub(truncate) as u32
            } else {
                return Err(ValidationError::UnsetMaxNewTokens);
            };
            let mut input_length = truncate.unwrap_or(self.max_input_length);

            // We don't have a tokenizer, therefore we have no idea how long the query is; let
            // them through and hope for the best.
            // Validate MaxNewTokens
            if (input_length as u32 + max_new_tokens) > self.max_total_tokens as u32 {
                input_length = input_length.saturating_sub(max_new_tokens as usize);
            }

            Ok((
                vec![Chunk::Text(inputs)],
                None,
                input_length,
                max_new_tokens,
            ))
        }
    }

    /// Validate a payload and get the number of tokens in the input
    #[instrument(skip_all)]
    pub(crate) async fn validate(
        &self,
        request: GenerateRequest,
    ) -> Result<ValidGenerateRequest, ValidationError> {
        let GenerateParameters {
            best_of,
            temperature,
            repetition_penalty,
            frequency_penalty,
            top_k,
            top_p,
            typical_p,
            do_sample,
            max_new_tokens,
            stop: stop_sequences,
            truncate,
            seed,
            watermark,
            decoder_input_details,
            top_n_tokens,
            grammar,
            adapter_id,
            ..
        } = request.parameters;

        // sampling must be true when best_of > 1
        let best_of = best_of.unwrap_or(1);
        let sampling = do_sample
            || temperature.is_some()
            || top_k.is_some()
            || top_p.is_some()
            || typical_p.is_some();

        if best_of > 1 && !sampling {
            return Err(BestOfSampling);
        }

        let temperature = temperature.unwrap_or(1.0);
        if temperature <= 0.0 {
            return Err(ValidationError::Temperature);
        }

        let repetition_penalty = repetition_penalty.unwrap_or(1.0);
        if repetition_penalty <= 0.0 {
            return Err(ValidationError::RepetitionPenalty);
        }

        let frequency_penalty = frequency_penalty.unwrap_or(0.0);
        if !(-2.0..=2.0).contains(&frequency_penalty) {
            return Err(ValidationError::FrequencyPenalty);
        }

        // Handled differently because the proto default value is not a valid value
        // for the user to request
        let top_p = top_p
            .map(|value| {
                if value <= 0.0 || value >= 1.0 {
                    return Err(ValidationError::TopP);
                }
                Ok(value)
            })
            .unwrap_or(Ok(1.0))?;

        let typical_p = typical_p
            .map(|value| {
                if value <= 0.0 || value >= 1.0 {
                    return Err(ValidationError::TypicalP);
                }
                Ok(value)
            })
            .unwrap_or(Ok(1.0))?;

        let top_k: u32 = top_k
            .map(|value| {
                if value <= 0 {
                    return Err(ValidationError::TopK);
                }
                Ok(value as u32)
            })
            .unwrap_or(Ok(0))?;

        if max_new_tokens == Some(0) {
            return Err(ValidationError::NegativeMaxNewTokens);
        }

        if stop_sequences.len() > self.max_stop_sequences {
            return Err(ValidationError::StopSequence(
                self.max_stop_sequences,
                stop_sequences.len(),
            ));
        }

        // If seed is None, assign a random one
        let seed = match seed {
            None => thread_rng().gen(),
            Some(seed) => {
                if best_of > 1 {
                    return Err(BestOfSeed);
                }
                seed
            }
        };

        let top_n_tokens = top_n_tokens
            .map(|value| {
                if value > self.max_top_n_tokens {
                    return Err(ValidationError::TopNTokens(self.max_top_n_tokens, value));
                }
                Ok(value)
            })
            .unwrap_or(Ok(0))?;

        // Check if inputs is empty
        if request.inputs.is_empty() {
            return Err(EmptyInput);
        }

        // Check if truncate is strictly positive and less than max_input_length
        let truncate = truncate
            .map(|value| {
                if value == 0 || value > self.max_input_length {
                    return Err(ValidationError::Truncate(self.max_input_length, value));
                }
                Ok(Some(value))
            })
            .unwrap_or(Ok(None))?;

        // Validate inputs
        let (inputs, input_ids, input_length, max_new_tokens) = self
            .validate_input(
                request.inputs,
                request.add_special_tokens,
                truncate,
                max_new_tokens,
            )
            .await?;

        // TODO: we should build the FSM here and pass the compiled FSM instead of the grammar
        // NOTE: this is currently difficult because we need the tokenizer in Python to build
        // the FSM and we'd have to load a copy of the tokenizer into our Pyo3 instance which
        // may be slow and memory intensive. Best case is to have a Rust implementation of the FSM
        // compiler and use that to build the FSM here.

        // Validate grammar and unpack the grammar and type for the proto message
        let grammar = match grammar {
            Some(grammar) => {
                // Ensure that grammar is not set if it's not supported
                if self.disable_grammar_support {
                    return Err(ValidationError::Grammar);
                }
                let valid_grammar = match grammar {
                    GrammarType::Json(json) => {
                        let json = match json {
                            // if the value is a string, we need to parse it again to
                            // make sure it's valid JSON
                            Value::String(s) => serde_json::from_str(&s)
                                .map_err(|e| ValidationError::InvalidGrammar(e.to_string())),
                            Value::Object(_) => Ok(json),
                            _ => Err(ValidationError::Grammar),
                        }?;

                        // Check if the json is a valid JSONSchema
                        JSONSchema::options()
                            .with_draft(Draft::Draft202012)
                            .compile(&json)
                            .map_err(|e| ValidationError::InvalidGrammar(e.to_string()))?;

                        // The schema can be valid but lack properties.
                        // We need properties for the grammar to be successfully parsed in Python.
                        // Therefore, we must check and throw an error if properties are missing.
                        json.get("properties")
                            .ok_or(ValidationError::InvalidGrammar(
                                "Grammar must have a 'properties' field".to_string(),
                            ))?;

                        // Serialize json to string
                        ValidGrammar::Json(
                            serde_json::to_string(&json)
                                .map_err(|e| ValidationError::InvalidGrammar(e.to_string()))?,
                        )
                    }
                    GrammarType::Regex(regex) => ValidGrammar::Regex(regex),
                };
                Some(valid_grammar)
            }
            None => None,
        };

        let parameters = ValidParameters {
            temperature,
            repetition_penalty,
            frequency_penalty,
            top_k,
            top_p,
            typical_p,
            do_sample,
            seed,
            watermark,
            grammar,
        };
        let stopping_parameters = ValidStoppingParameters {
            max_new_tokens,
            stop_sequences,
            ignore_eos_token: false,
        };

        metrics::histogram!("tgi_request_max_new_tokens").record(max_new_tokens as f64);

        Ok(ValidGenerateRequest {
            inputs,
            input_ids: input_ids.map(Arc::new),
            add_special_tokens: request.add_special_tokens,
            decoder_input_details,
            input_length: input_length as u32,
            truncate: truncate.unwrap_or(self.max_input_length) as u32,
            parameters,
            stopping_parameters,
            top_n_tokens,
            adapter_id,
        })
    }

    /// Validate the best_of parameter
    #[instrument(skip_all)]
    pub(crate) fn validate_best_of(&self, best_of: usize) -> Result<usize, ValidationError> {
        if self.max_best_of == 1 && best_of != 1 {
            return Err(ValidationError::BestOfDisabled);
        }

        if best_of > self.max_best_of {
            return Err(ValidationError::BestOf(self.max_best_of, best_of));
        }

        Ok(best_of)
    }
}

/// Round robin tokenization task
async fn round_robin_task(
    mut receiver: mpsc::UnboundedReceiver<TokenizerRequest>,
    senders: Vec<mpsc::UnboundedSender<TokenizerRequest>>,
) {
    loop {
        for sender in &senders {
            match receiver.recv().await {
                None => return,
                Some(request) => sender.send(request).unwrap(),
            };
        }
    }
}

/// Tokenization worker loop: answers requests from the round-robin dispatcher
fn tokenizer_worker(
    tokenizer: Tokenizer,
    config: Option<Config>,
    preprocessor_config: Option<HubPreprocessorConfig>,
    mut receiver: mpsc::UnboundedReceiver<TokenizerRequest>,
) {
    // Loop over requests
    while let Some(((inputs, add_special_tokens, truncate), response_tx, parent_span)) =
        receiver.blocking_recv()
    {
        parent_span.in_scope(|| {
            response_tx
                .send(prepare_input(
                    inputs,
                    truncate,
                    add_special_tokens,
                    &tokenizer,
                    config.as_ref(),
                    preprocessor_config.as_ref(),
                ))
                .unwrap_or(())
        })
    }
}

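/// Map a mimetype to the corresponding `ImageFormat`, if it is supported.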
fn format_from_mimetype(mimetype: &str) -> Option<ImageFormat> {
    match mimetype {
        "image/png" => Some(ImageFormat::Png),
        "image/jpeg" => Some(ImageFormat::Jpeg),
        "image/jpg" => Some(ImageFormat::Jpeg),
        "image/gif" => Some(ImageFormat::Gif),
        "image/webp" => Some(ImageFormat::WebP),
        "image/tiff" => Some(ImageFormat::Tiff),
        // "image/pnm"=>Some(ImageFormat::Pnm),
        // "image/tga"=>Some(ImageFormat::Tga),
        // "image/dds"=>Some(ImageFormat::Dds),
        // "image/bmp"=>Some(ImageFormat::Bmp),
        // "image/ico"=>Some(ImageFormat::Ico),
        // "image/x-exr"=>Some(ImageFormat::OpenExr),
        _ => None,
    }
}

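/// Map an `ImageFormat` back to its mimetype, falling back to `application/octet-stream`.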
fn format_to_mimetype(format: ImageFormat) -> String {
    match format {
        ImageFormat::Png => "image/png",
        ImageFormat::Jpeg => "image/jpeg",
        ImageFormat::Gif => "image/gif",
        ImageFormat::WebP => "image/webp",
        ImageFormat::Tiff => "image/tiff",
        _ => "application/octet-stream",
    }
    .to_string()
}

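/// Fetch an image referenced with inline markdown image syntax.
///
/// Accepts either a remote URL, e.g. `![](https://...)`, or an embedded data URI,
/// e.g. `![](data:image/png;base64,<data>)`, and returns the raw bytes, the
/// mimetype, and the decoded height and width.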
fn fetch_image(input: &str) -> Result<(Vec<u8>, String, usize, usize), ValidationError> {
    if input.starts_with("![](http://") || input.starts_with("![](https://") {
        let url = &input["![](".len()..input.len() - 1];
        let data = reqwest::blocking::get(url)?.bytes()?;

        let format = image::guess_format(&data)?;
        // TODO Remove this clone
        let img = ImageReader::with_format(Cursor::new(data.clone()), format).decode()?;
        let height: usize = img.height().try_into()?;
        let width: usize = img.width().try_into()?;
        let mimetype = format_to_mimetype(format);
        Ok((data.to_vec(), mimetype, height, width))
    } else if input.starts_with("![](data:") {
        // Remove ![](....)
        let content = &input["![](data:".len()..input.len() - 1];
        let tokens: Vec<_> = content.split(';').collect();
        if tokens.len() != 2 {
            return Err(ValidationError::InvalidImageContent(content.to_string()));
        }
        let mimetype = tokens[0];
        let content = tokens[1];

        if !content.starts_with("base64,") {
            return Err(ValidationError::InvalidImageContent(content.to_string()));
        }

        let data = STANDARD.decode(content["base64,".len()..].as_bytes())?;
        let img = if let Some(format) = format_from_mimetype(mimetype) {
            ImageReader::with_format(Cursor::new(&data), format).decode()?
        } else {
            ImageReader::new(Cursor::new(&data))
                .with_guessed_format()
                .map_err(|_io_error| ValidationError::InvalidImageContent(content.to_string()))?
                .decode()?
        };

        let height: usize = img.height().try_into()?;
        let width: usize = img.width().try_into()?;
        Ok((data, mimetype.to_string(), height, width))
    } else {
        Err(ValidationError::InvalidImageContent(input.to_string()))
    }
}

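/// Build the placeholder token string standing in for one image in the tokenizer
/// query, depending on the model configuration.
///
/// For Idefics2 the `<image>` slots are wrapped in `<fake_token_around_image>`
/// markers, and when the preprocessor enables image splitting the whole block is
/// repeated five times (one copy per sub-image).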
fn image_tokens(
    config: &Config,
    preprocessor_config: Option<&HubPreprocessorConfig>,
    height: usize,
    width: usize,
) -> String {
    use Config::*;
    use HubPreprocessorConfig::*;
    match config {
        Idefics => "<image>".to_string(),
        Mllama => "<|image|>".to_string(),
        Idefics2(config) => {
            const FAKE: &str = "<fake_token_around_image>";
            const IMAGE: &str = "<image>";

            let slots = config.get_number_of_features(height, width);

            let mut image_string = String::with_capacity(2 * FAKE.len() + slots * IMAGE.len());
            image_string.push_str(FAKE);
            image_string.extend(iter::repeat(IMAGE).take(slots));
            image_string.push_str(FAKE);

            if matches!(
                preprocessor_config,
                Some(Idefics2Processor(Idefics2Preprocessor {
                    do_image_splitting: true,
                    ..
                }))
            ) {
                image_string = image_string.repeat(5);
            };

            image_string
        }
        Paligemma(config) => "<image>".repeat(config.get_number_of_features(height, width)),
        LlavaNext(config) => "<image>".repeat(config.get_number_of_features(height, width)),
        _ => unimplemented!("Images tokens are not supported for this model configuration"),
    }
}

fn image_tokens_fixup(config: &Config, text: String) -> String {
    match config {
        Config::Idefics2(_) => {
            const FAKE: &str = "<fake_token_around_image>";
            text.replace(&format!("{FAKE}{FAKE}"), FAKE)
        }
        _ => text,
    }
}

/// Tokenize the input, replacing inline markdown images with the model's image tokens
fn prepare_input(
    inputs: String,
    _truncate: Option<usize>,
    add_special_tokens: bool,
    tokenizer: &Tokenizer,
    config: Option<&Config>,
    preprocessor_config: Option<&HubPreprocessorConfig>,
) -> Result<(tokenizers::Encoding, Vec<Chunk>), ValidationError> {
    use Config::*;
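    // Matches inline markdown image syntax: ![](...)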
    static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"!\[\]\([^\)]*\)").unwrap());
    let (tokenizer_query, input_chunks) = match config {
        Some(config @ (Idefics | Mllama | Idefics2(_) | Paligemma(_) | LlavaNext(_))) => {
            let mut input_chunks = Vec::new();
            let mut tokenizer_query = String::with_capacity(inputs.len());
            let mut start = 0;
            for chunk in RE.find_iter(&inputs) {
                let chunk_start = chunk.start();
                let chunk_end = chunk.end();
                if chunk_start != start {
                    input_chunks.push(Chunk::Text(inputs[start..chunk_start].to_string()));
                    tokenizer_query.push_str(&inputs[start..chunk_start]);
                }
                let (data, mimetype, height, width) = fetch_image(&inputs[chunk_start..chunk_end])?;
                input_chunks.push(Chunk::Image(Image { data, mimetype }));
                tokenizer_query.push_str(&image_tokens(config, preprocessor_config, height, width));
                start = chunk_end;
            }
            if start != inputs.len() {
                input_chunks.push(Chunk::Text(inputs[start..].to_string()));
                tokenizer_query.push_str(&inputs[start..]);
            }

            tokenizer_query = image_tokens_fixup(config, tokenizer_query);

            (tokenizer_query, input_chunks)
        }
        _ => (inputs.clone(), vec![Chunk::Text(inputs)]),
    };

    // Get the number of tokens in the input
    let encoding = tokenizer
        .encode(tokenizer_query, add_special_tokens)
        .map_err(|err| ValidationError::Tokenizer(err.to_string()))?;

    Ok((encoding, input_chunks))
}

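/// Request sent to a tokenization worker: the `(inputs, add_special_tokens, truncate)`
/// payload, a oneshot channel for the response, and the caller's tracing span.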
type TokenizerRequest = (
    (String, bool, Option<usize>),
    oneshot::Sender<Result<(tokenizers::Encoding, Vec<Chunk>), ValidationError>>,
    Span,
);

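/// Raw image bytes together with their mimetype.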
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Image {
    pub data: Vec<u8>,
    pub mimetype: String,
}

#[derive(Debug, Clone, Eq, PartialEq)]
pub enum Chunk {
    Text(String),
    Image(Image),
}

/// Convert input chunks to a stringly-typed input for backwards
/// compat for backends that haven't implemented chunked inputs.
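/// Images are rendered back as inline markdown data URIs
/// (`![](data:<mimetype>;base64,<data>)`), the same syntax parsed by `fetch_image`.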
pub trait ChunksToString {
    /// Convert chunks to string.
    fn chunks_to_string(&self) -> String;
}

impl ChunksToString for Vec<Chunk> {
    fn chunks_to_string(&self) -> String {
        let mut output = String::new();
        self.iter().for_each(|c| match &c {
            Chunk::Text(text) => output.push_str(text),
            Chunk::Image(Image { data, mimetype }) => {
                let encoded = STANDARD.encode(data);
                output.push_str(&format!("![](data:{};base64,{})", mimetype, encoded))
            }
        });
        output
    }
}

#[derive(Debug, Clone)]
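/// Grammar constraint in its validated form.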
pub enum ValidGrammar {
    Json(String),
    Regex(String),
}

#[derive(Debug, Clone)]
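/// Sampling and penalty parameters after validation.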
pub struct ValidParameters {
    /// Exponential scaling of the output probability distribution
    pub temperature: f32,
    /// Restrict sampling to the `k` highest-probability tokens
    pub top_k: u32,
    /// Restrict sampling to the smallest set of tokens whose cumulative probability exceeds `top_p`
    pub top_p: f32,
    /// Restrict sampling to locally typical tokens with cumulative mass `typical_p`
    pub typical_p: f32,
    /// Apply sampling on the logits
    pub do_sample: bool,
    /// Random seed for sampling
    pub seed: u64,
    /// Repetition penalty
    pub repetition_penalty: f32,
    /// Frequency penalty
    pub frequency_penalty: f32,
    /// Token watermarking using "A Watermark for Large Language Models"
    pub watermark: bool,
    /// Grammar constraint (applied if not empty)
    pub grammar: Option<ValidGrammar>,
}

#[derive(Debug, Clone)]
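/// Stopping criteria after validation.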
pub struct ValidStoppingParameters {
    /// Maximum number of generated tokens
    pub max_new_tokens: u32,
    /// Optional stopping sequences
    pub stop_sequences: Vec<String>,
    /// Ignore end of sequence token
    /// (used for benchmarking)
    pub ignore_eos_token: bool,
}

#[derive(Debug, Clone)]
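/// A generate request that passed validation, ready to be queued.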
pub struct ValidGenerateRequest {
    pub inputs: Vec<Chunk>,
    pub input_ids: Option<Arc<Vec<u32>>>,
    pub input_length: u32,
    pub truncate: u32,
    pub add_special_tokens: bool,
    pub decoder_input_details: bool,
    pub parameters: ValidParameters,
    pub stopping_parameters: ValidStoppingParameters,
    pub top_n_tokens: u32,
    pub adapter_id: Option<String>,
}

#[derive(Error, Debug)]
pub enum ValidationError {
    #[error("`best_of` must be > 0 and <= {0}. Given: {1}")]
    BestOf(usize, usize),
    #[error("`best_of` != 1 is not allowed for this endpoint")]
    BestOfDisabled,
    #[error("you must use sampling when `best_of` is > 1")]
    BestOfSampling,
    #[error("`seed` must not be set when `best_of` > 1")]
    BestOfSeed,
    #[error("`best_of` != 1 is not supported when streaming tokens")]
    BestOfStream,
    #[error("`top_n_tokens` must be >= 0 and <= {0}. Given: {1}")]
    TopNTokens(u32, u32),
    #[error("`top_n_tokens` != 0 is not allowed for this endpoint")]
    TopNTokensDisabled,
    #[error("`decoder_input_details` == true is not supported when streaming tokens")]
    PrefillDetailsStream,
    #[error("`temperature` must be strictly positive")]
    Temperature,
    #[error("`repetition_penalty` must be strictly positive")]
    RepetitionPenalty,
    #[error("`frequency_penalty` must be >= -2.0 and <= 2.0")]
    FrequencyPenalty,
    #[error("`top_p` must be > 0.0 and < 1.0")]
    TopP,
    #[error("`top_k` must be strictly positive")]
    TopK,
    #[error("`truncate` must be strictly positive and less than {0}. Given: {1}")]
    Truncate(usize, usize),
    #[error("`typical_p` must be > 0.0 and < 1.0")]
    TypicalP,
    #[error("one of `max_new_tokens` or `truncate` must be set if a fast tokenizer is not in use")]
    UnsetMaxNewTokens,
    #[error("`max_new_tokens` must be strictly positive")]
    NegativeMaxNewTokens,
    #[error("`max_new_tokens` must be <= {0}. Given: {1}")]
    MaxNewTokens(usize, u32),
    #[error("`inputs` tokens + `max_new_tokens` must be <= {0}. Given: {1} `inputs` tokens and {2} `max_new_tokens`")]
    MaxTotalTokens(usize, usize, u32),
    #[error("`inputs` must have less than {0} tokens. Given: {1}")]
    InputLength(usize, usize),
    #[error("`inputs` cannot be empty")]
    EmptyInput,
    #[error("`stop` supports up to {0} stop sequences. Given: {1}")]
    StopSequence(usize, usize),
    #[error("tokenizer error {0}")]
    Tokenizer(String),
    #[error("grammar is not supported")]
    Grammar,
    #[error("grammar is not valid: {0}")]
    InvalidGrammar(String),
    #[error("base64 encoding is invalid: {0}")]
    InvalidBase64(#[from] base64::DecodeError),
    #[error("invalid image: {0}")]
    InvalidImage(#[from] image::ImageError),
    #[error("invalid integer: {0}")]
    InvalidInt(#[from] core::num::TryFromIntError),
    #[error("invalid image content: {0}")]
    InvalidImageContent(String),
    #[error("Could not fetch image: {0}")]
    FailedFetchImage(#[from] reqwest::Error),
    #[error("{0} modality is not supported")]
    UnsupportedModality(&'static str),
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::{Idefics2, PaliTextConfig, Paligemma};
    use crate::default_parameters;
    use crate::tests::get_tokenizer;

    #[tokio::test]
    async fn test_validation_max_new_tokens() {
        let tokenizer = None;
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let max_new_tokens = 10;
        match validation
            .validate_input("Hello".to_string(), true, None, Some(max_new_tokens))
            .await
        {
            // Err(ValidationError::MaxNewTokens(1, 10)) => (),
            Ok((_s, _, 0, 10)) => (),
            r => panic!("Unexpected not max new tokens: {r:?}"),
        }
    }

    #[tokio::test]
    async fn test_validation_input_length() {
        let tokenizer = Some(get_tokenizer().await);
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let disable_grammar_support = true;
        let workers = 1;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let max_new_tokens = 10;
        match validation
            .validate_input("Hello".to_string(), true, None, Some(max_new_tokens))
            .await
        {
            Err(ValidationError::MaxTotalTokens(6, 1, 10)) => (),
            _ => panic!("Unexpected not max new tokens"),
        }
    }

    #[tokio::test]
    async fn test_validation_best_of_sampling() {
        let tokenizer = Some(get_tokenizer().await);
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );
        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    best_of: Some(2),
                    do_sample: false,
                    ..default_parameters()
                },
            })
            .await
        {
            Err(ValidationError::BestOfSampling) => (),
            _ => panic!("Unexpected not best of sampling"),
        }
    }

    #[tokio::test]
    async fn test_validation_top_p() {
        let tokenizer = Some(get_tokenizer().await);
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 106;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );
        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_p: Some(1.0),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
        {
            Err(ValidationError::TopP) => (),
            _ => panic!("Unexpected top_p"),
        }

        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_p: Some(0.99),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
        {
            Ok(_) => (),
            _ => panic!("Unexpected top_p error"),
        }

        let valid_request = validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_p: None,
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();
        // top_p == 1.0 is invalid for users to ask for but it's the default resolved value.
        assert_eq!(valid_request.parameters.top_p, 1.0);
    }

    #[tokio::test]
    async fn test_validation_top_n_tokens() {
        let tokenizer = Some(get_tokenizer().await);
        let max_best_of = 2;
        let max_stop_sequences = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 106;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequences,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );
        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: Some(5),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
        {
            Err(ValidationError::TopNTokens(4, 5)) => (),
            _ => panic!("Unexpected top_n_tokens"),
        }

        validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: Some(4),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: Some(0),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        let valid_request = validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: None,
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        assert_eq!(valid_request.top_n_tokens, 0);
    }

    static PIXEL_GIF: &str = "R0lGODdhAQABAIEAAP///wAAAAAAAAAAACwAAAAAAQABAAAIBAABBAQAOw==";

    #[tokio::test]
    async fn test_prepare_input_chunks() {
        let pixel_data = STANDARD.decode(PIXEL_GIF).unwrap();

        let tokenizer = Some(get_tokenizer().await);

        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let disable_grammar_support = true;
        let workers = 1;
        let config = Config::Paligemma(Paligemma {
            text_config: PaliTextConfig {
                num_image_tokens: 1,
            },
        });
        let validation = Validation::new(
            workers,
            tokenizer,
            Some(config),
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let chunks = match validation
            .tokenize(
                format!("test![](data:image/gif;base64,{})", PIXEL_GIF),
                true,
                None,
            )
            .await
        {
            Ok(Some((_encoding, chunks))) => chunks,
            _ => panic!("Unexpected tokenization failure"),
        };

        assert!(
            chunks
                == vec![
                    Chunk::Text("test".to_string()).into(),
                    Chunk::Image(Image {
                        data: pixel_data.clone(),
                        mimetype: "image/gif".to_string()
                    })
                    .into()
                ],
            "Failed to process images",
        );
    }

    #[tokio::test]
    async fn test_idefics2_correct_n_fake_tokens() {
        let pixel_data = STANDARD.decode(PIXEL_GIF).unwrap();

        let tokenizer = Some(get_tokenizer().await);

        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let disable_grammar_support = true;
        let workers = 1;
        let config = Config::Idefics2(Idefics2 {});
        let validation = Validation::new(
            workers,
            tokenizer,
            Some(config),
            Some(HubPreprocessorConfig::Idefics2Processor(
                Idefics2Preprocessor {
                    do_image_splitting: true,
                },
            )),
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let (encoding, chunks) = match validation
            .tokenize(
                format!(
                    "test![](data:image/gif;base64,{})![](data:image/gif;base64,{})",
                    PIXEL_GIF, PIXEL_GIF
                ),
                true,
                None,
            )
            .await
        {
            Ok(Some((encoding, chunks))) => (encoding, chunks),
            _ => panic!("Unexpected tokenization failure"),
        };

        assert!(
            chunks
                == vec![
                    Chunk::Text("test".to_string()).into(),
                    Chunk::Image(Image {
                        data: pixel_data.clone(),
                        mimetype: "image/gif".to_string()
                    })
                    .into(),
                    Chunk::Image(Image {
                        data: pixel_data.clone(),
                        mimetype: "image/gif".to_string()
                    })
                    .into()
                ],
            "Failed to process images",
        );

        // Verify the number of fake tokens:
        //
        // - Two images surrounded/separated by a fake token = 3.
        // - Both are split in 5 subimages, separated by a fake token: 2 * 4
        //
        // Fake tokens get split up by the testing tokenizer, but we don't care.
        assert_eq!(
            encoding
                .get_tokens()
                .iter()
                .filter(|t| *t == "fake")
                .count(),
            11
        );
    }
}