/// Payload validation logic
use crate::config::Config;
use crate::validation::ValidationError::{BestOfSampling, BestOfSeed, EmptyInput};
use crate::{
    GenerateParameters, GenerateRequest, GrammarType, HubPreprocessorConfig, Idefics2Preprocessor,
};
use base64::{engine::general_purpose::STANDARD, Engine};
use image::{ImageFormat, ImageReader};
use jsonschema::{Draft, JSONSchema};
use rand::{thread_rng, Rng};
use serde_json::Value;
use std::io::Cursor;
use std::iter;
use std::sync::Arc;
use thiserror::Error;
use tokenizers::tokenizer::Tokenizer;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tracing::{instrument, Span};
use {once_cell::sync::Lazy, regex::Regex};

/// Validation
#[derive(Debug, Clone)]
pub struct Validation {
    /// Validation parameters
    max_best_of: usize,
    max_stop_sequences: usize,
    max_top_n_tokens: u32,
    max_input_length: usize,
    max_total_tokens: usize,
    disable_grammar_support: bool,
    /// Channel to communicate with the background tokenization task
    sender: Option<mpsc::UnboundedSender<TokenizerRequest>>,
}

impl Validation {
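    // Tokenization is offloaded to dedicated blocking worker threads (see
    // `tokenizer_worker` below); a small async task dispatches incoming
    // requests to the workers round-robin.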
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn new(
        workers: usize,
        tokenizer: Option<Tokenizer>,
        config: Option<Config>,
        preprocessor_config: Option<HubPreprocessorConfig>,
        max_best_of: usize,
        max_stop_sequences: usize,
        max_top_n_tokens: u32,
        max_input_length: usize,
        max_total_tokens: usize,
        disable_grammar_support: bool,
    ) -> Self {
        // If we have a fast tokenizer
        let sender = if let Some(tokenizer) = tokenizer {
            // Create round robin channel
            let (validation_sender, validation_round_robin_receiver) = mpsc::unbounded_channel();
            let mut senders = Vec::with_capacity(workers);

            // Create workers
            for _ in 0..workers {
                let tokenizer_clone = tokenizer.clone();
                let config_clone = config.clone();
                let preprocessor_config_clone = preprocessor_config.clone();
                let (tokenizer_sender, tokenizer_receiver) = mpsc::unbounded_channel();
                senders.push(tokenizer_sender);

                // Spawn worker
                tokio::task::spawn_blocking(move || {
                    tokenizer_worker(
                        tokenizer_clone,
                        config_clone,
                        preprocessor_config_clone,
                        tokenizer_receiver,
                    )
                });
            }

            // Create tokenization round robin task
            tokio::spawn(round_robin_task(validation_round_robin_receiver, senders));

            Some(validation_sender)
        } else {
            None
        };

        Self {
            max_best_of,
            sender,
            max_stop_sequences,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        }
    }

    #[instrument(skip(self, inputs))]
    pub async fn tokenize(
        &self,
        inputs: String,
        add_special_tokens: bool,
        truncate: Option<usize>,
    ) -> Result<Option<(tokenizers::Encoding, Vec<Chunk>)>, ValidationError> {
        // If we have a fast tokenizer
        if let Some(sender) = &self.sender {
            // Create response channel
            let (response_sender, response_receiver) = oneshot::channel();
            // Send request to the background validation task
            // Unwrap is safe here
            sender
                .send((
                    (inputs, add_special_tokens, truncate),
                    response_sender,
                    Span::current(),
                ))
                .unwrap();

            // Await on response channel
            // Unwrap is safe here
            let encoding = response_receiver.await.unwrap()?;
            Ok(Some(encoding))
        } else {
            Ok(None)
        }
    }

    #[allow(clippy::type_complexity)]
    #[instrument(skip(self, inputs))]
    async fn validate_input(
        &self,
        inputs: String,
        add_special_tokens: bool,
        truncate: Option<usize>,
        max_new_tokens: Option<u32>,
    ) -> Result<(Vec<Chunk>, Option<Vec<u32>>, usize, u32), ValidationError> {
        // If we have a fast tokenizer
        if let Some((encoding, inputs)) = self
            .tokenize(inputs.clone(), add_special_tokens, truncate)
            .await?
        {
            let input_length = if let Some(truncate) = truncate {
                std::cmp::min(encoding.len(), truncate)
            } else {
                encoding.len()
            };

            // Get total tokens
            let max_new_tokens: u32 = if let Some(max_new_tokens) = max_new_tokens {
                max_new_tokens
            } else {
                self.max_total_tokens.saturating_sub(input_length) as u32
            };
            let total_tokens = input_length + max_new_tokens as usize;

            // Validate MaxTotalTokens
            if total_tokens > self.max_total_tokens {
                return Err(ValidationError::MaxTotalTokens(
                    self.max_total_tokens,
                    input_length,
                    max_new_tokens,
                ));
            }

            // Validate InputLength
            if input_length > self.max_input_length {
                return Err(ValidationError::InputLength(
                    self.max_input_length,
                    input_length,
                ));
            }

            let ids = encoding.get_ids();
            let input_ids = ids[ids.len().saturating_sub(input_length)..].to_owned();

            metrics::histogram!("tgi_request_input_length").record(input_length as f64);
            Ok((inputs, Some(input_ids), input_length, max_new_tokens))
        }
        // Return inputs without validation
        else {
            // In this case, we don't know the real length in tokens of the inputs
            // However, the inputs will be truncated by the Python servers
            // We make sure that truncate + max_new_tokens <= self.max_total_tokens
            let max_new_tokens: u32 = if let Some(max_new_tokens) = max_new_tokens {
                max_new_tokens
            } else if let Some(truncate) = truncate {
                self.max_total_tokens.saturating_sub(truncate) as u32
            } else {
                return Err(ValidationError::UnsetMaxNewTokens);
            };
            let mut input_length = truncate.unwrap_or(self.max_input_length);

            // We don't have a tokenizer, so we have no idea how long the query
            // is; let it through and hope for the best.
            // Validate MaxNewTokens
            if (input_length as u32 + max_new_tokens) > self.max_total_tokens as u32 {
                input_length = input_length.saturating_sub(max_new_tokens as usize);
            }

            Ok((
                vec![Chunk::Text(inputs)],
                None,
                input_length,
                max_new_tokens,
            ))
        }
    }

    /// Validate a payload and get the number of tokens in the input
    #[instrument(skip_all)]
    pub(crate) async fn validate(
        &self,
        request: GenerateRequest,
    ) -> Result<ValidGenerateRequest, ValidationError> {
        let GenerateParameters {
            best_of,
            temperature,
            repetition_penalty,
            frequency_penalty,
            top_k,
            top_p,
            typical_p,
            do_sample,
            max_new_tokens,
            stop: stop_sequences,
            truncate,
            seed,
            watermark,
            decoder_input_details,
            top_n_tokens,
            grammar,
            adapter_id,
            ..
        } = request.parameters;

        // sampling must be true when best_of > 1
        let best_of = best_of.unwrap_or(1);
        let sampling = do_sample
            || temperature.is_some()
            || top_k.is_some()
            || top_p.is_some()
            || typical_p.is_some();

        if best_of > 1 && !sampling {
            return Err(BestOfSampling);
        }

        let temperature = temperature.unwrap_or(1.0);
        if temperature <= 0.0 {
            return Err(ValidationError::Temperature);
        }

        let repetition_penalty = repetition_penalty.unwrap_or(1.0);
        if repetition_penalty <= 0.0 {
            return Err(ValidationError::RepetitionPenalty);
        }

        let frequency_penalty = frequency_penalty.unwrap_or(0.0);
        if !(-2.0..=2.0).contains(&frequency_penalty) {
            return Err(ValidationError::FrequencyPenalty);
        }

        // Different because the proto default value is not a valid value
        // for the user
        let top_p = top_p
            .map(|value| {
                if value <= 0.0 || value >= 1.0 {
                    return Err(ValidationError::TopP);
                }
                Ok(value)
            })
            .unwrap_or(Ok(1.0))?;

        let typical_p = typical_p
            .map(|value| {
                if value <= 0.0 || value >= 1.0 {
                    return Err(ValidationError::TypicalP);
                }
                Ok(value)
            })
            .unwrap_or(Ok(1.0))?;

        let top_k: u32 = top_k
            .map(|value| {
                if value <= 0 {
                    return Err(ValidationError::TopK);
                }
                Ok(value as u32)
            })
            .unwrap_or(Ok(0))?;

        if max_new_tokens == Some(0) {
            return Err(ValidationError::NegativeMaxNewTokens);
        }

        if stop_sequences.len() > self.max_stop_sequences {
            return Err(ValidationError::StopSequence(
                self.max_stop_sequences,
                stop_sequences.len(),
            ));
        }

        // If seed is None, assign a random one
        let seed = match seed {
            None => thread_rng().gen(),
            Some(seed) => {
                if best_of > 1 {
                    return Err(BestOfSeed);
                }
                seed
            }
        };

        let top_n_tokens = top_n_tokens
            .map(|value| {
                if value > self.max_top_n_tokens {
                    return Err(ValidationError::TopNTokens(self.max_top_n_tokens, value));
                }
                Ok(value)
            })
            .unwrap_or(Ok(0))?;

        // Check if inputs is empty
        if request.inputs.is_empty() {
            return Err(EmptyInput);
        }

        // Check if truncate is strictly positive and less than max_input_length
        let truncate = truncate
            .map(|value| {
                if value == 0 || value > self.max_input_length {
                    return Err(ValidationError::Truncate(self.max_input_length, value));
                }
                Ok(Some(value))
            })
            .unwrap_or(Ok(None))?;

        // Validate inputs
        let (inputs, input_ids, input_length, max_new_tokens) = self
            .validate_input(
                request.inputs,
                request.add_special_tokens,
                truncate,
                max_new_tokens,
            )
            .await?;

        // TODO: we should build the FSM here and pass the compiled FSM instead of the grammar
        // NOTE: this is currently difficult because we need the tokenizer in Python to build
        // the FSM and we'd have to load a copy of the tokenizer into our Pyo3 instance which
        // may be slow and memory intensive. Best case is to have a Rust implementation of the FSM
        // compiler and use that to build the FSM here.

        // Validate grammar and unpack the grammar and type for the proto message
        let grammar = match grammar {
            Some(grammar) => {
                // Ensure that grammar is not set if it's not supported
                if self.disable_grammar_support {
                    return Err(ValidationError::Grammar);
                }
                let valid_grammar = match grammar {
                    GrammarType::Json(json) => {
                        let json = match json {
                            // if the value is a string, parse it again to make
                            // sure it is valid JSON
                            Value::String(s) => serde_json::from_str(&s)
                                .map_err(|e| ValidationError::InvalidGrammar(e.to_string())),
                            Value::Object(_) => Ok(json),
                            _ => Err(ValidationError::Grammar),
                        }?;

                        // Check if the json is a valid JSONSchema
                        JSONSchema::options()
                            .with_draft(Draft::Draft202012)
                            .compile(&json)
                            .map_err(|e| ValidationError::InvalidGrammar(e.to_string()))?;

                        // The schema can be valid but lack properties.
                        // We need properties for the grammar to be successfully parsed in Python.
                        // Therefore, we must check and throw an error if properties are missing.
                        json.get("properties")
                            .ok_or(ValidationError::InvalidGrammar(
                                "Grammar must have a 'properties' field".to_string(),
                            ))?;

                        // Serialize json to string
                        ValidGrammar::Json(
                            serde_json::to_string(&json)
                                .map_err(|e| ValidationError::InvalidGrammar(e.to_string()))?,
                        )
                    }
                    GrammarType::Regex(regex) => ValidGrammar::Regex(regex),
                };
                Some(valid_grammar)
            }
            None => None,
        };

        let parameters = ValidParameters {
            temperature,
            repetition_penalty,
            frequency_penalty,
            top_k,
            top_p,
            typical_p,
            do_sample,
            seed,
            watermark,
            grammar,
        };
        let stopping_parameters = ValidStoppingParameters {
            max_new_tokens,
            stop_sequences,
            ignore_eos_token: false,
        };

        metrics::histogram!("tgi_request_max_new_tokens").record(max_new_tokens as f64);

        Ok(ValidGenerateRequest {
            inputs,
            input_ids: input_ids.map(Arc::new),
            add_special_tokens: request.add_special_tokens,
            decoder_input_details,
            input_length: input_length as u32,
            truncate: truncate.unwrap_or(self.max_input_length) as u32,
            parameters,
            stopping_parameters,
            top_n_tokens,
            adapter_id,
        })
    }

    /// Validate the best_of parameter
    #[instrument(skip_all)]
    pub(crate) fn validate_best_of(&self, best_of: usize) -> Result<usize, ValidationError> {
        if self.max_best_of == 1 && best_of != 1 {
            return Err(ValidationError::BestOfDisabled);
        }

        if best_of > self.max_best_of {
            return Err(ValidationError::BestOf(self.max_best_of, best_of));
        }

        Ok(best_of)
    }
}

/// Round robin tokenization task
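/// Forwards each incoming request to the next worker sender in turn,
/// exiting once the receiving end is closed.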
async fn round_robin_task(
    mut receiver: mpsc::UnboundedReceiver<TokenizerRequest>,
    senders: Vec<mpsc::UnboundedSender<TokenizerRequest>>,
) {
    loop {
        for sender in &senders {
            match receiver.recv().await {
                None => return,
                Some(request) => sender.send(request).unwrap(),
            };
        }
    }
}

/// Tokenization worker: process requests until the channel closes
fn tokenizer_worker(
    tokenizer: Tokenizer,
    config: Option<Config>,
    preprocessor_config: Option<HubPreprocessorConfig>,
    mut receiver: mpsc::UnboundedReceiver<TokenizerRequest>,
) {
    // Loop over requests
    while let Some(((inputs, add_special_tokens, truncate), response_tx, parent_span)) =
        receiver.blocking_recv()
    {
        parent_span.in_scope(|| {
            response_tx
                .send(prepare_input(
                    inputs,
                    truncate,
                    add_special_tokens,
                    &tokenizer,
                    config.as_ref(),
                    preprocessor_config.as_ref(),
                ))
                .unwrap_or(())
        })
    }
}

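/// Map a MIME type string to the corresponding `ImageFormat`, if supported.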
fn format_from_mimetype(mimetype: &str) -> Option<ImageFormat> {
    match mimetype {
        "image/png" => Some(ImageFormat::Png),
        "image/jpeg" => Some(ImageFormat::Jpeg),
        "image/jpg" => Some(ImageFormat::Jpeg),
        "image/gif" => Some(ImageFormat::Gif),
        "image/webp" => Some(ImageFormat::WebP),
        "image/tiff" => Some(ImageFormat::Tiff),
        // "image/pnm"=>Some(ImageFormat::Pnm),
        // "image/tga"=>Some(ImageFormat::Tga),
        // "image/dds"=>Some(ImageFormat::Dds),
        // "image/bmp"=>Some(ImageFormat::Bmp),
        // "image/ico"=>Some(ImageFormat::Ico),
        // "image/x-exr"=>Some(ImageFormat::OpenExr),
        _ => None,
    }
}

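/// Map an `ImageFormat` back to its MIME type, falling back to
/// `application/octet-stream` for formats without a dedicated entry.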
fn format_to_mimetype(format: ImageFormat) -> String {
    match format {
        ImageFormat::Png => "image/png",
        ImageFormat::Jpeg => "image/jpeg",
        ImageFormat::Gif => "image/gif",
        ImageFormat::WebP => "image/webp",
        ImageFormat::Tiff => "image/tiff",
        _ => "application/octet-stream",
    }
    .to_string()
}

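/// Fetch or decode an image written in Markdown image syntax:
/// `![](http(s)://...)` is downloaded, while `![](data:<mime>;base64,<data>)`
/// is decoded in place. Returns the raw bytes, MIME type, height, and width.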
fn fetch_image(input: &str) -> Result<(Vec<u8>, String, usize, usize), ValidationError> {
    if input.starts_with("![](http://") || input.starts_with("![](https://") {
        let url = &input["![](".len()..input.len() - 1];
        let data = reqwest::blocking::get(url)?.bytes()?;

        let format = image::guess_format(&data)?;
        // TODO Remove this clone
        let img = ImageReader::with_format(Cursor::new(data.clone()), format).decode()?;
        let height: usize = img.height().try_into()?;
        let width: usize = img.width().try_into()?;
        let mimetype = format_to_mimetype(format);
        Ok((data.to_vec(), mimetype, height, width))
    } else if input.starts_with("![](data:") {
        // Remove ![](....)
        let content = &input["![](data:".len()..input.len() - 1];
        let tokens: Vec<_> = content.split(';').collect();
        if tokens.len() != 2 {
            return Err(ValidationError::InvalidImageContent(content.to_string()));
        }
        let mimetype = tokens[0];
        let content = tokens[1];

        if !content.starts_with("base64,") {
            return Err(ValidationError::InvalidImageContent(content.to_string()));
        }

        let data = STANDARD.decode(content["base64,".len()..].as_bytes())?;
        let img = if let Some(format) = format_from_mimetype(mimetype) {
            ImageReader::with_format(Cursor::new(&data), format).decode()?
        } else {
            ImageReader::new(Cursor::new(&data))
                .with_guessed_format()
                .map_err(|_io_error| ValidationError::InvalidImageContent(content.to_string()))?
                .decode()?
        };

        let height: usize = img.height().try_into()?;
        let width: usize = img.width().try_into()?;
        Ok((data, mimetype.to_string(), height, width))
    } else {
        Err(ValidationError::InvalidImageContent(input.to_string()))
    }
}

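/// Build the placeholder token string that stands in for an image in the
/// tokenizer query, for the given model configuration. For Idefics2 with
/// image splitting enabled, the placeholder sequence is repeated five times,
/// once per transmitted sub-image.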
fn image_tokens(
    config: &Config,
    preprocessor_config: Option<&HubPreprocessorConfig>,
    height: usize,
    width: usize,
) -> String {
    use Config::*;
    use HubPreprocessorConfig::*;
    match config {
        Idefics => "<image>".to_string(),
        Idefics2(config) => {
            const FAKE: &str = "<fake_token_around_image>";
            const IMAGE: &str = "<image>";

            let slots = config.get_number_of_features(height, width);

            let mut image_string = String::with_capacity(2 * FAKE.len() + slots * IMAGE.len());
            image_string.push_str(FAKE);
            image_string.extend(iter::repeat(IMAGE).take(slots));
            image_string.push_str(FAKE);

            if matches!(
                preprocessor_config,
                Some(Idefics2Processor(Idefics2Preprocessor {
                    do_image_splitting: true,
                    ..
                }))
            ) {
                image_string = image_string.repeat(5);
            };

            image_string
        }
        Paligemma(config) => "<image>".repeat(config.get_number_of_features(height, width)),
        LlavaNext(config) => "<image>".repeat(config.get_number_of_features(height, width)),
        _ => unimplemented!("Images tokens are not supported for this model configuration"),
    }
}

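/// Collapse the doubled `<fake_token_around_image>` produced by back-to-back
/// images: Idefics2 expects a single fake token between adjacent images.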
fn image_tokens_fixup(config: &Config, text: String) -> String {
    match config {
        Config::Idefics2(_) => {
            const FAKE: &str = "<fake_token_around_image>";
            text.replace(&format!("{FAKE}{FAKE}"), FAKE)
        }
        _ => text,
    }
}

/// Split the inputs into text and image chunks, replace each image with the
/// model's placeholder tokens, and tokenize the resulting query
fn prepare_input(
    inputs: String,
    _truncate: Option<usize>,
    add_special_tokens: bool,
    tokenizer: &Tokenizer,
    config: Option<&Config>,
    preprocessor_config: Option<&HubPreprocessorConfig>,
) -> Result<(tokenizers::Encoding, Vec<Chunk>), ValidationError> {
    use Config::*;
    static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"!\[\]\([^\)]*\)").unwrap());
    let (tokenizer_query, input_chunks) = match config {
        Some(config @ (Idefics | Idefics2(_) | Paligemma(_) | LlavaNext(_))) => {
            let mut input_chunks = Vec::new();
            let mut tokenizer_query = String::with_capacity(inputs.len());
            let mut start = 0;
            for chunk in RE.find_iter(&inputs) {
                let chunk_start = chunk.start();
                let chunk_end = chunk.end();
                if chunk_start != start {
                    input_chunks.push(Chunk::Text(inputs[start..chunk_start].to_string()));
                    tokenizer_query.push_str(&inputs[start..chunk_start]);
                }
                let (data, mimetype, height, width) = fetch_image(&inputs[chunk_start..chunk_end])?;
                input_chunks.push(Chunk::Image(Image { data, mimetype }));
                tokenizer_query.push_str(&image_tokens(config, preprocessor_config, height, width));
                start = chunk_end;
            }
            if start != inputs.len() {
                input_chunks.push(Chunk::Text(inputs[start..].to_string()));
                tokenizer_query.push_str(&inputs[start..]);
            }

            tokenizer_query = image_tokens_fixup(config, tokenizer_query);

            (tokenizer_query, input_chunks)
        }
        _ => (inputs.clone(), vec![Chunk::Text(inputs)]),
    };

    // Get the number of tokens in the input
    let encoding = tokenizer
        .encode(tokenizer_query, add_special_tokens)
        .map_err(|err| ValidationError::Tokenizer(err.to_string()))?;

    Ok((encoding, input_chunks))
}

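/// Request sent to a tokenizer worker: the `(inputs, add_special_tokens,
/// truncate)` payload, a channel for the response, and the caller's tracing
/// span.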
type TokenizerRequest = (
    (String, bool, Option<usize>),
    oneshot::Sender<Result<(tokenizers::Encoding, Vec<Chunk>), ValidationError>>,
    Span,
);

#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Image {
    pub data: Vec<u8>,
    pub mimetype: String,
}

#[derive(Debug, Clone, Eq, PartialEq)]
pub enum Chunk {
    Text(String),
    Image(Image),
}

/// Convert input chunks to a stringly-typed input for backwards
/// compat for backends that haven't implemented chunked inputs.
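/// For example, an image chunk is rendered back to the inline
/// `![](data:<mime>;base64,<data>)` form that `fetch_image` accepts.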
pub trait ChunksToString {
    /// Convert chunks to string.
    fn chunks_to_string(&self) -> String;
}

impl ChunksToString for Vec<Chunk> {
    fn chunks_to_string(&self) -> String {
        let mut output = String::new();
        self.iter().for_each(|c| match &c {
            Chunk::Text(text) => output.push_str(text),
            Chunk::Image(Image { data, mimetype }) => {
                let encoded = STANDARD.encode(data);
                output.push_str(&format!("![](data:{};base64,{})", mimetype, encoded))
            }
        });
        output
    }
}

#[derive(Debug, Clone)]
pub enum ValidGrammar {
    Json(String),
    Regex(String),
}

#[derive(Debug, Clone)]
pub struct ValidParameters {
    /// exponential scaling output probability distribution
    pub temperature: f32,
    /// restricting to the k highest probability elements
    pub top_k: u32,
    /// restricting to top tokens summing to prob_cut_off <= prob_cut_off
    pub top_p: f32,
    /// restricting to top tokens summing to prob_cut_off <= prob_cut_off
    pub typical_p: f32,
    /// apply sampling on the logits
    pub do_sample: bool,
    /// random seed for sampling
    pub seed: u64,
    /// repetition penalty
    pub repetition_penalty: f32,
    /// frequency penalty
    pub frequency_penalty: f32,
    /// token watermarking using "A Watermark for Large Language Models"
    pub watermark: bool,
    /// grammar (applied if not empty)
    pub grammar: Option<ValidGrammar>,
}

#[derive(Debug, Clone)]
pub struct ValidStoppingParameters {
    /// Maximum number of generated tokens
    pub max_new_tokens: u32,
    /// Optional stopping sequences
    pub stop_sequences: Vec<String>,
    /// Ignore end of sequence token
    /// used for benchmarking
    pub ignore_eos_token: bool,
}

#[derive(Debug, Clone)]
pub struct ValidGenerateRequest {
    pub inputs: Vec<Chunk>,
    pub input_ids: Option<Arc<Vec<u32>>>,
    pub input_length: u32,
    pub truncate: u32,
    pub add_special_tokens: bool,
    pub decoder_input_details: bool,
    pub parameters: ValidParameters,
    pub stopping_parameters: ValidStoppingParameters,
    pub top_n_tokens: u32,
    pub adapter_id: Option<String>,
}

#[derive(Error, Debug)]
pub enum ValidationError {
    #[error("`best_of` must be > 0 and <= {0}. Given: {1}")]
    BestOf(usize, usize),
    #[error("`best_of` != 1 is not allowed for this endpoint")]
    BestOfDisabled,
    #[error("you must use sampling when `best_of` is > 1")]
    BestOfSampling,
    #[error("`seed` must not be set when `best_of` > 1")]
    BestOfSeed,
    #[error("`best_of` != 1 is not supported when streaming tokens")]
    BestOfStream,
    #[error("`top_n_tokens` must be >= 0 and <= {0}. Given: {1}")]
    TopNTokens(u32, u32),
    #[error("`top_n_tokens` != 0 is not allowed for this endpoint")]
    TopNTokensDisabled,
    #[error("`decoder_input_details` == true is not supported when streaming tokens")]
    PrefillDetailsStream,
    #[error("`temperature` must be strictly positive")]
    Temperature,
    #[error("`repetition_penalty` must be strictly positive")]
    RepetitionPenalty,
    #[error("`frequency_penalty` must be >= -2.0 and <= 2.0")]
    FrequencyPenalty,
    #[error("`top_p` must be > 0.0 and < 1.0")]
    TopP,
    #[error("`top_k` must be strictly positive")]
    TopK,
    #[error("`truncate` must be strictly positive and less than {0}. Given: {1}")]
    Truncate(usize, usize),
    #[error("`typical_p` must be > 0.0 and < 1.0")]
    TypicalP,
    #[error("one of `max_new_tokens` or `truncate` must be set if a fast tokenizer is not in use")]
    UnsetMaxNewTokens,
    #[error("`max_new_tokens` must be strictly positive")]
    NegativeMaxNewTokens,
    #[error("`max_new_tokens` must be <= {0}. Given: {1}")]
    MaxNewTokens(usize, u32),
    #[error("`inputs` tokens + `max_new_tokens` must be <= {0}. Given: {1} `inputs` tokens and {2} `max_new_tokens`")]
    MaxTotalTokens(usize, usize, u32),
    #[error("`inputs` must have less than {0} tokens. Given: {1}")]
    InputLength(usize, usize),
    #[error("`inputs` cannot be empty")]
    EmptyInput,
    #[error("`stop` supports up to {0} stop sequences. Given: {1}")]
    StopSequence(usize, usize),
    #[error("tokenizer error {0}")]
    Tokenizer(String),
    #[error("grammar is not supported")]
    Grammar,
    #[error("grammar is not valid: {0}")]
    InvalidGrammar(String),
    #[error("base64 encoding is invalid: {0}")]
    InvalidBase64(#[from] base64::DecodeError),
    #[error("invalid image: {0}")]
    InvalidImage(#[from] image::ImageError),
    #[error("invalid integer: {0}")]
    InvalidInt(#[from] core::num::TryFromIntError),
    #[error("invalid image content: {0}")]
    InvalidImageContent(String),
    #[error("Could not fetch image: {0}")]
    FailedFetchImage(#[from] reqwest::Error),
    #[error("{0} modality is not supported")]
    UnsupportedModality(&'static str),
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::{Idefics2, PaliTextConfig, Paligemma};
    use crate::default_parameters;
    use crate::tests::get_tokenizer;

    #[tokio::test]
    async fn test_validation_max_new_tokens() {
        let tokenizer = None;
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let max_new_tokens = 10;
        match validation
            .validate_input("Hello".to_string(), true, None, Some(max_new_tokens))
            .await
        {
            // Err(ValidationError::MaxNewTokens(1, 10)) => (),
            Ok((_s, _, 0, 10)) => (),
            r => panic!("Unexpected not max new tokens: {r:?}"),
        }
    }

    #[tokio::test]
    async fn test_validation_input_length() {
        let tokenizer = Some(get_tokenizer().await);
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let disable_grammar_support = true;
        let workers = 1;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let max_new_tokens = 10;
        match validation
            .validate_input("Hello".to_string(), true, None, Some(max_new_tokens))
            .await
        {
            Err(ValidationError::MaxTotalTokens(6, 1, 10)) => (),
            _ => panic!("Unexpected not max new tokens"),
        }
    }

    #[tokio::test]
    async fn test_validation_best_of_sampling() {
        let tokenizer = Some(get_tokenizer().await);
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );
        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    best_of: Some(2),
                    do_sample: false,
                    ..default_parameters()
                },
            })
            .await
        {
            Err(ValidationError::BestOfSampling) => (),
            _ => panic!("Unexpected not best of sampling"),
        }
    }

    #[tokio::test]
    async fn test_validation_top_p() {
        let tokenizer = Some(get_tokenizer().await);
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 106;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );
        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_p: Some(1.0),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
        {
            Err(ValidationError::TopP) => (),
            _ => panic!("Unexpected top_p"),
        }

        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_p: Some(0.99),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
        {
            Ok(_) => (),
            _ => panic!("Unexpected top_p error"),
        }

        let valid_request = validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_p: None,
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        // top_p == 1.0 is invalid for users to ask for but it's the default resolved value.
        assert_eq!(valid_request.parameters.top_p, 1.0);
    }

    #[tokio::test]
    async fn test_validation_top_n_tokens() {
        let tokenizer = Some(get_tokenizer().await);
        let max_best_of = 2;
        let max_stop_sequences = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 106;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequences,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );
        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: Some(5),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
        {
            Err(ValidationError::TopNTokens(4, 5)) => (),
            _ => panic!("Unexpected top_n_tokens"),
        }

        validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: Some(4),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: Some(0),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        let valid_request = validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: None,
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        assert_eq!(valid_request.top_n_tokens, 0);
    }

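    // Base64-encoded 1x1 pixel GIF used as a stand-in image in the tests below.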
    static PIXEL_GIF: &str = "R0lGODdhAQABAIEAAP///wAAAAAAAAAAACwAAAAAAQABAAAIBAABBAQAOw==";

    #[tokio::test]
    async fn test_prepare_input_chunks() {
        let pixel_data = STANDARD.decode(PIXEL_GIF).unwrap();

        let tokenizer = Some(get_tokenizer().await);

        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let disable_grammar_support = true;
        let workers = 1;
        let config = Config::Paligemma(Paligemma {
            text_config: PaliTextConfig {
                num_image_tokens: 1,
            },
        });
        let validation = Validation::new(
            workers,
            tokenizer,
            Some(config),
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let chunks = match validation
            .tokenize(
                format!("test![](data:image/gif;base64,{})", PIXEL_GIF),
                true,
                None,
            )
            .await
        {
            Ok(Some((_encoding, chunks))) => chunks,
            _ => panic!("Unexpected tokenization failure"),
        };

        assert!(
            chunks
                == vec![
                    Chunk::Text("test".to_string()).into(),
                    Chunk::Image(Image {
                        data: pixel_data.clone(),
                        mimetype: "image/gif".to_string()
                    })
                    .into()
                ],
            "Failed to process images",
        );
    }

    #[tokio::test]
    async fn test_idefics2_correct_n_fake_tokens() {
        let pixel_data = STANDARD.decode(PIXEL_GIF).unwrap();

        let tokenizer = Some(get_tokenizer().await);

        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let disable_grammar_support = true;
        let workers = 1;
        let config = Config::Idefics2(Idefics2 {});
        let validation = Validation::new(
            workers,
            tokenizer,
            Some(config),
            Some(HubPreprocessorConfig::Idefics2Processor(
                Idefics2Preprocessor {
                    do_image_splitting: true,
                },
            )),
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let (encoding, chunks) = match validation
            .tokenize(
                format!(
                    "test![](data:image/gif;base64,{})![](data:image/gif;base64,{})",
                    PIXEL_GIF, PIXEL_GIF
                ),
                true,
                None,
            )
            .await
        {
            Ok(Some((encoding, chunks))) => (encoding, chunks),
            _ => panic!("Unexpected tokenization failure"),
        };

        assert!(
            chunks
                == vec![
                    Chunk::Text("test".to_string()).into(),
                    Chunk::Image(Image {
                        data: pixel_data.clone(),
                        mimetype: "image/gif".to_string()
                    })
                    .into(),
                    Chunk::Image(Image {
                        data: pixel_data.clone(),
                        mimetype: "image/gif".to_string()
                    })
                    .into()
                ],
            "Failed to process images",
        );

        // Verify the number of fake tokens:
        //
        // - Two images surrounded/separated by a fake token = 3.
        // - Both are split into 5 sub-images, separated by a fake token: 2 * 4.
        //
        // Fake tokens get split up by the testing tokenizer, but we don't care.
        assert_eq!(
            encoding
                .get_tokens()
                .iter()
                .filter(|t| *t == "fake")
                .count(),
            11
        );
    }
}