use crate::config::Config;
use crate::validation::ValidationError::{BestOfSampling, BestOfSeed, EmptyInput};

use crate::{
    GenerateParameters, GenerateRequest, GrammarType, HubPreprocessorConfig, Idefics2Preprocessor,
    TokenizerTrait,
};
use crate::{PyTokenizer, Tokenizer};
use base64::{engine::general_purpose::STANDARD, Engine};
use image::{ImageFormat, ImageReader};
use jsonschema::{Draft, JSONSchema};
use outlines_core::json_schema::to_regex as json_schema_to_regex;
use rand::{thread_rng, Rng};
use serde_json::Value;
/// Payload validation logic
use std::cmp::min;
use std::io::Cursor;
use std::iter;
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tracing::{instrument, Span};
use {once_cell::sync::Lazy, regex::Regex};

static DEFAULT_GENERATION_LENGTH: u32 = 1024;

/// Validation
#[derive(Debug, Clone)]
pub struct Validation {
    /// Validation parameters
    max_best_of: usize,
    max_stop_sequences: usize,
    max_top_n_tokens: u32,
    max_input_length: usize,
    max_total_tokens: usize,
    disable_grammar_support: bool,
    /// Channel to communicate with the background tokenization task
    sender: mpsc::UnboundedSender<TokenizerRequest>,
}

impl Validation {
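    /// Build a new `Validation`: spawns `workers` blocking tokenization tasks
    /// (capped to a single worker when the Python tokenizer is in use) and a
    /// round-robin task that dispatches incoming requests across them.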
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn new(
        workers: usize,
        tokenizer: Tokenizer,
        config: Option<Config>,
        preprocessor_config: Option<HubPreprocessorConfig>,
        max_best_of: usize,
        max_stop_sequences: usize,
        max_top_n_tokens: u32,
        max_input_length: usize,
        max_total_tokens: usize,
        disable_grammar_support: bool,
    ) -> Self {
        let workers = if let Tokenizer::Python { .. } = &tokenizer {
            1
        } else {
            workers
        };
        // If we have a fast tokenizer
        let sender = {
            // Create round robin channel
            let (validation_sender, validation_round_robin_receiver) = mpsc::unbounded_channel();
            let mut senders = Vec::with_capacity(workers);

            // Create workers
            for _ in 0..workers {
                let tokenizer_clone = tokenizer.clone();
                let config_clone = config.clone();
                let preprocessor_config_clone = preprocessor_config.clone();
                let (tokenizer_sender, tokenizer_receiver) = mpsc::unbounded_channel();
                senders.push(tokenizer_sender);

                // Spawn worker
                tokio::task::spawn_blocking(move || {
                    tokenizer_worker(
                        tokenizer_clone,
                        config_clone,
                        preprocessor_config_clone,
                        tokenizer_receiver,
                    )
                });
            }

            // Create tokenization round robin task
            tokio::spawn(round_robin_task(validation_round_robin_receiver, senders));

            validation_sender
        };

        Self {
            max_best_of,
            sender,
            max_stop_sequences,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        }
    }
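
    /// Send `inputs` to one of the background tokenization workers and return the
    /// resulting encoding together with the parsed text/image chunks.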
    #[instrument(skip(self, inputs))]
    pub async fn tokenize(
        &self,
        inputs: String,
        add_special_tokens: bool,
        truncate: Option<usize>,
    ) -> Result<(tokenizers::Encoding, Vec<Chunk>), ValidationError> {
        // If we have a fast tokenizer
        // Create response channel
        let (response_sender, response_receiver) = oneshot::channel();
        // Send request to the background validation task
        // Unwrap is safe here
        let _ = &self
            .sender
            .send((
                (inputs, add_special_tokens, truncate),
                response_sender,
                Span::current(),
            ))
            .unwrap();

        // Await on response channel
        // Unwrap is safe here
        let encoding = response_receiver.await.unwrap()?;
        Ok(encoding)
    }

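    /// Tokenize the input, enforce `max_input_length` and `max_total_tokens`, and
    /// resolve `max_new_tokens` (defaulting to the remaining token budget, capped at
    /// `DEFAULT_GENERATION_LENGTH`).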
    #[allow(clippy::type_complexity)]
    #[instrument(skip(self, inputs))]
    async fn validate_input(
        &self,
        inputs: String,
        add_special_tokens: bool,
        truncate: Option<usize>,
        max_new_tokens: Option<u32>,
    ) -> Result<(Vec<Chunk>, Option<Vec<u32>>, usize, u32, u32), ValidationError> {
        // If we have a fast tokenizer
        let (encoding, inputs) = self
            .tokenize(inputs.clone(), add_special_tokens, truncate)
            .await?;
        // Compute the effective input length, taking truncation into account
        let input_length = if let Some(truncate) = truncate {
            std::cmp::min(encoding.len(), truncate)
        } else {
            encoding.len()
        };

        // Get total tokens
        let (max_new_tokens, max_total_new_tokens) = if let Some(max_new_tokens) = max_new_tokens {
            (max_new_tokens, max_new_tokens)
        } else {
            // Use the maximum possible number of tokens as default
            // However, the system will re-queue the request every time it completes
            // `DEFAULT_GENERATION_LENGTH` tokens.
            let max_new_tokens = self.max_total_tokens.saturating_sub(input_length) as u32;
            (
                min(max_new_tokens, DEFAULT_GENERATION_LENGTH),
                max_new_tokens,
            )
        };
        let total_tokens = input_length + max_new_tokens as usize;

        // Validate MaxTotalTokens
        if total_tokens > self.max_total_tokens {
            return Err(ValidationError::MaxTotalTokens(
                self.max_total_tokens,
                input_length,
                max_new_tokens,
            ));
        }

        // Validate InputLength
        if input_length > self.max_input_length {
            return Err(ValidationError::InputLength(
                self.max_input_length,
                input_length,
            ));
        }

        let ids = encoding.get_ids();
        let input_ids = ids[ids.len().saturating_sub(input_length)..].to_owned();

        metrics::histogram!("tgi_request_input_length").record(input_length as f64);

        Ok((
            inputs,
            Some(input_ids),
            input_length,
            max_new_tokens,
            max_total_new_tokens,
        ))
    }

    /// Validate a payload and get the number of tokens in the input
    #[instrument(skip_all)]
    pub(crate) async fn validate(
        &self,
        request: GenerateRequest,
    ) -> Result<ValidGenerateRequest, ValidationError> {
        let GenerateParameters {
            best_of,
            temperature,
            repetition_penalty,
            frequency_penalty,
            top_k,
            top_p,
            typical_p,
            do_sample,
            max_new_tokens,
            stop: stop_sequences,
            truncate,
            seed,
            watermark,
            decoder_input_details,
            top_n_tokens,
            grammar,
            adapter_id,
            ..
        } = request.parameters;

        // sampling must be true when best_of > 1
        let best_of = best_of.unwrap_or(1);
        let sampling = do_sample
            || temperature.is_some()
            || top_k.is_some()
            || top_p.is_some()
            || typical_p.is_some();

        if best_of > 1 && !sampling {
            return Err(BestOfSampling);
        }

        let temperature = temperature.unwrap_or(1.0);
        if temperature <= 0.0 {
            return Err(ValidationError::Temperature);
        }

        let repetition_penalty = repetition_penalty.unwrap_or(1.0);
        if repetition_penalty <= 0.0 {
            return Err(ValidationError::RepetitionPenalty);
        }

        let frequency_penalty = frequency_penalty.unwrap_or(0.0);
        if !(-2.0..=2.0).contains(&frequency_penalty) {
            return Err(ValidationError::FrequencyPenalty);
        }

        // Different because the proto default value is not a valid value
        // for the user
        let top_p = top_p
            .map(|value| {
                if value <= 0.0 || value >= 1.0 {
                    return Err(ValidationError::TopP);
                }
                Ok(value)
            })
            .unwrap_or(Ok(1.0))?;

        let typical_p = typical_p
            .map(|value| {
                if value <= 0.0 || value >= 1.0 {
                    return Err(ValidationError::TypicalP);
                }
                Ok(value)
            })
            .unwrap_or(Ok(1.0))?;

        let top_k: u32 = top_k
            .map(|value| {
                if value <= 0 {
                    return Err(ValidationError::TopK);
                }
                Ok(value as u32)
            })
            .unwrap_or(Ok(0))?;

        if max_new_tokens == Some(0) {
            return Err(ValidationError::NegativeMaxNewTokens);
        }

        if stop_sequences.len() > self.max_stop_sequences {
            return Err(ValidationError::StopSequence(
                self.max_stop_sequences,
                stop_sequences.len(),
            ));
        }

        // If seed is None, assign a random one
        let seed = match seed {
            None => thread_rng().gen(),
            Some(seed) => {
                if best_of > 1 {
                    return Err(BestOfSeed);
                }
                seed
            }
        };

        let top_n_tokens = top_n_tokens
            .map(|value| {
                if value > self.max_top_n_tokens {
                    return Err(ValidationError::TopNTokens(self.max_top_n_tokens, value));
                }
                Ok(value)
            })
            .unwrap_or(Ok(0))?;

        // Check if inputs is empty
        if request.inputs.is_empty() {
            return Err(EmptyInput);
        }

        // Check if truncate is strictly positive and less than max_input_length
        let truncate = truncate
            .map(|value| {
                if value == 0 || value > self.max_input_length {
                    return Err(ValidationError::Truncate(self.max_input_length, value));
                }
                Ok(Some(value))
            })
            .unwrap_or(Ok(None))?;

        // Validate inputs
        let (inputs, input_ids, input_length, max_new_tokens, max_total_new_tokens) = self
            .validate_input(
                request.inputs,
                request.add_special_tokens,
                truncate,
                max_new_tokens,
            )
            .await?;

        // TODO: we should build the FSM here and pass the compiled FSM instead of the grammar
        // NOTE: this is currently difficult because we need the tokenizer in Python to build
        // the FSM and we'd have to load a copy of the tokenizer into our Pyo3 instance which
        // may be slow and memory intensive. Best case is to have a Rust implementation of the FSM
        // compiler and use that to build the FSM here.

        // Validate grammar and unpack the grammar and type for the proto message
        let grammar = match grammar {
            Some(grammar) => {
                // Ensure that grammar is not set if it's not supported
                if self.disable_grammar_support {
                    return Err(ValidationError::Grammar);
                }
                let valid_grammar = match grammar {
                    GrammarType::Json(json) => {
                        let json = match json {
                            // if the value is a string, parse it again to make sure it is
                            // valid JSON
                            Value::String(s) => serde_json::from_str(&s)
                                .map_err(|e| ValidationError::InvalidGrammar(e.to_string())),
                            Value::Object(_) => Ok(json),
                            _ => Err(ValidationError::Grammar),
                        }?;

                        // Check if the json is a valid JSONSchema
                        JSONSchema::options()
                            .with_draft(Draft::Draft202012)
                            .compile(&json)
                            .map_err(|e| ValidationError::InvalidGrammar(e.to_string()))?;

                        // The schema can be valid but lack properties.
                        // We need properties for the grammar to be successfully parsed in Python.
                        // Therefore, we must check and throw an error if properties are missing.
                        json.get("properties")
                            .ok_or(ValidationError::InvalidGrammar(
                                "Grammar must have a 'properties' field".to_string(),
                            ))?;

                        // Do compilation in the router for performance. In the future, we
                        // should also move regex -> automaton compilation in the router,
                        // but this is not yet supported in pure Rust by outlines-core.
                        let grammar_regex = json_schema_to_regex(&json, None, &json)
                            .map_err(ValidationError::RegexFromSchema)?;

                        ValidGrammar::Regex(grammar_regex.to_string())
                    }
                    GrammarType::Regex(regex) => ValidGrammar::Regex(regex),
                };
                Some(valid_grammar)
            }
            None => None,
        };

        let parameters = ValidParameters {
            temperature,
            repetition_penalty,
            frequency_penalty,
            top_k,
            top_p,
            typical_p,
            do_sample,
            seed,
            watermark,
            grammar,
        };
        let stopping_parameters = ValidStoppingParameters {
            max_new_tokens,
            max_total_new_tokens,
            stop_sequences,
            ignore_eos_token: false,
        };

        metrics::histogram!("tgi_request_max_new_tokens").record(max_new_tokens as f64);

        Ok(ValidGenerateRequest {
            inputs,
            input_ids: input_ids.map(Arc::new),
            add_special_tokens: request.add_special_tokens,
            decoder_input_details,
            input_length: input_length as u32,
            truncate: truncate.unwrap_or(self.max_input_length) as u32,
            parameters,
            stopping_parameters,
            top_n_tokens,
            adapter_id,
        })
    }

    /// Validate the best_of parameter
    #[instrument(skip_all)]
    pub(crate) fn validate_best_of(&self, best_of: usize) -> Result<usize, ValidationError> {
        if self.max_best_of == 1 && best_of != 1 {
            return Err(ValidationError::BestOfDisabled);
        }

        if best_of > self.max_best_of {
            return Err(ValidationError::BestOf(self.max_best_of, best_of));
        }

        Ok(best_of)
    }
}

/// Round robin tokenization task
async fn round_robin_task(
    mut receiver: mpsc::UnboundedReceiver<TokenizerRequest>,
    senders: Vec<mpsc::UnboundedSender<TokenizerRequest>>,
) {
    loop {
        for sender in &senders {
            match receiver.recv().await {
                None => return,
                Some(request) => sender.send(request).unwrap(),
            };
        }
    }
}

/// Start tokenization workers
fn tokenizer_worker(
    tokenizer: Tokenizer,
    config: Option<Config>,
    preprocessor_config: Option<HubPreprocessorConfig>,
    mut receiver: mpsc::UnboundedReceiver<TokenizerRequest>,
) {
    match tokenizer {
        Tokenizer::Python {
            tokenizer_name,
            revision,
            trust_remote_code,
        } => {
            pyo3::Python::with_gil(|py| -> pyo3::PyResult<()> {
                let tokenizer =
                    PyTokenizer::from_py(py, tokenizer_name, revision, trust_remote_code)?;
                // Loop over requests
                while let Some(((inputs, add_special_tokens, truncate), response_tx, parent_span)) =
                    receiver.blocking_recv()
                {
                    parent_span.in_scope(|| {
                        response_tx
                            .send(prepare_input(
                                inputs,
                                truncate,
                                add_special_tokens,
                                &tokenizer,
                                config.as_ref(),
                                preprocessor_config.as_ref(),
                            ))
                            .unwrap_or(())
                    })
                }
                Ok(())
            })
            .expect("Failure in python tokenizer worker");
        }
        Tokenizer::Rust(tokenizer) => {
            while let Some(((inputs, add_special_tokens, truncate), response_tx, parent_span)) =
                receiver.blocking_recv()
            {
                parent_span.in_scope(|| {
                    response_tx
                        .send(prepare_input(
                            inputs,
                            truncate,
                            add_special_tokens,
                            &tokenizer,
                            config.as_ref(),
                            preprocessor_config.as_ref(),
                        ))
                        .unwrap_or(())
                })
            }
        }
    }
}

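/// Map a mimetype string to the corresponding `image::ImageFormat`, if supported.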
fn format_from_mimetype(mimetype: &str) -> Option<ImageFormat> {
    match mimetype {
        "image/png" => Some(ImageFormat::Png),
        "image/jpeg" => Some(ImageFormat::Jpeg),
        "image/jpg" => Some(ImageFormat::Jpeg),
        "image/gif" => Some(ImageFormat::Gif),
        "image/webp" => Some(ImageFormat::WebP),
        "image/tiff" => Some(ImageFormat::Tiff),
        // "image/pnm"=>Some(ImageFormat::Pnm),
        // "image/tga"=>Some(ImageFormat::Tga),
        // "image/dds"=>Some(ImageFormat::Dds),
        // "image/bmp"=>Some(ImageFormat::Bmp),
        // "image/ico"=>Some(ImageFormat::Ico),
        // "image/x-exr"=>Some(ImageFormat::OpenExr),
        _ => None,
    }
}

fn format_to_mimetype(format: ImageFormat) -> String {
    match format {
        ImageFormat::Png => "image/png",
        ImageFormat::Jpeg => "image/jpeg",
        ImageFormat::Gif => "image/gif",
        ImageFormat::WebP => "image/webp",
        ImageFormat::Tiff => "image/tiff",
        _ => "application/octet-stream",
    }
    .to_string()
}

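/// Fetch and decode an image referenced in the input text.
///
/// Accepts markdown-style references of the form `![](http://...)`, `![](https://...)`
/// or `![](data:<mimetype>;base64,<data>)` and returns the raw bytes, the mimetype,
/// and the decoded height and width.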
fn fetch_image(input: &str) -> Result<(Vec<u8>, String, usize, usize), ValidationError> {
    if input.starts_with("![](http://") || input.starts_with("![](https://") {
        let url = &input["![](".len()..input.len() - 1];
        let data = reqwest::blocking::get(url)?.bytes()?;

        let format = image::guess_format(&data)?;
        // TODO Remove this clone
        let img = ImageReader::with_format(Cursor::new(data.clone()), format).decode()?;
        let height: usize = img.height().try_into()?;
        let width: usize = img.width().try_into()?;
        let mimetype = format_to_mimetype(format);
        Ok((data.to_vec(), mimetype, height, width))
    } else if input.starts_with("![](data:") {
        // Remove ![](....)
        let content = &input["![](data:".len()..input.len() - 1];
        let tokens: Vec<_> = content.split(';').collect();
        if tokens.len() != 2 {
            return Err(ValidationError::InvalidImageContent(content.to_string()));
        }
        let mimetype = tokens[0];
        let content = tokens[1];

        if !content.starts_with("base64,") {
            return Err(ValidationError::InvalidImageContent(content.to_string()));
        }

        let data = STANDARD.decode(content["base64,".len()..].as_bytes())?;
        let img = if let Some(format) = format_from_mimetype(mimetype) {
            ImageReader::with_format(Cursor::new(&data), format).decode()?
        } else {
            ImageReader::new(Cursor::new(&data))
                .with_guessed_format()
                .map_err(|_io_error| ValidationError::InvalidImageContent(content.to_string()))?
                .decode()?
        };

        let height: usize = img.height().try_into()?;
        let width: usize = img.width().try_into()?;
        Ok((data, mimetype.to_string(), height, width))
    } else {
        Err(ValidationError::InvalidImageContent(input.to_string()))
    }
}

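/// Build the placeholder token string inserted in place of one image, depending on
/// the model `Config` (and, for Idefics2, on whether the preprocessor splits images).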
fn image_tokens(
    config: &Config,
    preprocessor_config: Option<&HubPreprocessorConfig>,
    height: usize,
    width: usize,
) -> String {
    use Config::*;
    use HubPreprocessorConfig::*;
    match config {
        Idefics => "<image>".to_string(),
        Mllama => "<|image|>".to_string(),
        Idefics2(config) => {
            const FAKE: &str = "<fake_token_around_image>";
            const IMAGE: &str = "<image>";

            let slots = config.get_number_of_features(height, width);

            let mut image_string = String::with_capacity(2 * FAKE.len() + slots * IMAGE.len());
            image_string.push_str(FAKE);
            image_string.extend(iter::repeat(IMAGE).take(slots));
            image_string.push_str(FAKE);

            if matches!(
                preprocessor_config,
                Some(Idefics2Processor(Idefics2Preprocessor {
                    do_image_splitting: true,
                    ..
                }))
            ) {
                image_string = image_string.repeat(5);
            };

            image_string
        }
        Paligemma(config) => "<image>".repeat(config.get_number_of_features(height, width)),
        LlavaNext(config) => "<image>".repeat(config.get_number_of_features(height, width)),
        Qwen2Vl(config) => format!(
            "<|vision_start|>{}<|vision_end|>",
            "<|image_pad|>".repeat(config.get_number_of_features(height, width))
        ),
        _ => unimplemented!("Image tokens are not supported for this model configuration"),
    }
}

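/// For Idefics2, collapse doubled `<fake_token_around_image>` markers left by
/// adjacent images; other configurations are returned unchanged.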
fn image_tokens_fixup(config: &Config, text: String) -> String {
    match config {
        Config::Idefics2(_) => {
            const FAKE: &str = "<fake_token_around_image>";
            text.replace(&format!("{FAKE}{FAKE}"), FAKE)
        }
        _ => text,
    }
}

/// Split the input into text and image chunks and tokenize the resulting query
fn prepare_input<T: TokenizerTrait>(
    inputs: String,
    _truncate: Option<usize>,
    add_special_tokens: bool,
    tokenizer: &T,
    config: Option<&Config>,
    preprocessor_config: Option<&HubPreprocessorConfig>,
) -> Result<(tokenizers::Encoding, Vec<Chunk>), ValidationError> {
    use Config::*;
    static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"!\[\]\([^\)]*\)").unwrap());
    let (tokenizer_query, input_chunks) = match config {
        Some(
            config @ (Idefics | Mllama | Idefics2(_) | Paligemma(_) | LlavaNext(_) | Qwen2Vl(_)),
        ) => {
            let mut input_chunks = Vec::new();
            let mut tokenizer_query = String::with_capacity(inputs.len());
            let mut start = 0;
            for chunk in RE.find_iter(&inputs) {
                let chunk_start = chunk.start();
                let chunk_end = chunk.end();
                if chunk_start != start {
                    input_chunks.push(Chunk::Text(inputs[start..chunk_start].to_string()));
                    tokenizer_query.push_str(&inputs[start..chunk_start]);
                }
                let (data, mimetype, height, width) = fetch_image(&inputs[chunk_start..chunk_end])?;
                input_chunks.push(Chunk::Image(Image { data, mimetype }));
                tokenizer_query.push_str(&image_tokens(config, preprocessor_config, height, width));
                start = chunk_end;
            }
            if start != inputs.len() {
                input_chunks.push(Chunk::Text(inputs[start..].to_string()));
                tokenizer_query.push_str(&inputs[start..]);
            }

            tokenizer_query = image_tokens_fixup(config, tokenizer_query);

            (tokenizer_query, input_chunks)
        }
        _ => (inputs.clone(), vec![Chunk::Text(inputs)]),
    };

    // Get the number of tokens in the input
    let encoding = tokenizer
        .encode_trait(tokenizer_query, add_special_tokens)
        .map_err(|err| ValidationError::Tokenizer(err.to_string()))?;

    Ok((encoding, input_chunks))
}

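/// Request sent to a tokenization worker:
/// `((inputs, add_special_tokens, truncate), response channel, tracing span of the caller)`.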
type TokenizerRequest = (
    (String, bool, Option<usize>),
    oneshot::Sender<Result<(tokenizers::Encoding, Vec<Chunk>), ValidationError>>,
    Span,
);

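/// Raw image bytes and their mimetype, as extracted from the request input.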
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Image {
    pub data: Vec<u8>,
    pub mimetype: String,
}

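/// A piece of a (possibly multimodal) input: plain text or an inline image.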
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum Chunk {
    Text(String),
    Image(Image),
}

/// Convert input chunks to a stringly-typed input for backwards
/// compat for backends that haven't implemented chunked inputs.
pub trait ChunksToString {
    /// Convert chunks to string.
    fn chunks_to_string(&self) -> String;
}

impl ChunksToString for Vec<Chunk> {
    fn chunks_to_string(&self) -> String {
        let mut output = String::new();
        self.iter().for_each(|c| match &c {
            Chunk::Text(text) => output.push_str(text),
            Chunk::Image(Image { data, mimetype }) => {
                let encoded = STANDARD.encode(data);
                output.push_str(&format!("![](data:{};base64,{})", mimetype, encoded))
            }
        });
        output
    }
}

#[derive(Debug, Clone)]
pub enum ValidGrammar {
    Json(String),
    Regex(String),
}

#[derive(Debug, Clone)]
pub struct ValidParameters {
    /// exponential scaling output probability distribution
    pub temperature: f32,
    /// restricting to the k highest probability elements
    pub top_k: u32,
    /// restricting to top tokens summing to prob_cut_off <= prob_cut_off
    pub top_p: f32,
    /// restricting to top tokens summing to prob_cut_off <= prob_cut_off
    pub typical_p: f32,
    /// apply sampling on the logits
    pub do_sample: bool,
    /// random seed for sampling
    pub seed: u64,
    /// repetition penalty
    pub repetition_penalty: f32,
    /// frequency penalty
    pub frequency_penalty: f32,
    /// token watermarking using "A Watermark for Large Language Models"
    pub watermark: bool,
    /// grammar (applied if not empty)
    pub grammar: Option<ValidGrammar>,
}

#[derive(Debug, Clone)]
pub struct ValidStoppingParameters {
    /// Maximum number of generated tokens
    pub max_new_tokens: u32,
    /// Maximum number of generated tokens before being re-queued by the system
    pub max_total_new_tokens: u32,
    /// Optional stopping sequences
    pub stop_sequences: Vec<String>,
    /// Ignore end of sequence token
    /// used for benchmarking
    pub ignore_eos_token: bool,
}

#[derive(Debug, Clone)]
pub struct ValidGenerateRequest {
    pub inputs: Vec<Chunk>,
    pub input_ids: Option<Arc<Vec<u32>>>,
    pub input_length: u32,
    pub truncate: u32,
    pub add_special_tokens: bool,
    pub decoder_input_details: bool,
    pub parameters: ValidParameters,
    pub stopping_parameters: ValidStoppingParameters,
    pub top_n_tokens: u32,
    pub adapter_id: Option<String>,
}

#[derive(Error, Debug)]
pub enum ValidationError {
    #[error("`best_of` must be > 0 and <= {0}. Given: {1}")]
    BestOf(usize, usize),
    #[error("`best_of` != 1 is not allowed for this endpoint")]
    BestOfDisabled,
    #[error("you must use sampling when `best_of` is > 1")]
    BestOfSampling,
    #[error("`seed` must not be set when `best_of` > 1")]
    BestOfSeed,
    #[error("`best_of` != 1 is not supported when streaming tokens")]
    BestOfStream,
    #[error("`top_n_tokens` must be >= 0 and <= {0}. Given: {1}")]
    TopNTokens(u32, u32),
    #[error("`top_n_tokens` != 0 is not allowed for this endpoint")]
    TopNTokensDisabled,
    #[error("`decoder_input_details` == true is not supported when streaming tokens")]
    PrefillDetailsStream,
    #[error("`temperature` must be strictly positive")]
    Temperature,
    #[error("`repetition_penalty` must be strictly positive")]
    RepetitionPenalty,
    #[error("`frequency_penalty` must be >= -2.0 and <= 2.0")]
    FrequencyPenalty,
    #[error("`top_p` must be > 0.0 and < 1.0")]
    TopP,
    #[error("`top_k` must be strictly positive")]
    TopK,
    #[error("`truncate` must be strictly positive and less than {0}. Given: {1}")]
    Truncate(usize, usize),
    #[error("`typical_p` must be > 0.0 and < 1.0")]
    TypicalP,
    #[error("one of `max_new_tokens` or `truncate` must be set if a fast tokenizer is not in use")]
    UnsetMaxNewTokens,
    #[error("`max_new_tokens` must be strictly positive")]
    NegativeMaxNewTokens,
    #[error("`max_new_tokens` must be <= {0}. Given: {1}")]
    MaxNewTokens(usize, u32),
    #[error("`inputs` tokens + `max_new_tokens` must be <= {0}. Given: {1} `inputs` tokens and {2} `max_new_tokens`")]
    MaxTotalTokens(usize, usize, u32),
    #[error("`inputs` must have less than {0} tokens. Given: {1}")]
    InputLength(usize, usize),
    #[error("`inputs` cannot be empty")]
    EmptyInput,
    #[error("`stop` supports up to {0} stop sequences. Given: {1}")]
    StopSequence(usize, usize),
    #[error("tokenizer error {0}")]
    Tokenizer(String),
    #[error("grammar is not supported")]
    Grammar,
    #[error("grammar is not valid: {0}")]
    InvalidGrammar(String),
    #[error("cannot compile regex from schema: {0}")]
    RegexFromSchema(anyhow::Error),
    #[error("base64 encoding is invalid: {0}")]
    InvalidBase64(#[from] base64::DecodeError),
    #[error("invalid image: {0}")]
    InvalidImage(#[from] image::ImageError),
    #[error("invalid integer: {0}")]
    InvalidInt(#[from] core::num::TryFromIntError),
    #[error("invalid image content: {0}")]
    InvalidImageContent(String),
    #[error("Could not fetch image: {0}")]
    FailedFetchImage(#[from] reqwest::Error),
    #[error("{0} modality is not supported")]
    UnsupportedModality(&'static str),
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::{Idefics2, PaliTextConfig, Paligemma};
    use crate::default_parameters;
    use crate::tests::get_tokenizer;

    #[tokio::test]
    async fn test_validation_max_new_tokens() {
        let tokenizer = get_tokenizer();
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let max_new_tokens = 10;
        match validation
            .validate_input("Hello".to_string(), true, None, Some(max_new_tokens))
            .await
        {
            Err(ValidationError::MaxTotalTokens(6, 1, 10)) => (),
            // Ok((_s, _, 0, 10)) => (),
            r => panic!("Unexpected not max new tokens: {r:?}"),
        }
    }

    #[tokio::test]
    async fn test_validation_input_length() {
        let tokenizer = get_tokenizer();
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let disable_grammar_support = true;
        let workers = 1;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let max_new_tokens = 10;
        match validation
            .validate_input("Hello".to_string(), true, None, Some(max_new_tokens))
            .await
        {
            Err(ValidationError::MaxTotalTokens(6, 1, 10)) => (),
            _ => panic!("Unexpected not max new tokens"),
        }
    }

    #[tokio::test]
    async fn test_validation_best_of_sampling() {
        let tokenizer = get_tokenizer();
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );
        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    best_of: Some(2),
                    do_sample: false,
                    ..default_parameters()
                },
            })
            .await
        {
            Err(ValidationError::BestOfSampling) => (),
            _ => panic!("Unexpected not best of sampling"),
        }
    }

    #[tokio::test]
    async fn test_validation_top_p() {
        let tokenizer = get_tokenizer();
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 106;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );
        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_p: Some(1.0),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
        {
            Err(ValidationError::TopP) => (),
            _ => panic!("Unexpected top_p"),
        }

        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_p: Some(0.99),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
        {
            Ok(_) => (),
            _ => panic!("Unexpected top_p error"),
        }

        let valid_request = validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_p: None,
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();
        // top_p == 1.0 is invalid for users to ask for but it's the default resolved value.
        assert_eq!(valid_request.parameters.top_p, 1.0);
    }

    #[tokio::test]
    async fn test_validation_top_n_tokens() {
        let tokenizer = get_tokenizer();
        let max_best_of = 2;
        let max_stop_sequences = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 106;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequences,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );
        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: Some(5),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
        {
            Err(ValidationError::TopNTokens(4, 5)) => (),
            _ => panic!("Unexpected top_n_tokens"),
        }

        validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: Some(4),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: Some(0),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        let valid_request = validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: None,
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        assert_eq!(valid_request.top_n_tokens, 0);
    }

    static PIXEL_GIF: &str = "R0lGODdhAQABAIEAAP///wAAAAAAAAAAACwAAAAAAQABAAAIBAABBAQAOw==";

    #[tokio::test]
    async fn test_prepare_input_chunks() {
        let pixel_data = STANDARD.decode(PIXEL_GIF).unwrap();

        let tokenizer = get_tokenizer();

        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let disable_grammar_support = true;
        let workers = 1;
        let config = Config::Paligemma(Paligemma {
            text_config: PaliTextConfig {
                num_image_tokens: 1,
            },
        });
        let validation = Validation::new(
            workers,
            tokenizer,
            Some(config),
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let chunks = match validation
            .tokenize(
                format!("test![](data:image/gif;base64,{})", PIXEL_GIF),
                true,
                None,
            )
            .await
        {
            Ok((_encoding, chunks)) => chunks,
            _ => panic!("Unexpected tokenization failure"),
        };

        assert!(
            chunks
                == vec![
                    Chunk::Text("test".to_string()).into(),
                    Chunk::Image(Image {
                        data: pixel_data.clone(),
                        mimetype: "image/gif".to_string()
                    })
                    .into()
                ],
            "Failed to process images",
        );
    }

    #[tokio::test]
    async fn test_idefics2_correct_n_fake_tokens() {
        let pixel_data = STANDARD.decode(PIXEL_GIF).unwrap();

        let tokenizer = get_tokenizer();

        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let disable_grammar_support = true;
        let workers = 1;
        let config = Config::Idefics2(Idefics2 {});
        let validation = Validation::new(
            workers,
            tokenizer,
            Some(config),
            Some(HubPreprocessorConfig::Idefics2Processor(
                Idefics2Preprocessor {
                    do_image_splitting: true,
                },
            )),
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let (encoding, chunks) = match validation
            .tokenize(
                format!(
                    "test![](data:image/gif;base64,{})![](data:image/gif;base64,{})",
                    PIXEL_GIF, PIXEL_GIF
                ),
                true,
                None,
            )
            .await
        {
            Ok((encoding, chunks)) => (encoding, chunks),
            _ => panic!("Unexpected tokenization failure"),
        };

        assert!(
            chunks
                == vec![
                    Chunk::Text("test".to_string()).into(),
                    Chunk::Image(Image {
                        data: pixel_data.clone(),
                        mimetype: "image/gif".to_string()
                    })
                    .into(),
                    Chunk::Image(Image {
                        data: pixel_data.clone(),
                        mimetype: "image/gif".to_string()
                    })
                    .into()
                ],
            "Failed to process images",
        );

        // Verify the number of fake tokens:
        //
        // - Two images surrounded/separated by a fake token = 3.
        // - Both are split in 5 subimages, separated by a fake token: 2 * 4
        //
        // Fake tokens get split up by the testing tokenizer, but we don't care.
        assert_eq!(
            encoding
                .get_tokens()
                .iter()
                .filter(|t| *t == "fake")
                .count(),
            11
        );
    }
}