/// Text Generation Inference Webserver
pub mod config;
pub mod infer;
pub mod server;
pub mod validation;

#[cfg(feature = "kserve")]
mod kserve;
pub mod logging;

pub mod usage_stats;

use serde::{Deserialize, Serialize};
use tracing::warn;
use utoipa::ToSchema;
use validation::Validation;

#[derive(PartialEq)]
pub enum Attention {
    Paged,
    FlashDecoding,
    FlashInfer,
}

impl Attention {
    pub fn block_size(&self) -> u32 {
        match self {
            Attention::FlashDecoding => 256,
            Attention::FlashInfer => 1,
            Attention::Paged => 16,
        }
    }
}

#[derive(Debug)]
pub struct ParseError;

impl std::fmt::Display for ParseError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Cannot parse attention value")
    }
}
impl std::error::Error for ParseError {}

impl std::str::FromStr for Attention {
    type Err = ParseError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "paged" => Ok(Attention::Paged),
            "flashdecoding" => Ok(Attention::FlashDecoding),
            "flashinfer" => Ok(Attention::FlashInfer),
            _ => Err(ParseError),
        }
    }
}

#[derive(Clone, Deserialize, ToSchema)]
pub(crate) struct GenerateVertexInstance {
    #[schema(example = "What is Deep Learning?")]
    pub inputs: String,
    #[schema(nullable = true, default = "null", example = "null")]
    pub parameters: Option<GenerateParameters>,
}

#[derive(Clone, Deserialize, ToSchema)]
#[serde(untagged)]
enum VertexInstance {
    Generate(GenerateVertexInstance),
    Chat(ChatRequest),
}

#[derive(Deserialize, ToSchema)]
pub(crate) struct VertexRequest {
    #[serde(rename = "instances")]
    pub instances: Vec<VertexInstance>,
}

#[derive(Clone, Deserialize, ToSchema, Serialize)]
pub(crate) struct VertexResponse {
    pub predictions: Vec<String>,
}

/// Hub type
#[derive(Clone, Debug, Deserialize)]
pub struct HubModelInfo {
    #[serde(rename(deserialize = "id"))]
    pub model_id: String,
    pub sha: Option<String>,
    pub pipeline_tag: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ChatTemplate {
    name: String,
    template: String,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(untagged)]
pub enum ChatTemplateVersions {
    Single(String),
    Multiple(Vec<ChatTemplate>),
}

use std::path::Path;

#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct HubTokenizerConfig {
    pub chat_template: Option<ChatTemplateVersions>,
    pub completion_template: Option<String>,
    pub bos_token: Option<TokenizerConfigToken>,
    pub eos_token: Option<TokenizerConfigToken>,
    pub tokenizer_class: Option<String>,
    pub add_bos_token: Option<bool>,
    pub add_eos_token: Option<bool>,
}

impl HubTokenizerConfig {
    pub fn from_file<P: AsRef<Path>>(filename: P) -> Option<Self> {
        std::fs::read_to_string(filename)
            .ok()
            .and_then(|content| serde_json::from_str(&content).ok())
    }
}

#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
#[serde(untagged)]
pub enum TokenizerConfigToken {
    String(String),
    Object { content: String },
}

impl TokenizerConfigToken {
    pub fn as_str(&self) -> &str {
        match self {
            TokenizerConfigToken::String(s) => s,
            TokenizerConfigToken::Object { content } => content,
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "processor_class")]
pub enum HubPreprocessorConfig {
    Idefics2Processor(Idefics2Preprocessor),
}

impl HubPreprocessorConfig {
    pub fn from_file<P: AsRef<std::path::Path>>(filename: P) -> Option<Self> {
        let content = std::fs::read_to_string(filename).ok()?;
        serde_json::from_str(&content).ok()
    }
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Idefics2Preprocessor {
    #[serde(default)]
    do_image_splitting: bool,
}

#[derive(Debug, Clone, Deserialize, Default)]
pub struct HubProcessorConfig {
    pub chat_template: Option<ChatTemplateVersions>,
    pub image_seq_len: usize,
    pub processor_class: Option<String>,
}

impl HubProcessorConfig {
    pub fn from_file<P: AsRef<Path>>(filename: P) -> Option<Self> {
        std::fs::read_to_string(filename)
            .ok()
            .and_then(|content| serde_json::from_str(&content).ok())
    }
}

#[derive(Clone, Debug, Deserialize, ToSchema, Serialize)]
#[serde(tag = "type", content = "value")]
pub(crate) enum GrammarType {
    /// A string that represents a [JSON Schema](https://json-schema.org/).
    ///
    /// JSON Schema is a declarative language that allows you to annotate JSON documents
    /// with types and descriptions.
    #[serde(rename = "json")]
    #[serde(alias = "json_object")]
    #[schema(example = json ! ({"properties": {"location":{"type": "string"}}}))]
    Json(serde_json::Value),
    #[serde(rename = "regex")]
    Regex(String),
}

#[derive(Clone, Debug, Serialize, ToSchema)]
pub struct Info {
    /// Model info
    #[schema(example = "bigscience/bloom-560m")]
    pub model_id: String,
    #[schema(nullable = true, example = "e985a63cdc139290c5f700ff1929f0b5942cced2")]
    pub model_sha: Option<String>,
    // #[schema(example = "torch.float16")]
    // pub model_dtype: String,
    // #[schema(example = "cuda")]
    // pub model_device_type: String,
    #[schema(nullable = true, example = "text-generation")]
    pub model_pipeline_tag: Option<String>,

    /// Router Parameters
    #[schema(example = "128")]
    pub max_concurrent_requests: usize,
    #[schema(example = "2")]
    pub max_best_of: usize,
    #[schema(example = "4")]
    pub max_stop_sequences: usize,
    #[schema(example = "1024")]
    pub max_input_tokens: usize,
    #[schema(example = "2048")]
    pub max_total_tokens: usize,
    #[schema(example = "2")]
    pub validation_workers: usize,
    #[schema(example = "32")]
    pub max_client_batch_size: usize,

    /// Router Info
    #[schema(example = "text-generation-router")]
    pub router: &'static str,
    #[schema(example = "0.5.0")]
    pub version: &'static str,
    #[schema(nullable = true, example = "null")]
    pub sha: Option<&'static str>,
    #[schema(nullable = true, example = "null")]
    pub docker_label: Option<&'static str>,
}

#[derive(Clone, Debug, Deserialize, ToSchema, Default)]
pub(crate) struct GenerateParameters {
    /// Generate best_of sequences and return the one with the highest token logprobs.
    #[serde(default)]
    #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 1)]
    pub best_of: Option<usize>,

    /// The value used to modulate the logits distribution.
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0.0,
        nullable = true,
        default = "null",
        example = 0.5
    )]
    pub temperature: Option<f32>,

    /// The parameter for repetition penalty. 1.0 means no penalty.
    /// See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0.0,
        nullable = true,
        default = "null",
        example = 1.03
    )]
    pub repetition_penalty: Option<f32>,

    /// The parameter for frequency penalty. 0.0 means no penalty.
    /// Penalize new tokens based on their existing frequency in the text so far,
    /// decreasing the model's likelihood to repeat the same line verbatim.
    #[serde(default)]
    #[schema(
        exclusive_minimum = -2.0,
        nullable = true,
        default = "null",
        example = 0.1
    )]
    pub frequency_penalty: Option<f32>,

    /// The number of highest probability vocabulary tokens to keep for top-k-filtering.
    #[serde(default)]
    #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 10)]
    pub top_k: Option<i32>,

    /// Top-p value for nucleus sampling.
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0.0,
        maximum = 1.0,
        nullable = true,
        default = "null",
        example = 0.95
    )]
    pub top_p: Option<f32>,

    /// Typical Decoding mass
    /// See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information.
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0.0,
        maximum = 1.0,
        nullable = true,
        default = "null",
        example = 0.95
    )]
    pub typical_p: Option<f32>,

    /// Activate logits sampling.
    #[serde(default)]
    #[schema(default = "false", example = true)]
    pub do_sample: bool,

    /// Maximum number of tokens to generate.
    #[serde(default = "default_max_new_tokens")]
    #[schema(nullable = true, default = "100", example = "20")]
    pub max_new_tokens: Option<u32>,

    /// Whether to prepend the prompt to the generated text
    #[serde(default)]
    #[schema(nullable = true, default = "null", example = false)]
    pub return_full_text: Option<bool>,

    /// Stop generating tokens if a member of `stop` is generated.
    #[serde(default)]
    #[schema(inline, max_items = 4, example = json ! (["photographer"]))]
    pub stop: Vec<String>,

    /// Truncate inputs tokens to the given size.
    #[serde(default)]
    #[schema(nullable = true, default = "null", example = "null")]
    pub truncate: Option<usize>,

    /// Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226).
    #[serde(default)]
    #[schema(default = "false", example = true)]
    pub watermark: bool,

    /// Whether to return generation details.
    #[serde(default)]
    #[schema(default = "true")]
    pub details: bool,

    /// Whether to return decoder input token logprobs and ids.
    #[serde(default)]
    #[schema(default = "false")]
    pub decoder_input_details: bool,

    /// Random sampling seed.
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0,
        nullable = true,
        default = "null",
        example = "null"
    )]
    pub seed: Option<u64>,

    /// The number of highest probability vocabulary tokens to keep for top-n-filtering.
    #[serde(default)]
    #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 5)]
    pub top_n_tokens: Option<u32>,

    /// Grammar constraints for the generation.
    #[serde(default)]
    #[schema(nullable = true, default = "null", example = "null")]
    pub grammar: Option<GrammarType>,

    /// Lora adapter id
    #[serde(default)]
    #[schema(nullable = true, default = "null", example = "null")]
    pub adapter_id: Option<String>,
}

fn default_max_new_tokens() -> Option<u32> {
    Some(100)
}

fn default_parameters() -> GenerateParameters {
    GenerateParameters {
        best_of: None,
        temperature: None,
        repetition_penalty: None,
        frequency_penalty: None,
        top_k: None,
        top_p: None,
        typical_p: None,
        do_sample: true,
        max_new_tokens: default_max_new_tokens(),
        return_full_text: None,
        stop: Vec::new(),
        truncate: None,
        watermark: false,
        details: false,
        decoder_input_details: false,
        seed: None,
        top_n_tokens: None,
        grammar: None,
        adapter_id: None,
    }
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Debug)]
#[serde(try_from = "PromptDeserializer")]
pub struct Prompt(pub Vec<String>);

#[derive(Deserialize)]
#[serde(untagged)]
enum PromptDeserializer {
    Single(String),
    Multiple(Vec<String>),
}

impl TryFrom<PromptDeserializer> for Prompt {
    type Error = String;

    fn try_from(value: PromptDeserializer) -> Result<Self, Self::Error> {
        match value {
            PromptDeserializer::Single(s) => Ok(Prompt(vec![s])),
            PromptDeserializer::Multiple(v) => {
                if v.is_empty() {
                    Err(
                        "Empty array detected. Do not use an empty array for the prompt."
                            .to_string(),
                    )
                } else {
                    Ok(Prompt(v))
                }
            }
        }
    }
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Debug)]
pub struct CompletionRequest {
    /// UNUSED
    #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
    /// ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
    pub model: Option<String>,

    /// The prompt to generate completions for.
    #[schema(example = "What is Deep Learning?")]
    pub prompt: Prompt,

    /// The maximum number of tokens that can be generated in the chat completion.
    #[serde(default)]
    #[schema(default = "32")]
    pub max_tokens: Option<u32>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while
    /// lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
    #[serde(default)]
    #[schema(nullable = true, example = 1.0)]
    pub temperature: Option<f32>,

    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the
    /// tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    #[serde(default)]
    #[schema(nullable = true, example = 0.95)]
    pub top_p: Option<f32>,

    #[serde(default = "bool::default")]
    pub stream: bool,

    #[schema(nullable = true, example = 42)]
    pub seed: Option<u64>,

    /// The text to append to the prompt. This is useful for completing sentences or generating a paragraph of text.
    /// Please see the `completion_template` field in the model's tokenizer_config.json file for the completion template.
    #[serde(default)]
    pub suffix: Option<String>,

    #[serde(default)]
    pub repetition_penalty: Option<f32>,

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,
    /// decreasing the model's likelihood to repeat the same line verbatim.
    #[serde(default)]
    #[schema(example = "1.0")]
    pub frequency_penalty: Option<f32>,

    /// Up to 4 sequences where the API will stop generating further tokens.
    #[serde(default)]
    #[schema(nullable = true, example = "null")]
    pub stop: Option<Vec<String>>,
}

#[derive(Clone, Serialize, ToSchema)]
#[serde(tag = "object")]
enum Completion {
    #[serde(rename = "text_completion")]
    Chunk(Chunk),
    #[serde(rename = "text_completion")]
    Final(CompletionFinal),
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Default)]
pub(crate) struct CompletionFinal {
    pub id: String,
    #[schema(example = "1706270835")]
    pub created: u64,
    #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
    pub model: String,
    pub system_fingerprint: String,
    pub choices: Vec<CompletionComplete>,
    pub usage: Usage,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct CompletionComplete {
    pub index: u32,
    pub text: String,
    pub logprobs: Option<Vec<f32>>,
    pub finish_reason: String,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct Chunk {
    pub id: String,
    pub created: u64,
    pub choices: Vec<CompletionComplete>,
    pub model: String,
    pub system_fingerprint: String,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletion {
    pub id: String,
    #[schema(example = "1706270835")]
    pub created: u64,
    #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
    pub model: String,
    pub system_fingerprint: String,
    pub choices: Vec<ChatCompletionComplete>,
    pub usage: Usage,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionComplete {
    pub index: u32,
    pub message: OutputMessage,
    pub logprobs: Option<ChatCompletionLogprobs>,
    pub finish_reason: String,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionLogprobs {
    content: Vec<ChatCompletionLogprob>,
}

impl From<(Token, Vec<Token>)> for ChatCompletionLogprobs {
    fn from(value: (Token, Vec<Token>)) -> Self {
        let (token, top_tokens) = value;

        Self {
            content: vec![ChatCompletionLogprob {
                token: token.text,
                logprob: token.logprob,
                top_logprobs: top_tokens
                    .into_iter()
                    .map(|t| ChatCompletionTopLogprob {
                        token: t.text,
                        logprob: t.logprob,
                    })
                    .collect(),
            }],
        }
    }
}

impl From<(Vec<Token>, Vec<Vec<Token>>)> for ChatCompletionLogprobs {
    fn from(value: (Vec<Token>, Vec<Vec<Token>>)) -> Self {
        let (tokens, top_tokens) = value;

        // Create an iterator that produces None for top_tokens once it's exhausted
        let top_tokens_iter = top_tokens
            .into_iter()
            .map(Some)
            .chain(std::iter::repeat(None));

        let content = tokens
            .into_iter()
            .zip(top_tokens_iter)
            .map(|(t, top_t_option)| ChatCompletionLogprob {
                token: t.text,
                logprob: t.logprob,
                top_logprobs: match top_t_option {
                    Some(top_t) => top_t
                        .into_iter()
                        .map(|t| ChatCompletionTopLogprob {
                            token: t.text,
                            logprob: t.logprob,
                        })
                        .collect(),
                    None => vec![], // Handle the case where there are no top tokens
                },
            })
            .collect();

        Self { content }
    }
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionLogprob {
    token: String,
    logprob: f32,
    top_logprobs: Vec<ChatCompletionTopLogprob>,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionTopLogprob {
    token: String,
    logprob: f32,
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Default)]
pub(crate) struct Usage {
    pub prompt_tokens: u32,
    pub completion_tokens: u32,
    pub total_tokens: u32,
}

#[derive(Clone, Serialize, ToSchema)]
#[serde(tag = "object")]
enum CompletionType {
    #[serde(rename = "chat.completion.chunk")]
    ChatCompletionChunk(ChatCompletionChunk),
    #[serde(rename = "chat.completion")]
    ChatCompletion(ChatCompletion),
}

impl ChatCompletion {
    pub(crate) fn new(
        model: String,
        system_fingerprint: String,
        output: Option<String>,
        created: u64,
        details: Details,
        return_logprobs: bool,
        tool_calls: Option<Vec<ToolCall>>,
    ) -> Self {
        let message = match (output, tool_calls) {
            (Some(content), None) => OutputMessage::ChatMessage(TextMessage {
                role: "assistant".into(),
                content,
            }),
            (None, Some(tool_calls)) => OutputMessage::ToolCall(ToolCallMessage {
                role: "assistant".to_string(),
                tool_calls,
            }),
            (Some(output), Some(_)) => {
                warn!("Received both chat and tool call");
                OutputMessage::ChatMessage(TextMessage {
                    role: "assistant".into(),
                    content: output,
                })
            }
            (None, None) => {
                warn!("Didn't receive an answer");
                OutputMessage::ChatMessage(TextMessage {
                    role: "assistant".into(),
                    content: "".to_string(),
                })
            }
        };
        Self {
            id: String::new(),
            created,
            model,
            system_fingerprint,
            choices: vec![ChatCompletionComplete {
                index: 0,
                message,
                logprobs: return_logprobs
                    .then(|| ChatCompletionLogprobs::from((details.tokens, details.top_tokens))),
                finish_reason: details.finish_reason.format(true),
            }],
            usage: Usage {
                prompt_tokens: details.prefill.len() as u32,
                completion_tokens: details.generated_tokens,
                total_tokens: details.prefill.len() as u32 + details.generated_tokens,
            },
        }
    }
}

#[derive(Clone, Serialize, ToSchema)]
pub(crate) struct ChatCompletionChunk {
    pub id: String,
    #[schema(example = "1706270978")]
    pub created: u64,
    #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
    pub model: String,
    pub system_fingerprint: String,
    pub choices: Vec<ChatCompletionChoice>,
    pub usage: Option<Usage>,
}

#[derive(Clone, Serialize, ToSchema)]
pub(crate) struct ChatCompletionChoice {
    pub index: u32,
    pub delta: ChatCompletionDelta,
    pub logprobs: Option<ChatCompletionLogprobs>,
    pub finish_reason: Option<String>,
}

#[derive(Clone, Deserialize, ToSchema, Serialize, Debug, PartialEq)]
pub struct ToolCallDelta {
    #[schema(example = "assistant")]
    role: String,
    tool_calls: DeltaToolCall,
}

#[derive(Clone, Debug, Serialize, ToSchema)]
#[serde(untagged)]
enum ChatCompletionDelta {
    Chat(TextMessage),
    Tool(ToolCallDelta),
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Debug, PartialEq)]
pub(crate) struct DeltaToolCall {
    pub index: u32,
    pub id: String,
    pub r#type: String,
    pub function: Function,
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Debug, PartialEq)]
pub(crate) struct Function {
    pub name: Option<String>,
    pub arguments: String,
}

#[allow(clippy::too_many_arguments)]
impl ChatCompletionChunk {
    pub(crate) fn new(
        model: String,
        system_fingerprint: String,
        delta: Option<String>,
        tool_calls: Option<Vec<String>>,
        created: u64,
        logprobs: Option<ChatCompletionLogprobs>,
        finish_reason: Option<String>,
        usage: Option<Usage>,
    ) -> Self {
        let delta = match (delta, tool_calls) {
            (Some(delta), _) => ChatCompletionDelta::Chat(TextMessage {
                role: "assistant".to_string(),
                content: delta,
            }),
            (None, Some(tool_calls)) => ChatCompletionDelta::Tool(ToolCallDelta {
                role: "assistant".to_string(),
                tool_calls: DeltaToolCall {
                    index: 0,
                    id: String::new(),
                    r#type: "function".to_string(),
                    function: Function {
                        name: None,
                        arguments: tool_calls[0].to_string(),
                    },
                },
            }),
            (None, None) => ChatCompletionDelta::Chat(TextMessage {
                role: "assistant".to_string(),
                content: "".to_string(),
            }),
        };
        Self {
            id: String::new(),
            created,
            model,
            system_fingerprint,
            choices: vec![ChatCompletionChoice {
                index: 0,
                delta,
                logprobs,
                finish_reason,
            }],
            usage,
        }
    }
}

#[derive(Clone, Deserialize, ToSchema, Serialize)]
pub(crate) struct ChatRequest {
    #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
    /// [UNUSED] ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
    pub model: Option<String>,

    /// A list of messages comprising the conversation so far.
    #[schema(example = "[{\"role\": \"user\", \"content\": \"What is Deep Learning?\"}]")]
    pub messages: Vec<Message>,

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,
    /// decreasing the model's likelihood to repeat the same line verbatim.
    #[serde(default)]
    #[schema(example = "1.0")]
    pub frequency_penalty: Option<f32>,

    /// UNUSED
    /// Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens
    /// (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,
    /// the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,
    /// but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should
    /// result in a ban or exclusive selection of the relevant token.
    #[serde(default)]
    pub logit_bias: Option<Vec<f32>>,

    /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each
    /// output token returned in the content of message.
    #[serde(default)]
    #[schema(example = "false")]
    pub logprobs: Option<bool>,

    /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with
    /// an associated log probability. logprobs must be set to true if this parameter is used.
    #[serde(default)]
    #[schema(example = "5")]
    pub top_logprobs: Option<u32>,

    /// The maximum number of tokens that can be generated in the chat completion.
    #[serde(default)]
    #[schema(example = "32")]
    pub max_tokens: Option<u32>,

    /// UNUSED
    /// How many chat completion choices to generate for each input message. Note that you will be charged based on the
    /// number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
    #[serde(default)]
    #[schema(nullable = true, example = "2")]
    pub n: Option<u32>,

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,
    /// increasing the model's likelihood to talk about new topics
    #[serde(default)]
    #[schema(nullable = true, example = 0.1)]
    pub presence_penalty: Option<f32>,

    /// Up to 4 sequences where the API will stop generating further tokens.
    #[serde(default)]
    #[schema(nullable = true, example = "null")]
    pub stop: Option<Vec<String>>,

    #[serde(default = "bool::default")]
    pub stream: bool,

    #[schema(nullable = true, example = 42)]
    pub seed: Option<u64>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while
    /// lower values like 0.2 will make it more focused and deterministic.
    ///
    /// We generally recommend altering this or `top_p` but not both.
    #[serde(default)]
    #[schema(nullable = true, example = 1.0)]
    pub temperature: Option<f32>,

    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the
    /// tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    #[serde(default)]
    #[schema(nullable = true, example = 0.95)]
    pub top_p: Option<f32>,

    /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of
    /// functions the model may generate JSON inputs for.
    #[serde(default)]
    #[schema(nullable = true, example = "null")]
    pub tools: Option<Vec<Tool>>,

    /// A prompt to be appended before the tools
    #[serde(default)]
    #[schema(
        nullable = true,
        example = "Given the functions available, please respond with a JSON for a function call with its proper arguments that best answers the given prompt. Respond in the format {name: function name, parameters: dictionary of argument name and its value}.Do not use variables."
    )]
    pub tool_prompt: Option<String>,

    /// A specific tool to use. If not provided, the model will default to use any of the tools provided in the tools parameter.
    #[serde(default)]
    #[schema(nullable = true, example = "null")]
    pub tool_choice: ToolChoice,

    /// Response format constraints for the generation.
    ///
    /// NOTE: A request can use `response_format` OR `tools` but not both.
    #[serde(default)]
    #[schema(nullable = true, default = "null", example = "null")]
    pub response_format: Option<GrammarType>,

    /// A guideline to be used in the chat_template
    #[serde(default)]
    #[schema(nullable = true, default = "null", example = "null")]
    pub guideline: Option<String>,

    /// Options for streaming response. Only set this when you set stream: true.
    #[serde(default)]
    #[schema(nullable = true, example = "null")]
    pub stream_options: Option<StreamOptions>,
}

#[derive(Clone, Deserialize, ToSchema, Serialize)]
struct StreamOptions {
    /// If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value.
    #[schema(example = "true")]
    include_usage: bool,
}

pub fn default_tool_prompt() -> String {
    "\nGiven the functions available, please respond with a JSON for a function call with its proper arguments that best answers the given prompt. Respond in the format {name: function name, parameters: dictionary of argument name and its value}.Do not use variables.\n".to_string()
}

#[derive(Clone, Debug, Deserialize, PartialEq, Serialize, ToSchema)]
#[serde(untagged)]
pub enum ToolType {
    OneOf,
    FunctionName(String),
    Function { function: FunctionName },
    NoTool,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct FunctionName {
    pub name: String,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default, ToSchema)]
#[serde(from = "ToolTypeDeserializer")]
pub struct ToolChoice(pub Option<ToolType>);

#[derive(Deserialize)]
#[serde(untagged)]
enum ToolTypeDeserializer {
    String(String),
    ToolType(ToolType),
}

impl From<ToolTypeDeserializer> for ToolChoice {
    fn from(value: ToolTypeDeserializer) -> Self {
        match value {
            ToolTypeDeserializer::String(s) => match s.as_str() {
                "none" => ToolChoice(Some(ToolType::NoTool)),
                "auto" => ToolChoice(Some(ToolType::OneOf)),
                _ => ToolChoice(Some(ToolType::FunctionName(s))),
            },
            ToolTypeDeserializer::ToolType(tool_type) => ToolChoice(Some(tool_type)),
        }
    }
}

#[derive(Debug, Deserialize, Serialize, ToSchema, PartialEq)]
pub struct JsonSchemaTool {
    #[serde(flatten)]
    functions_map: FunctionsMap,
    properties: Properties,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct FunctionsMap {
    #[serde(rename = "$functions")]
    functions: std::collections::HashMap<String, serde_json::Value>,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct FunctionRef {
    #[serde(rename = "$ref")]
    ref_path: String,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct Properties {
    #[serde(serialize_with = "serialize_function")]
    function: Vec<FunctionRef>,
}

fn serialize_function<S>(functions: &Vec<FunctionRef>, serializer: S) -> Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    use serde::ser::SerializeStruct;
    let mut state = serializer.serialize_struct("Function", 1)?;
    state.serialize_field("anyOf", functions)?;
    state.end()
}

#[derive(Clone, Debug, Deserialize, Serialize, ToSchema, Default, PartialEq)]
pub(crate) struct FunctionDefinition {
    #[serde(default)]
    pub description: Option<String>,
    pub name: String,
    #[serde(alias = "parameters")]
    pub arguments: serde_json::Value,
}

#[derive(Clone, Debug, Deserialize, Serialize, ToSchema)]
pub(crate) struct Tool {
    // The type of the tool. Currently, only 'function' is supported.
    #[schema(example = "function")]
    pub r#type: String,
    // The function definition describing the tool.
    pub function: FunctionDefinition,
}

#[derive(Clone, Serialize, Deserialize, Default)]
pub(crate) struct ChatTemplateInputs<'a> {
    messages: Vec<TextMessage>,
    bos_token: Option<&'a str>,
    eos_token: Option<&'a str>,
    add_generation_prompt: bool,
    tools: Option<Vec<Tool>>,
    guideline: Option<&'a str>,
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Default, Debug, PartialEq)]
pub(crate) struct ToolCall {
    pub id: String,
    pub r#type: String,
    pub function: FunctionDefinition,
}

#[derive(Clone, Deserialize, ToSchema, Serialize, Debug, PartialEq)]
pub struct Url {
    url: String,
}

#[derive(Clone, Deserialize, ToSchema, Serialize, Debug, PartialEq)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum MessageChunk {
    Text { text: String },
    ImageUrl { image_url: Url },
}

#[derive(Clone, Deserialize, ToSchema, Serialize, Debug, PartialEq)]
pub struct Message {
    #[schema(example = "user")]
    role: String,
    #[schema(example = "My name is David and I")]
    pub content: MessageContent,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[schema(example = "\"David\"")]
    name: Option<String>,
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Debug, PartialEq)]
#[serde(untagged)]
pub enum MessageContent {
    SingleText(String),
    MultipleChunks(Vec<MessageChunk>),
}

// Pushing a chunk to a single text message will convert it to a multiple chunks message
impl MessageContent {
    pub fn push(&mut self, chunk: MessageChunk) {
        match self {
            MessageContent::SingleText(text) => {
                *self = MessageContent::MultipleChunks(vec![
                    MessageChunk::Text { text: text.clone() },
                    chunk,
                ]);
            }
            MessageContent::MultipleChunks(chunks) => {
                chunks.push(chunk);
            }
        }
    }
}

#[derive(Clone, Deserialize, ToSchema, Serialize, Debug, PartialEq)]
pub struct TextMessage {
    #[schema(example = "user")]
    pub role: String,
    #[schema(example = "My name is David and I")]
    pub content: String,
}

impl From<Message> for TextMessage {
    fn from(value: Message) -> Self {
        TextMessage {
            role: value.role,
            content: match value.content {
                MessageContent::SingleText(text) => text,
                MessageContent::MultipleChunks(chunks) => chunks
                    .into_iter()
                    .map(|chunk| match chunk {
                        MessageChunk::Text { text } => text,
                        MessageChunk::ImageUrl { image_url } => format!("![]({})", image_url.url),
                    })
                    .collect::<Vec<_>>()
                    .join(""),
            },
        }
    }
}

#[derive(Clone, Deserialize, ToSchema, Serialize, Debug, PartialEq)]
pub struct ToolCallMessage {
    #[schema(example = "assistant")]
    role: String,
    tool_calls: Vec<ToolCall>,
}

#[derive(Clone, Deserialize, ToSchema, Serialize, Debug, PartialEq)]
#[serde(untagged)]
pub(crate) enum OutputMessage {
    ChatMessage(TextMessage),
    ToolCall(ToolCallMessage),
}

#[derive(Clone, Debug, Deserialize, ToSchema)]
pub(crate) struct GenerateRequest {
    #[schema(example = "My name is Olivier and I")]
    pub inputs: String,
    #[serde(default = "default_parameters")]
    pub parameters: GenerateParameters,

    /// This is used internally because some requests
    /// already contain the templated input, so
    /// we shouldn't add the special tokens.
    #[serde(default = "default_true", skip)]
    pub add_special_tokens: bool,
}

fn default_true() -> bool {
    true
}

#[derive(Clone, Debug, Deserialize, ToSchema)]
pub(crate) struct CompatGenerateRequest {
    #[schema(example = "My name is Olivier and I")]
    pub inputs: String,
    #[serde(default = "default_parameters")]
    pub parameters: GenerateParameters,
    #[serde(default)]
    #[schema(default = "false")]
    pub stream: bool,
}

impl From<CompatGenerateRequest> for GenerateRequest {
    fn from(req: CompatGenerateRequest) -> Self {
        Self {
            inputs: req.inputs,
            add_special_tokens: true,
            parameters: req.parameters,
        }
    }
}

#[derive(Debug, Serialize, ToSchema)]
pub struct PrefillToken {
    #[schema(example = 0)]
    pub id: u32,
    #[schema(example = "test")]
    pub text: String,
    #[schema(nullable = true, example = - 0.34)]
    pub logprob: f32,
}

#[derive(Debug, Serialize, ToSchema, Clone)]
pub struct Token {
    #[schema(example = 0)]
    pub id: u32,
    #[schema(example = "test")]
    pub text: String,
    #[schema(nullable = true, example = - 0.34)]
    pub logprob: f32,
    #[schema(example = "false")]
    pub special: bool,
}

#[derive(Debug, Serialize, ToSchema)]
pub struct SimpleToken {
    #[schema(example = 0)]
    id: u32,
    #[schema(example = "test")]
    text: String,
    #[schema(example = 0)]
    start: usize,
    #[schema(example = 2)]
    stop: usize,
}

#[derive(Debug, Serialize, ToSchema)]
#[serde(rename_all(serialize = "snake_case"))]
#[schema(example = "Length")]
pub enum FinishReason {
    #[schema(rename = "length")]
    Length,
    #[serde(rename = "eos_token")]
    #[schema(rename = "eos_token")]
    EndOfSequenceToken,
    #[schema(rename = "stop_sequence")]
    StopSequence,
}

impl std::fmt::Display for FinishReason {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            FinishReason::Length => write!(f, "length"),
            FinishReason::EndOfSequenceToken => write!(f, "eos_token"),
            FinishReason::StopSequence => write!(f, "stop_sequence"),
        }
    }
}

impl FinishReason {
    pub fn format(&self, use_stop: bool) -> String {
        match self {
            FinishReason::EndOfSequenceToken if use_stop => "stop".to_string(),
            _ => self.to_string(),
        }
    }
}

#[derive(Serialize, ToSchema)]
pub(crate) struct BestOfSequence {
    #[schema(example = "test")]
    pub generated_text: String,
    #[schema(example = "length")]
    pub finish_reason: FinishReason,
    #[schema(example = 1)]
    pub generated_tokens: u32,
    #[schema(nullable = true, example = 42)]
    pub seed: Option<u64>,
    pub prefill: Vec<PrefillToken>,
    pub tokens: Vec<Token>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub top_tokens: Vec<Vec<Token>>,
}

#[derive(Serialize, ToSchema)]
pub(crate) struct Details {
    #[schema(example = "length")]
    pub finish_reason: FinishReason,
    #[schema(example = 1)]
    pub generated_tokens: u32,
    #[schema(nullable = true, example = 42)]
    pub seed: Option<u64>,
    pub prefill: Vec<PrefillToken>,
    pub tokens: Vec<Token>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub best_of_sequences: Option<Vec<BestOfSequence>>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub top_tokens: Vec<Vec<Token>>,
}

#[derive(Serialize, ToSchema)]
pub(crate) struct GenerateResponse {
    #[schema(example = "test")]
    pub generated_text: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub details: Option<Details>,
}

#[derive(Serialize, ToSchema)]
pub(crate) struct ChatTokenizeResponse {
    pub(crate) tokenize_response: TokenizeResponse,
    pub(crate) templated_text: String,
}

#[derive(Serialize, ToSchema)]
#[serde(transparent)]
pub(crate) struct TokenizeResponse(Vec<SimpleToken>);

#[derive(Serialize, ToSchema)]
pub(crate) struct StreamDetails {
    #[schema(example = "length")]
    pub finish_reason: FinishReason,
    #[schema(example = 1)]
    pub generated_tokens: u32,
    #[schema(nullable = true, example = 42)]
    pub seed: Option<u64>,
    #[schema(example = 1)]
    pub input_length: u32,
}

#[derive(Serialize, ToSchema)]
pub(crate) struct StreamResponse {
    pub index: u32,
    pub token: Token,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub top_tokens: Vec<Token>,
    #[schema(nullable = true, default = "null", example = "test")]
    pub generated_text: Option<String>,
    #[schema(nullable = true, default = "null")]
    pub details: Option<StreamDetails>,
}

#[derive(Serialize, ToSchema)]
pub(crate) struct ErrorResponse {
    pub error: String,
    pub error_type: String,
}

#[derive(Serialize, Deserialize, ToSchema)]
pub(crate) struct ModelInfo {
    #[schema(example = "gpt2")]
    pub id: String,
    #[schema(example = "model")]
    pub object: String,
    #[schema(example = 1686935002)]
    pub created: u64,
    #[schema(example = "openai")]
    pub owned_by: String,
}

#[derive(Serialize, Deserialize, ToSchema)]
pub(crate) struct ModelsInfo {
    #[schema(example = "list")]
    pub object: String,
    pub data: Vec<ModelInfo>,
}

impl Default for ModelsInfo {
    fn default() -> Self {
        ModelsInfo {
            object: "list".to_string(),
            data: Vec::new(),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;
    use tokenizers::Tokenizer;

    pub(crate) async fn get_tokenizer() -> Tokenizer {
        let api = hf_hub::api::sync::Api::new().unwrap();
        let repo = api.model("gpt2".to_string());
        let filename = repo.get("tokenizer.json").unwrap();
        Tokenizer::from_file(filename).unwrap()
    }
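
    // A minimal sketch covering the `FromStr` impl and `block_size` mapping of
    // `Attention` defined above; it only uses items from this file.
    #[test]
    fn test_attention_parse_and_block_size() {
        assert_eq!("paged".parse::<Attention>().unwrap().block_size(), 16);
        assert_eq!("flashdecoding".parse::<Attention>().unwrap().block_size(), 256);
        assert_eq!("flashinfer".parse::<Attention>().unwrap().block_size(), 1);
        assert!("unknown".parse::<Attention>().is_err());
    }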

    #[test]
    fn test_hub_nested_tokens_tokenizer_config() {
        // this is a subset of the tokenizer_config.json file
        // in this case we expect the tokens to be encoded as simple strings
        let json_content = r#"{
            "chat_template": "test",
            "bos_token": "<|begin▁of▁sentence|>",
            "eos_token": "<|end▁of▁sentence|>"
        }"#;

        let config: HubTokenizerConfig = serde_json::from_str(json_content).unwrap();

        // check that we successfully parsed the tokens
        assert_eq!(
            config.chat_template,
            Some(ChatTemplateVersions::Single("test".to_string()))
        );
        assert_eq!(
            config.bos_token,
            Some(TokenizerConfigToken::String(
                "<|begin▁of▁sentence|>".to_string()
            ))
        );
        assert_eq!(
            config.eos_token,
            Some(TokenizerConfigToken::String(
                "<|end▁of▁sentence|>".to_string()
            ))
        );

        // in this case we expect the tokens to be encoded as structured tokens
        // we want the content of the structured token
        let json_content = r#"{
            "chat_template": "test",
            "bos_token": {
              "__type": "AddedToken",
              "content": "<|begin▁of▁sentence|>",
              "lstrip": false,
              "normalized": true,
              "rstrip": false,
              "single_word": false
            },
            "eos_token": {
              "__type": "AddedToken",
              "content": "<|end▁of▁sentence|>",
              "lstrip": false,
              "normalized": true,
              "rstrip": false,
              "single_word": false
            }
        }"#;

        let config: HubTokenizerConfig = serde_json::from_str(json_content).unwrap();

        // check that we successfully parsed the tokens
        assert_eq!(
            config.chat_template,
            Some(ChatTemplateVersions::Single("test".to_string()))
        );
        assert_eq!(
            config.bos_token,
            Some(TokenizerConfigToken::Object {
                content: "<|begin▁of▁sentence|>".to_string()
            })
        );
        assert_eq!(
            config.eos_token,
            Some(TokenizerConfigToken::Object {
                content: "<|end▁of▁sentence|>".to_string()
            })
        );
    }

    #[test]
    fn test_chat_simple_string() {
        let json = json!({
            "model": "",
            "messages": [{
                "role": "user",
                "content": "What is Deep Learning?"
            }]
        });
        let request: ChatRequest = serde_json::from_str(json.to_string().as_str()).unwrap();

        assert_eq!(
            request.messages[0],
            Message {
                role: "user".to_string(),
                content: MessageContent::SingleText("What is Deep Learning?".to_string()),
                name: None
            }
        );
    }
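
    // A minimal sketch of the `PromptDeserializer` behaviour above: a single string
    // and a non-empty array both deserialize into `Prompt`, while an empty array is rejected.
    #[test]
    fn test_prompt_deserialization() {
        let single: Prompt = serde_json::from_str(r#""What is Deep Learning?""#).unwrap();
        assert_eq!(single.0, vec!["What is Deep Learning?".to_string()]);

        let multiple: Prompt = serde_json::from_str(r#"["a", "b"]"#).unwrap();
        assert_eq!(multiple.0, vec!["a".to_string(), "b".to_string()]);

        assert!(serde_json::from_str::<Prompt>("[]").is_err());
    }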

    #[test]
    fn test_message_content_append() {
        let mut content = MessageContent::SingleText("Initial text".to_string());
        let chunk = MessageChunk::Text {
            text: "Additional text".to_string(),
        };

        content.push(chunk);

        match content {
            MessageContent::MultipleChunks(chunks) => {
                assert_eq!(chunks.len(), 2);
                assert_eq!(
                    chunks[0],
                    MessageChunk::Text {
                        text: "Initial text".to_string()
                    }
                );
                assert_eq!(
                    chunks[1],
                    MessageChunk::Text {
                        text: "Additional text".to_string()
                    }
                );
            }
            _ => panic!("Expected MultipleChunks, but got a different variant"),
        }
    }
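
    // A minimal sketch of the serde defaults on `GenerateParameters`: an empty JSON
    // object picks up `default_max_new_tokens` and the `#[serde(default)]` field values.
    #[test]
    fn test_generate_parameters_defaults() {
        let params: GenerateParameters = serde_json::from_str("{}").unwrap();
        assert_eq!(params.max_new_tokens, Some(100));
        assert!(!params.do_sample);
        assert!(params.stop.is_empty());
        assert!(params.grammar.is_none());
    }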

    #[test]
    fn test_chat_request() {
        let json = json!({
            "model": "",
            "messages": [{
                "role": "user",
                "content": [
                    {"type": "text", "text": "Whats in this image?"},
                    {"type": "image_url", "image_url": {"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png"}},
                ]
            }]
        });
        let request: ChatRequest = serde_json::from_str(json.to_string().as_str()).unwrap();

        assert_eq!(
            request.messages[0],
            Message{
                role: "user".to_string(),
                content: MessageContent::MultipleChunks(vec![
                    MessageChunk::Text { text: "Whats in this image?".to_string() },
                    MessageChunk::ImageUrl { image_url: Url { url: "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png".to_string() }},
                ]),
                name: None
            }
        );
    }
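
    // A minimal sketch of the adjacently tagged `GrammarType` representation defined
    // above: the `type` field selects the variant and `value` carries the payload.
    #[test]
    fn test_grammar_type_deserialization() {
        let grammar: GrammarType =
            serde_json::from_value(json!({"type": "json", "value": {"type": "object"}})).unwrap();
        assert!(matches!(grammar, GrammarType::Json(_)));

        let grammar: GrammarType =
            serde_json::from_value(json!({"type": "regex", "value": "\\d+"})).unwrap();
        assert!(matches!(grammar, GrammarType::Regex(_)));
    }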

    #[test]
    fn text_message_convert() {
        let message = Message{
                role: "user".to_string(),
                content: MessageContent::MultipleChunks(vec![
                    MessageChunk::Text { text: "Whats in this image?".to_string() },
                    MessageChunk::ImageUrl { image_url: Url { url: "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png".to_string() } }
                ]),
                name: None
            };
        let textmsg: TextMessage = message.into();
        assert_eq!(textmsg.content, "Whats in this image?![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png)");
    }

    #[test]
    fn test_chat_stream_options() {
        let json = json!({
            "model": "",
            "stream_options": {"include_usage": true},
            "messages": [{
                "role": "user",
                "content": "Hello"
            }]
        });
        let request: ChatRequest = serde_json::from_str(json.to_string().as_str()).unwrap();

        assert!(matches!(
            request.stream_options,
            Some(StreamOptions {
                include_usage: true
            })
        ));
    }
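
    // A minimal sketch of the string shorthands handled by `From<ToolTypeDeserializer>`
    // above; `get_weather` is only an illustrative function name.
    #[test]
    fn test_tool_choice_from_string() {
        let none: ToolChoice = serde_json::from_value(json!("none")).unwrap();
        assert_eq!(none, ToolChoice(Some(ToolType::NoTool)));

        let auto: ToolChoice = serde_json::from_value(json!("auto")).unwrap();
        assert_eq!(auto, ToolChoice(Some(ToolType::OneOf)));

        let named: ToolChoice = serde_json::from_value(json!("get_weather")).unwrap();
        assert_eq!(
            named,
            ToolChoice(Some(ToolType::FunctionName("get_weather".to_string())))
        );
    }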

    #[test]
    fn openai_output() {
        let message = OutputMessage::ChatMessage(TextMessage {
            role: "assistant".to_string(),
            content: "This is the answer".to_string(),
        });
        let serialized = serde_json::to_string(&message).unwrap();
        assert_eq!(
            serialized,
            r#"{"role":"assistant","content":"This is the answer"}"#
        );

        let message = OutputMessage::ToolCall(ToolCallMessage {
            role: "assistant".to_string(),
            tool_calls: vec![ToolCall {
                id: "0".to_string(),
                r#type: "function".to_string(),
                function: FunctionDefinition {
                    description: None,
                    name: "myfn".to_string(),
                    arguments: json!({
                        "format": "csv"
                    }),
                },
            }],
        });
        let serialized = serde_json::to_string(&message).unwrap();
        assert_eq!(
            serialized,
            r#"{"role":"assistant","tool_calls":[{"id":"0","type":"function","function":{"description":null,"name":"myfn","arguments":{"format":"csv"}}}]}"#
        );
    }
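
    // A minimal sketch of the OpenAI-style mapping done by `FinishReason::format`:
    // only `eos_token` is rewritten to "stop" when `use_stop` is true.
    #[test]
    fn test_finish_reason_format() {
        assert_eq!(FinishReason::EndOfSequenceToken.format(true), "stop");
        assert_eq!(FinishReason::EndOfSequenceToken.format(false), "eos_token");
        assert_eq!(FinishReason::Length.format(true), "length");
        assert_eq!(FinishReason::StopSequence.to_string(), "stop_sequence");
    }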
}