pub mod config;
mod health;
/// Text Generation Inference Webserver
mod infer;
mod queue;
pub mod server;
mod validation;

use infer::{Infer, InferError, InferStreamResponse};
use queue::{Entry, Queue};
use serde::{Deserialize, Serialize};
use tokio::sync::OwnedSemaphorePermit;
use tokio_stream::wrappers::UnboundedReceiverStream;
use utoipa::ToSchema;
use validation::Validation;

/// Type alias for generation responses
pub(crate) type GenerateStreamResponse = (
    OwnedSemaphorePermit,
    u32, // input_length
    UnboundedReceiverStream<Result<InferStreamResponse, InferError>>,
);

#[derive(Clone, Deserialize, ToSchema)]
pub(crate) struct VertexInstance {
    #[schema(example = "What is Deep Learning?")]
    pub inputs: String,
    #[schema(nullable = true, default = "null", example = "null")]
    pub parameters: Option<GenerateParameters>,
}

#[derive(Deserialize, ToSchema)]
pub(crate) struct VertexRequest {
    #[serde(rename = "instances")]
    pub instances: Vec<VertexInstance>,
}

#[derive(Clone, Deserialize, ToSchema, Serialize)]
pub(crate) struct VertexResponse {
    pub predictions: Vec<String>,
}

/// Hub type
#[derive(Clone, Debug, Deserialize)]
pub struct HubModelInfo {
    #[serde(rename(deserialize = "id"))]
    pub model_id: String,
    pub sha: Option<String>,
    pub pipeline_tag: Option<String>,
}

#[derive(Debug, Clone, Deserialize, PartialEq)]
pub struct ChatTemplate {
    name: String,
    template: String,
}

#[derive(Debug, Clone, Deserialize, PartialEq)]
#[serde(untagged)]
pub enum ChatTemplateVersions {
    Single(String),
    Multiple(Vec<ChatTemplate>),
}

#[derive(Debug, Clone, Deserialize, Default)]
pub struct HubTokenizerConfig {
    pub chat_template: Option<ChatTemplateVersions>,
    pub completion_template: Option<String>,
    #[serde(deserialize_with = "token_serde::deserialize")]
    pub bos_token: Option<String>,
    #[serde(deserialize_with = "token_serde::deserialize")]
    pub eos_token: Option<String>,
}

impl HubTokenizerConfig {
    pub fn from_file<P: AsRef<std::path::Path>>(filename: P) -> Option<Self> {
        let content = std::fs::read_to_string(filename).ok()?;
        serde_json::from_str(&content).ok()
    }
}
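
// A small, illustrative sketch (not part of the original test suite) of how
// `HubTokenizerConfig::from_file` is meant to be used: any I/O or parse error
// is collapsed into `None`. The temporary file name below is hypothetical.
#[cfg(test)]
mod hub_tokenizer_config_from_file_tests {
    use super::*;

    #[test]
    fn from_file_returns_none_on_missing_file() {
        assert!(HubTokenizerConfig::from_file("/definitely/not/a/real/path.json").is_none());
    }

    #[test]
    fn from_file_parses_a_minimal_config() {
        let path = std::env::temp_dir().join("tgi_tokenizer_config_example.json");
        std::fs::write(
            &path,
            r#"{"chat_template": "test", "bos_token": "<s>", "eos_token": "</s>"}"#,
        )
        .unwrap();

        let config = HubTokenizerConfig::from_file(&path).expect("config should parse");
        assert_eq!(
            config.chat_template,
            Some(ChatTemplateVersions::Single("test".to_string()))
        );
        assert_eq!(config.bos_token, Some("<s>".to_string()));

        let _ = std::fs::remove_file(&path);
    }
}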

#[derive(Clone, Debug, Deserialize, ToSchema, Serialize)]
#[serde(tag = "type", content = "value")]
pub(crate) enum GrammarType {
    /// A string that represents a [JSON Schema](https://json-schema.org/).
    ///
    /// JSON Schema is a declarative language that allows annotating JSON documents
    /// with types and descriptions.
    #[serde(rename = "json")]
    #[schema(example = json!({"properties": {"location":{"type": "string"}}}))]
    Json(serde_json::Value),
    #[serde(rename = "regex")]
    Regex(String),
}
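
// Illustrative sketch of the wire format implied by the `tag`/`content`
// attributes above: a grammar serializes as {"type": ..., "value": ...}.
#[cfg(test)]
mod grammar_type_tests {
    use super::*;

    #[test]
    fn grammar_serializes_as_type_and_value() {
        let grammar = GrammarType::Regex("[a-z]+".to_string());
        let value = serde_json::to_value(&grammar).unwrap();
        assert_eq!(value, serde_json::json!({"type": "regex", "value": "[a-z]+"}));
    }
}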

mod token_serde {
    use super::*;
    use serde::de;
    use serde::Deserializer;
    use serde_json::Value;

    pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<String>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let value = Value::deserialize(deserializer)?;

        match value {
            Value::String(s) => Ok(Some(s)),
            Value::Object(map) => {
                if let Some(content) = map.get("content").and_then(|v| v.as_str()) {
                    Ok(Some(content.to_string()))
                } else {
                    Err(de::Error::custom(
                        "content key not found in structured token",
                    ))
                }
            }
            Value::Null => Ok(None),
            _ => Err(de::Error::custom("invalid token format")),
        }
    }
}

#[derive(Clone, Debug, Serialize, ToSchema)]
pub struct Info {
    /// Model info
    #[schema(example = "bigscience/bloom-560m")]
    pub model_id: String,
    #[schema(nullable = true, example = "e985a63cdc139290c5f700ff1929f0b5942cced2")]
    pub model_sha: Option<String>,
    #[schema(example = "torch.float16")]
    pub model_dtype: String,
    #[schema(example = "cuda")]
    pub model_device_type: String,
    #[schema(nullable = true, example = "text-generation")]
    pub model_pipeline_tag: Option<String>,
    /// Router Parameters
    #[schema(example = "128")]
    pub max_concurrent_requests: usize,
    #[schema(example = "2")]
    pub max_best_of: usize,
    #[schema(example = "4")]
    pub max_stop_sequences: usize,
    #[schema(example = "1024")]
    pub max_input_length: usize,
    #[schema(example = "2048")]
    pub max_total_tokens: usize,
    #[schema(example = "1.2")]
    pub waiting_served_ratio: f32,
    #[schema(example = "32000")]
    pub max_batch_total_tokens: u32,
    #[schema(example = "20")]
    pub max_waiting_tokens: usize,
    #[schema(nullable = true, example = "null")]
    pub max_batch_size: Option<usize>,
    #[schema(example = "2")]
    pub validation_workers: usize,
    #[schema(example = "32")]
    pub max_client_batch_size: usize,
    /// Router Info
    #[schema(example = "0.5.0")]
    pub version: &'static str,
    #[schema(nullable = true, example = "null")]
    pub sha: Option<&'static str>,
    #[schema(nullable = true, example = "null")]
    pub docker_label: Option<&'static str>,
}

#[derive(Clone, Debug, Deserialize, ToSchema, Default)]
pub(crate) struct GenerateParameters {
    /// Generate best_of sequences and return the one with the highest token logprobs.
    #[serde(default)]
    #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 1)]
    pub best_of: Option<usize>,

    /// The value used to modulate the logits distribution.
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0.0,
        nullable = true,
        default = "null",
        example = 0.5
    )]
    pub temperature: Option<f32>,

    /// The parameter for repetition penalty. 1.0 means no penalty.
    /// See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0.0,
        nullable = true,
        default = "null",
        example = 1.03
    )]
    pub repetition_penalty: Option<f32>,

    /// The parameter for frequency penalty. 0.0 means no penalty.
    /// Penalize new tokens based on their existing frequency in the text so far,
    /// decreasing the model's likelihood to repeat the same line verbatim.
    #[serde(default)]
    #[schema(
        exclusive_minimum = -2.0,
        nullable = true,
        default = "null",
        example = 0.1
    )]
    pub frequency_penalty: Option<f32>,

    /// The number of highest probability vocabulary tokens to keep for top-k-filtering.
    #[serde(default)]
    #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 10)]
    pub top_k: Option<i32>,

    /// Top-p value for nucleus sampling.
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0.0,
        maximum = 1.0,
        nullable = true,
        default = "null",
        example = 0.95
    )]
    pub top_p: Option<f32>,

    /// Typical Decoding mass
    /// See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information.
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0.0,
        maximum = 1.0,
        nullable = true,
        default = "null",
        example = 0.95
    )]
    pub typical_p: Option<f32>,

    /// Activate logits sampling.
    #[serde(default)]
    #[schema(default = "false", example = true)]
    pub do_sample: bool,

    /// Maximum number of tokens to generate.
    #[serde(default = "default_max_new_tokens")]
    #[schema(nullable = true, default = "100", example = "20")]
    pub max_new_tokens: Option<u32>,

    /// Whether to prepend the prompt to the generated text
    #[serde(default)]
    #[schema(nullable = true, default = "null", example = false)]
    pub return_full_text: Option<bool>,

    /// Stop generating tokens if a member of `stop` is generated.
    #[serde(default)]
    #[schema(inline, max_items = 4, example = json!(["photographer"]))]
    pub stop: Vec<String>,

    /// Truncate inputs tokens to the given size.
    #[serde(default)]
    #[schema(nullable = true, default = "null", example = "null")]
    pub truncate: Option<usize>,

    /// Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226).
    #[serde(default)]
    #[schema(default = "false", example = true)]
    pub watermark: bool,

    /// Whether to return generation details.
    #[serde(default)]
    #[schema(default = "true")]
    pub details: bool,

    /// Whether to return decoder input token logprobs and ids.
    #[serde(default)]
    #[schema(default = "false")]
    pub decoder_input_details: bool,

    /// Random sampling seed.
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0,
        nullable = true,
        default = "null",
        example = "null"
    )]
    pub seed: Option<u64>,

    /// The number of highest probability vocabulary tokens to keep for top-n-filtering.
    #[serde(default)]
    #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 5)]
    pub top_n_tokens: Option<u32>,

    /// Grammar constraints for the generation.
    #[serde(default)]
    #[schema(nullable = true, default = "null", example = "null")]
    pub grammar: Option<GrammarType>,
}

fn default_max_new_tokens() -> Option<u32> {
    Some(100)
}

fn default_parameters() -> GenerateParameters {
    GenerateParameters {
        best_of: None,
        temperature: None,
        repetition_penalty: None,
        frequency_penalty: None,
        top_k: None,
        top_p: None,
        typical_p: None,
        do_sample: true,
        max_new_tokens: default_max_new_tokens(),
        return_full_text: None,
        stop: Vec::new(),
        truncate: None,
        watermark: false,
        details: false,
        decoder_input_details: false,
        seed: None,
        top_n_tokens: None,
        grammar: None,
    }
}

mod prompt_serde {
    use serde::{self, Deserialize, Deserializer};
    use serde_json::Value;

    pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<String>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let value = Value::deserialize(deserializer)?;
        match value {
            Value::String(s) => Ok(vec![s]),
            Value::Array(arr) if arr.is_empty() => Err(serde::de::Error::custom(
                "Empty array detected. Do not use an empty array for the prompt.",
            )),
            Value::Array(arr) => arr
                .iter()
                .map(|v| match v {
                    Value::String(s) => Ok(s.to_owned()),
                    _ => Err(serde::de::Error::custom("Expected a string")),
                })
                .collect(),
            _ => Err(serde::de::Error::custom(
                "Expected a string or an array of strings",
            )),
        }
    }
}
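
// A quick illustration (added here as a sketch) of the prompt shapes accepted
// above: a bare string becomes a single-element Vec, arrays pass through, and
// an empty array is rejected. `serde_json::Value` implements `Deserializer`,
// so the function can be exercised directly.
#[cfg(test)]
mod prompt_serde_tests {
    use super::*;

    #[test]
    fn prompt_accepts_a_string_or_an_array_of_strings() {
        let single = prompt_serde::deserialize(serde_json::json!("Hello")).unwrap();
        assert_eq!(single, vec!["Hello".to_string()]);

        let multi = prompt_serde::deserialize(serde_json::json!(["Hello", "World"])).unwrap();
        assert_eq!(multi, vec!["Hello".to_string(), "World".to_string()]);

        assert!(prompt_serde::deserialize(serde_json::json!([])).is_err());
    }
}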

#[derive(Clone, Deserialize, Serialize, ToSchema, Debug)]
pub struct CompletionRequest {
    /// UNUSED
    #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
    /// ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
    pub model: String,

    /// The prompt to generate completions for.
    #[schema(example = "What is Deep Learning?")]
    #[serde(deserialize_with = "prompt_serde::deserialize")]
    pub prompt: Vec<String>,

    /// The maximum number of tokens that can be generated in the completion.
    #[serde(default)]
    #[schema(default = "32")]
    pub max_tokens: Option<u32>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while
    /// lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
    #[serde(default)]
    #[schema(nullable = true, example = 1.0)]
    pub temperature: Option<f32>,

    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the
    /// tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    #[serde(default)]
    #[schema(nullable = true, example = 0.95)]
    pub top_p: Option<f32>,

    #[serde(default = "bool::default")]
    pub stream: bool,

    #[schema(nullable = true, example = 42)]
    pub seed: Option<u64>,

    /// The text to append to the prompt. This is useful for completing sentences or generating a paragraph of text.
    /// Please see the completion_template field in the model's tokenizer_config.json file for the completion template.
    #[serde(default)]
    pub suffix: Option<String>,

    #[serde(default)]
    pub repetition_penalty: Option<f32>,

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,
    /// decreasing the model's likelihood to repeat the same line verbatim.
    #[serde(default)]
    #[schema(example = "1.0")]
    pub frequency_penalty: Option<f32>,
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Default)]
pub(crate) struct Completion {
    pub id: String,
    pub object: String,
    #[schema(example = "1706270835")]
    pub created: u64,
    #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
    pub model: String,
    pub system_fingerprint: String,
    pub choices: Vec<CompletionComplete>,
    pub usage: Usage,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct CompletionComplete {
    pub index: u32,
    pub text: String,
    pub logprobs: Option<Vec<f32>>,
    pub finish_reason: String,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletion {
    pub id: String,
    pub object: String,
    #[schema(example = "1706270835")]
    pub created: u64,
    #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
    pub model: String,
    pub system_fingerprint: String,
    pub choices: Vec<ChatCompletionComplete>,
    pub usage: Usage,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionComplete {
    pub index: u32,
    pub message: Message,
    pub logprobs: Option<ChatCompletionLogprobs>,
    pub finish_reason: String,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionLogprobs {
    content: Vec<ChatCompletionLogprob>,
}

impl From<(Token, Vec<Token>)> for ChatCompletionLogprobs {
    fn from(value: (Token, Vec<Token>)) -> Self {
        let (token, top_tokens) = value;

        Self {
            content: vec![ChatCompletionLogprob {
                token: token.text,
                logprob: token.logprob,
                top_logprobs: top_tokens
                    .into_iter()
                    .map(|t| ChatCompletionTopLogprob {
                        token: t.text,
                        logprob: t.logprob,
                    })
                    .collect(),
            }],
        }
    }
}

impl From<(Vec<Token>, Vec<Vec<Token>>)> for ChatCompletionLogprobs {
    fn from(value: (Vec<Token>, Vec<Vec<Token>>)) -> Self {
        let (tokens, top_tokens) = value;

        // Create an iterator that produces None for top_tokens once it's exhausted
        let top_tokens_iter = top_tokens
            .into_iter()
            .map(Some)
            .chain(std::iter::repeat(None));

        let content = tokens
            .into_iter()
            .zip(top_tokens_iter)
            .map(|(t, top_t_option)| ChatCompletionLogprob {
                token: t.text,
                logprob: t.logprob,
                top_logprobs: match top_t_option {
                    Some(top_t) => top_t
                        .into_iter()
                        .map(|t| ChatCompletionTopLogprob {
                            token: t.text,
                            logprob: t.logprob,
                        })
                        .collect(),
                    None => vec![], // Handle the case where there are no top tokens
                },
            })
            .collect();

        Self { content }
    }
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionLogprob {
    token: String,
    logprob: f32,
    top_logprobs: Vec<ChatCompletionTopLogprob>,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionTopLogprob {
    token: String,
    logprob: f32,
}
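
// Sketch of the zipping behaviour implemented above: when there are fewer
// top-token lists than generated tokens, the remaining entries fall back to
// empty `top_logprobs`, so the two vectors do not need the same length.
#[cfg(test)]
mod chat_completion_logprobs_tests {
    use super::*;

    #[test]
    fn missing_top_tokens_become_empty_logprob_lists() {
        let token = |text: &str, logprob: f32| Token {
            id: 0,
            text: text.to_string(),
            logprob,
            special: false,
        };

        let tokens = vec![token("Hello", -0.1), token("world", -0.2)];
        // Only the first position carries top tokens in this example.
        let top_tokens = vec![vec![token("Hi", -0.3)]];

        let logprobs = ChatCompletionLogprobs::from((tokens, top_tokens));
        assert_eq!(logprobs.content.len(), 2);
        assert_eq!(logprobs.content[0].top_logprobs.len(), 1);
        assert!(logprobs.content[1].top_logprobs.is_empty());
    }
}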

#[derive(Clone, Deserialize, Serialize, ToSchema, Default)]
pub(crate) struct Usage {
    pub prompt_tokens: u32,
    pub completion_tokens: u32,
    pub total_tokens: u32,
}

impl ChatCompletion {
    pub(crate) fn new(
        model: String,
        system_fingerprint: String,
        output: Option<String>,
        created: u64,
        details: Details,
        return_logprobs: bool,
        tool_calls: Option<Vec<ToolCall>>,
    ) -> Self {
        Self {
            id: String::new(),
            object: "text_completion".into(),
            created,
            model,
            system_fingerprint,
            choices: vec![ChatCompletionComplete {
                index: 0,
                message: Message {
                    role: "assistant".into(),
                    content: output,
                    name: None,
                    tool_calls,
                },
                logprobs: return_logprobs
                    .then(|| ChatCompletionLogprobs::from((details.tokens, details.top_tokens))),
                finish_reason: details.finish_reason.to_string(),
            }],
            usage: Usage {
                prompt_tokens: details.prefill.len() as u32,
                completion_tokens: details.generated_tokens,
                total_tokens: details.prefill.len() as u32 + details.generated_tokens,
            },
        }
    }
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct CompletionCompleteChunk {
    pub id: String,
    pub object: String,
    pub created: u64,
    pub choices: Vec<CompletionComplete>,
    pub model: String,
    pub system_fingerprint: String,
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionChunk {
    pub id: String,
    pub object: String,
    #[schema(example = "1706270978")]
    pub created: u64,
    #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
    pub model: String,
    pub system_fingerprint: String,
    pub choices: Vec<ChatCompletionChoice>,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionChoice {
    pub index: u32,
    pub delta: ChatCompletionDelta,
    pub logprobs: Option<ChatCompletionLogprobs>,
    pub finish_reason: Option<String>,
}

#[derive(Clone, Debug, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionDelta {
    #[schema(example = "user")]
    // TODO Modify this to a true enum.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub role: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[schema(example = "What is Deep Learning?")]
    pub content: Option<String>,
    // default to None
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tool_calls: Option<DeltaToolCall>,
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Debug)]
pub(crate) struct DeltaToolCall {
    pub index: u32,
    pub id: String,
    pub r#type: String,
    pub function: Function,
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Debug)]
pub(crate) struct Function {
    pub name: Option<String>,
    pub arguments: String,
}

#[allow(clippy::too_many_arguments)]
impl ChatCompletionChunk {
    pub(crate) fn new(
        model: String,
        system_fingerprint: String,
        delta: Option<String>,
        tool_calls: Option<Vec<String>>,
        created: u64,
        logprobs: Option<ChatCompletionLogprobs>,
        finish_reason: Option<String>,
    ) -> Self {
        let delta = match (delta, tool_calls) {
            (Some(delta), _) => ChatCompletionDelta {
                role: Some("assistant".to_string()),
                content: Some(delta),
                tool_calls: None,
            },
            (None, Some(tool_calls)) => ChatCompletionDelta {
                role: Some("assistant".to_string()),
                content: None,
                tool_calls: Some(DeltaToolCall {
                    index: 0,
                    id: String::new(),
                    r#type: "function".to_string(),
                    function: Function {
                        name: None,
                        arguments: tool_calls[0].to_string(),
                    },
                }),
            },
            (None, None) => ChatCompletionDelta {
                role: None,
                content: None,
                tool_calls: None,
            },
        };
        Self {
            id: String::new(),
            object: "text_completion".to_string(),
            created,
            model,
            system_fingerprint,
            choices: vec![ChatCompletionChoice {
                index: 0,
                delta,
                logprobs,
                finish_reason,
            }],
        }
    }
}
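
// A small sketch of the delta shapes produced by `ChatCompletionChunk::new`:
// a text delta carries the assistant role and content, while a chunk built
// with neither text nor tool calls ends up with an empty delta. Model name
// and fingerprint values are placeholders.
#[cfg(test)]
mod chat_completion_chunk_tests {
    use super::*;

    #[test]
    fn text_delta_has_assistant_role_and_content() {
        let chunk = ChatCompletionChunk::new(
            "mistralai/Mistral-7B-Instruct-v0.2".to_string(),
            "0".to_string(),
            Some("Hello".to_string()),
            None,
            1706270978,
            None,
            None,
        );

        let delta = &chunk.choices[0].delta;
        assert_eq!(delta.role.as_deref(), Some("assistant"));
        assert_eq!(delta.content.as_deref(), Some("Hello"));
        assert!(delta.tool_calls.is_none());

        let empty = ChatCompletionChunk::new(
            "mistralai/Mistral-7B-Instruct-v0.2".to_string(),
            "0".to_string(),
            None,
            None,
            1706270978,
            None,
            Some("stop".to_string()),
        );
        assert!(empty.choices[0].delta.role.is_none());
        assert!(empty.choices[0].delta.content.is_none());
    }
}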

#[derive(Clone, Deserialize, ToSchema, Serialize)]
pub(crate) struct ChatRequest {
    #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
    /// [UNUSED] ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
    pub model: String,

    /// A list of messages comprising the conversation so far.
    #[schema(example = "[{\"role\": \"user\", \"content\": \"What is Deep Learning?\"}]")]
    pub messages: Vec<Message>,

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,
    /// decreasing the model's likelihood to repeat the same line verbatim.
    #[serde(default)]
    #[schema(example = "1.0")]
    pub frequency_penalty: Option<f32>,

    /// UNUSED
    /// Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens
    /// (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,
    /// the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,
    /// but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should
    /// result in a ban or exclusive selection of the relevant token.
    #[serde(default)]
    pub logit_bias: Option<Vec<f32>>,

    /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each
    /// output token returned in the content of message.
    #[serde(default)]
    #[schema(example = "false")]
    pub logprobs: Option<bool>,

    /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with
    /// an associated log probability. logprobs must be set to true if this parameter is used.
    #[serde(default)]
    #[schema(example = "5")]
    pub top_logprobs: Option<u32>,

    /// The maximum number of tokens that can be generated in the chat completion.
    #[serde(default)]
    #[schema(example = "32")]
    pub max_tokens: Option<u32>,

    /// UNUSED
    /// How many chat completion choices to generate for each input message. Note that you will be charged based on the
    /// number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
    #[serde(default)]
    #[schema(nullable = true, example = "2")]
    pub n: Option<u32>,

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,
    /// increasing the model's likelihood to talk about new topics
    #[serde(default)]
    #[schema(nullable = true, example = 0.1)]
    pub presence_penalty: Option<f32>,

    /// Up to 4 sequences where the API will stop generating further tokens.
    #[serde(default)]
    #[schema(nullable = true, example = "null")]
    pub stop: Option<Vec<String>>,

    #[serde(default = "bool::default")]
    pub stream: bool,

    #[schema(nullable = true, example = 42)]
    pub seed: Option<u64>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while
    /// lower values like 0.2 will make it more focused and deterministic.
    ///
    /// We generally recommend altering this or `top_p` but not both.
    #[serde(default)]
    #[schema(nullable = true, example = 1.0)]
    pub temperature: Option<f32>,

    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the
    /// tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    #[serde(default)]
    #[schema(nullable = true, example = 0.95)]
    pub top_p: Option<f32>,

    /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of
    /// functions the model may generate JSON inputs for.
    #[serde(default)]
    #[schema(nullable = true, example = "null")]
    pub tools: Option<Vec<Tool>>,

    /// A prompt to be appended before the tools
    #[serde(default = "default_tool_prompt")]
    #[schema(
        nullable = true,
        example = "\"You will be presented with a JSON schema representing a set of tools.\nIf the user request lacks sufficient information to make a precise tool selection: Do not invent any tool's properties, instead notify with an error message.\n\nJSON Schema:\n\""
    )]
    pub tool_prompt: Option<String>,

    /// A specific tool to use. If not provided, the model will default to use any of the tools provided in the tools parameter.
    #[serde(default)]
    #[schema(nullable = true, example = "null")]
    #[serde(deserialize_with = "deserialize_tool_choice::deserialize")]
    pub tool_choice: Option<ToolType>,
}

fn default_tool_prompt() -> Option<String> {
    Some(
        "\nYou will be presented with a JSON schema representing a set of tools.\nIf the user request lacks sufficient information to make a precise tool selection: Do not invent any tool's properties, instead notify with an error message.\n\nJSON Schema:\n".to_string(),
    )
}
#[derive(Clone, Deserialize, ToSchema, Serialize)]
enum ToolType {
    FunctionName(String),
    OneOf,
}

/// Deserialize the tool choice from the JSON input or from the function name ("none" is allowed but mapped to None)
mod deserialize_tool_choice {
    use super::*;
    use serde::de;
    use serde::Deserializer;
    use serde_json::Value;

    pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<ToolType>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let value = Value::deserialize(deserializer)?;

        match value {
            Value::String(s) => match s.as_str() {
                "none" => Ok(None),
                "auto" => Ok(Some(ToolType::OneOf)),
                _ => Ok(Some(ToolType::FunctionName(s))),
            },
            Value::Object(map) => {
                if let Some(content) = map
                    .get("function")
                    .and_then(|v| v.get("name"))
                    .and_then(|v| v.as_str())
                {
                    Ok(Some(ToolType::FunctionName(content.to_string())))
                } else {
                    Err(de::Error::custom("function key not found in tool choice"))
                }
            }
            Value::Null => Ok(Some(ToolType::OneOf)),
            _ => Err(de::Error::custom("invalid token format")),
        }
    }
}
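
// Illustrative mapping of the accepted `tool_choice` values: "none" disables
// tool use, "auto" falls back to `OneOf`, and any other string is read as a
// function name. The function name used below is hypothetical.
#[cfg(test)]
mod deserialize_tool_choice_tests {
    use super::*;

    #[test]
    fn tool_choice_strings_map_to_tool_types() {
        let none = deserialize_tool_choice::deserialize(serde_json::json!("none")).unwrap();
        assert!(none.is_none());

        let auto = deserialize_tool_choice::deserialize(serde_json::json!("auto")).unwrap();
        assert!(matches!(auto, Some(ToolType::OneOf)));

        let named =
            deserialize_tool_choice::deserialize(serde_json::json!("get_weather")).unwrap();
        assert!(matches!(named, Some(ToolType::FunctionName(name)) if name == "get_weather"));
    }
}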

#[derive(Debug, Deserialize, Serialize, ToSchema, PartialEq)]
pub struct Tools {
    #[serde(flatten)]
    functions_map: FunctionsMap,
    properties: Properties,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct FunctionsMap {
    #[serde(rename = "$functions")]
    functions: std::collections::HashMap<String, serde_json::Value>,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct FunctionRef {
    #[serde(rename = "$ref")]
    ref_path: String,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct Properties {
    #[serde(serialize_with = "serialize_function")]
    function: Vec<FunctionRef>,
}

fn serialize_function<S>(functions: &Vec<FunctionRef>, serializer: S) -> Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    use serde::ser::SerializeStruct;
    let mut state = serializer.serialize_struct("Function", 1)?;
    state.serialize_field("anyOf", functions)?;
    state.end()
}
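
// Sketch of the JSON shape produced by the custom serializer above: the
// function references end up under an `anyOf` key inside `properties`. The
// function name and `$ref` path are illustrative placeholders.
#[cfg(test)]
mod tools_serialization_tests {
    use super::*;

    #[test]
    fn tools_serialize_function_refs_under_any_of() {
        let tools = Tools {
            functions_map: FunctionsMap {
                functions: std::collections::HashMap::from([(
                    "get_weather".to_string(),
                    serde_json::json!({"type": "object"}),
                )]),
            },
            properties: Properties {
                function: vec![FunctionRef {
                    ref_path: "#/$functions/get_weather".to_string(),
                }],
            },
        };

        let value = serde_json::to_value(&tools).unwrap();
        assert_eq!(
            value["properties"]["function"]["anyOf"][0]["$ref"],
            "#/$functions/get_weather"
        );
        assert!(value["$functions"]["get_weather"].is_object());
    }
}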

#[derive(Clone, Debug, Deserialize, Serialize, ToSchema, Default)]
pub(crate) struct FunctionDefinition {
    #[serde(default)]
    pub description: Option<String>,
    pub name: String,
    #[serde(alias = "parameters")]
    pub arguments: serde_json::Value,
}

#[derive(Clone, Debug, Deserialize, Serialize, ToSchema)]
pub(crate) struct Tool {
    // The type of the tool. Currently, only 'function' is supported.
    #[schema(example = "function")]
    pub r#type: String,
    // Grab the tool as generic JSON for debugging purposes.
    pub function: FunctionDefinition,
}

#[derive(Clone, Serialize, Deserialize, Default)]
pub(crate) struct ChatTemplateInputs<'a> {
    messages: Vec<Message>,
    bos_token: Option<&'a str>,
    eos_token: Option<&'a str>,
    add_generation_prompt: bool,
    tools: Option<&'a str>,
    tools_prompt: Option<&'a str>,
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Default, Debug)]
pub(crate) struct ToolCall {
    pub id: u32,
    pub r#type: String,
    pub function: FunctionDefinition,
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Default, Debug)]
pub(crate) struct Text {
    #[serde(default)]
    pub text: String,
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Default, Debug)]
pub(crate) struct ImageUrl {
    #[serde(default)]
    pub url: String,
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Default, Debug)]
pub(crate) struct Content {
    pub r#type: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub text: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub image_url: Option<ImageUrl>,
}

mod message_content_serde {
    use super::*;
    use serde::de;
    use serde::Deserializer;
    use serde_json::Value;

    pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<String>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let value = Value::deserialize(deserializer)?;
        match value {
            Value::String(s) => Ok(Some(s)),
            Value::Array(arr) => {
                let results: Result<Vec<String>, _> = arr
                    .into_iter()
                    .map(|v| {
                        let content: Content =
                            serde_json::from_value(v).map_err(de::Error::custom)?;
                        match content.r#type.as_str() {
                            "text" => Ok(content.text.unwrap_or_default()),
                            "image_url" => {
                                if let Some(url) = content.image_url {
                                    Ok(format!("![]({})", url.url))
                                } else {
                                    Ok(String::new())
                                }
                            }
                            _ => Err(de::Error::custom("invalid content type")),
                        }
                    })
                    .collect();

                results.map(|strings| Some(strings.join("")))
            }
            Value::Null => Ok(None),
            _ => Err(de::Error::custom("invalid token format")),
        }
    }
}
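
// Sketch of how chunked chat content is flattened by the deserializer above:
// "text" parts are kept verbatim and "image_url" parts become markdown image
// links before everything is joined into one string. The URL is a placeholder.
#[cfg(test)]
mod message_content_serde_tests {
    use super::*;

    #[test]
    fn chunked_content_is_flattened_into_a_single_string() {
        let content = message_content_serde::deserialize(serde_json::json!([
            {"type": "text", "text": "What is in this image?"},
            {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}}
        ]))
        .unwrap();

        assert_eq!(
            content,
            Some("What is in this image?![](https://example.com/cat.png)".to_string())
        );
    }
}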

#[derive(Clone, Deserialize, ToSchema, Serialize, Debug)]
pub(crate) struct Message {
    #[schema(example = "user")]
    pub role: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[schema(example = "My name is David and I")]
    #[serde(deserialize_with = "message_content_serde::deserialize")]
    pub content: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[schema(example = "\"David\"")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tool_calls: Option<Vec<ToolCall>>,
}

#[derive(Clone, Debug, Deserialize, ToSchema)]
pub(crate) struct GenerateRequest {
    #[schema(example = "My name is Olivier and I")]
    pub inputs: String,
    #[serde(default = "default_parameters")]
    pub parameters: GenerateParameters,
}

#[derive(Clone, Debug, Deserialize, ToSchema)]
pub(crate) struct CompatGenerateRequest {
    #[schema(example = "My name is Olivier and I")]
    pub inputs: String,
    #[serde(default = "default_parameters")]
    pub parameters: GenerateParameters,
    #[serde(default)]
OlivierDehaene's avatar
OlivierDehaene committed
979
    #[schema(default = "false")]
980
981
982
983
984
985
986
987
988
989
990
991
    pub stream: bool,
}

impl From<CompatGenerateRequest> for GenerateRequest {
    fn from(req: CompatGenerateRequest) -> Self {
        Self {
            inputs: req.inputs,
            parameters: req.parameters,
        }
    }
}
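
// A short sketch of the defaulting behaviour wired up through
// `default_parameters`: an inputs-only payload picks up the default
// generation parameters, including `max_new_tokens = Some(100)`.
#[cfg(test)]
mod generate_request_tests {
    use super::*;

    #[test]
    fn parameters_default_when_missing() {
        let req: GenerateRequest =
            serde_json::from_str(r#"{"inputs": "What is Deep Learning?"}"#).unwrap();
        assert_eq!(req.parameters.max_new_tokens, default_max_new_tokens());
        assert!(req.parameters.do_sample);
    }
}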

#[derive(Debug, Serialize, ToSchema)]
pub struct PrefillToken {
    #[schema(example = 0)]
    id: u32,
    #[schema(example = "test")]
    text: String,
    #[schema(nullable = true, example = -0.34)]
    logprob: f32,
}

#[derive(Debug, Serialize, ToSchema, Clone)]
pub struct Token {
    #[schema(example = 0)]
    id: u32,
    #[schema(example = "test")]
    text: String,
    #[schema(nullable = true, example = -0.34)]
    logprob: f32,
    #[schema(example = "false")]
    special: bool,
}

#[derive(Debug, Serialize, ToSchema)]
pub struct SimpleToken {
    #[schema(example = 0)]
    id: u32,
    #[schema(example = "test")]
    text: String,
    #[schema(example = 0)]
    start: usize,
    #[schema(example = 2)]
    stop: usize,
}

#[derive(Serialize, ToSchema)]
#[serde(rename_all(serialize = "snake_case"))]
#[schema(example = "Length")]
pub(crate) enum FinishReason {
    #[schema(rename = "length")]
    Length,
    #[serde(rename = "eos_token")]
    #[schema(rename = "eos_token")]
    EndOfSequenceToken,
    #[schema(rename = "stop_sequence")]
    StopSequence,
}

impl std::fmt::Display for FinishReason {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            FinishReason::Length => write!(f, "length"),
            FinishReason::EndOfSequenceToken => write!(f, "eos_token"),
            FinishReason::StopSequence => write!(f, "stop_sequence"),
        }
    }
}

#[derive(Serialize, ToSchema)]
pub(crate) struct BestOfSequence {
    #[schema(example = "test")]
    pub generated_text: String,
    #[schema(example = "length")]
    pub finish_reason: FinishReason,
    #[schema(example = 1)]
    pub generated_tokens: u32,
    #[schema(nullable = true, example = 42)]
    pub seed: Option<u64>,
    pub prefill: Vec<PrefillToken>,
    pub tokens: Vec<Token>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub top_tokens: Vec<Vec<Token>>,
}

#[derive(Serialize, ToSchema)]
pub(crate) struct Details {
    #[schema(example = "length")]
    pub finish_reason: FinishReason,
    #[schema(example = 1)]
    pub generated_tokens: u32,
    #[schema(nullable = true, example = 42)]
    pub seed: Option<u64>,
    pub prefill: Vec<PrefillToken>,
    pub tokens: Vec<Token>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub best_of_sequences: Option<Vec<BestOfSequence>>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub top_tokens: Vec<Vec<Token>>,
}

#[derive(Serialize, ToSchema)]
pub(crate) struct GenerateResponse {
    #[schema(example = "test")]
    pub generated_text: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub details: Option<Details>,
}

#[derive(Serialize, ToSchema)]
#[serde(transparent)]
pub(crate) struct TokenizeResponse(Vec<SimpleToken>);

#[derive(Serialize, ToSchema)]
pub(crate) struct StreamDetails {
    #[schema(example = "length")]
    pub finish_reason: FinishReason,
    #[schema(example = 1)]
    pub generated_tokens: u32,
    #[schema(nullable = true, example = 42)]
    pub seed: Option<u64>,
}

#[derive(Serialize, ToSchema)]
pub(crate) struct StreamResponse {
    pub index: u32,
    pub token: Token,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub top_tokens: Vec<Token>,
    #[schema(nullable = true, default = "null", example = "test")]
    pub generated_text: Option<String>,
    #[schema(nullable = true, default = "null")]
    pub details: Option<StreamDetails>,
}

#[derive(Serialize, ToSchema)]
pub(crate) struct ErrorResponse {
    pub error: String,
    pub error_type: String,
}

#[cfg(test)]
mod tests {
    use super::*;

    use tokenizers::Tokenizer;

    pub(crate) async fn get_tokenizer() -> Tokenizer {
        let api = hf_hub::api::sync::Api::new().unwrap();
        let repo = api.model("gpt2".to_string());
        let filename = repo.get("tokenizer.json").unwrap();
        Tokenizer::from_file(filename).unwrap()
    }

    #[test]
    fn test_hub_nested_tokens_tokenizer_config() {
        // this is a subset of the tokenizer.json file
        // in this case we expect the tokens to be encoded as simple strings
        let json_content = r#"{
            "chat_template": "test",
            "bos_token": "<|begin▁of▁sentence|>",
            "eos_token": "<|end▁of▁sentence|>"
        }"#;

        let config: HubTokenizerConfig = serde_json::from_str(json_content).unwrap();

        // check that we successfully parsed the tokens
        assert_eq!(
            config.chat_template,
            Some(ChatTemplateVersions::Single("test".to_string()))
        );
        assert_eq!(
            config.bos_token,
            Some("<|begin▁of▁sentence|>".to_string())
        );
        assert_eq!(config.eos_token, Some("<|end▁of▁sentence|>".to_string()));

        // in this case we expect the tokens to be encoded as structured tokens
        // we want the content of the structured token
        let json_content = r#"{
            "chat_template": "test",
            "bos_token": {
              "__type": "AddedToken",
              "content": "<|begin▁of▁sentence|>",
              "lstrip": false,
              "normalized": true,
              "rstrip": false,
              "single_word": false
            },
            "eos_token": {
              "__type": "AddedToken",
              "content": "<|end▁of▁sentence|>",
              "lstrip": false,
              "normalized": true,
              "rstrip": false,
              "single_word": false
            }
        }"#;

        let config: HubTokenizerConfig = serde_json::from_str(json_content).unwrap();

        // check that we successfully parsed the tokens
        assert_eq!(
            config.chat_template,
            Some(ChatTemplateVersions::Single("test".to_string()))
        );
        assert_eq!(
            config.bos_token,
            Some("<|begin▁of▁sentence|>".to_string())
        );
        assert_eq!(config.eos_token, Some("<|end▁of▁sentence|>".to_string()));
    }
}