mod health;
/// Text Generation Inference Webserver
mod infer;
mod queue;
pub mod server;
mod validation;

use infer::{Infer, InferError, InferStreamResponse};
use queue::{Entry, Queue};
use serde::{Deserialize, Serialize};
use tokio::sync::OwnedSemaphorePermit;
use tokio_stream::wrappers::UnboundedReceiverStream;
use utoipa::ToSchema;
use validation::Validation;

/// Type alias for generation responses
pub(crate) type GenerateStreamResponse = (
    OwnedSemaphorePermit,
    u32, // input_length
    UnboundedReceiverStream<Result<InferStreamResponse, InferError>>,
);

#[derive(Clone, Deserialize, ToSchema)]
pub(crate) struct VertexInstance {
    #[schema(example = "What is Deep Learning?")]
    pub inputs: String,
    #[schema(nullable = true, default = "null", example = "null")]
    pub parameters: Option<GenerateParameters>,
}

#[derive(Deserialize, ToSchema)]
pub(crate) struct VertexRequest {
    #[serde(rename = "instances")]
    pub instances: Vec<VertexInstance>,
}

#[derive(Clone, Deserialize, ToSchema, Serialize)]
pub(crate) struct VertexResponse {
    pub predictions: Vec<String>,
}

/// Hub type
#[derive(Clone, Debug, Deserialize)]
pub struct HubModelInfo {
    #[serde(rename(deserialize = "id"))]
    pub model_id: String,
    pub sha: Option<String>,
    pub pipeline_tag: Option<String>,
}

#[derive(Clone, Deserialize, Default)]
pub struct HubTokenizerConfig {
    pub chat_template: Option<String>,
    pub completion_template: Option<String>,
    #[serde(deserialize_with = "token_serde::deserialize")]
    pub bos_token: Option<String>,
    #[serde(deserialize_with = "token_serde::deserialize")]
    pub eos_token: Option<String>,
}

impl HubTokenizerConfig {
    pub fn from_file(filename: &std::path::Path) -> Self {
        let content = std::fs::read_to_string(filename).unwrap();
        serde_json::from_str(&content).unwrap_or_default()
    }
}

#[derive(Clone, Debug, Deserialize, ToSchema)]
#[serde(tag = "type", content = "value")]
pub(crate) enum GrammarType {
    /// A string that represents a [JSON Schema](https://json-schema.org/).
    ///
    /// JSON Schema is a declarative language that allows you to annotate JSON documents
    /// with types and descriptions.
    #[serde(rename = "json")]
    #[schema(example = json!({"properties": {"location":{"type": "string"}}}))]
    Json(serde_json::Value),
    #[serde(rename = "regex")]
    Regex(String),
}

mod token_serde {
    use super::*;
    use serde::de;
    use serde::Deserializer;
    use serde_json::Value;

    pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<String>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let value = Value::deserialize(deserializer)?;

        match value {
            Value::String(s) => Ok(Some(s)),
            Value::Object(map) => {
                if let Some(content) = map.get("content").and_then(|v| v.as_str()) {
                    Ok(Some(content.to_string()))
                } else {
                    Err(de::Error::custom(
                        "content key not found in structured token",
                    ))
                }
            }
            _ => Err(de::Error::custom("invalid token format")),
        }
    }
}

#[derive(Clone, Debug, Serialize, ToSchema)]
pub struct Info {
    /// Model info
    #[schema(example = "bigscience/bloom-560m")]
    pub model_id: String,
    #[schema(nullable = true, example = "e985a63cdc139290c5f700ff1929f0b5942cced2")]
    pub model_sha: Option<String>,
    #[schema(example = "torch.float16")]
    pub model_dtype: String,
    #[schema(example = "cuda")]
    pub model_device_type: String,
    #[schema(nullable = true, example = "text-generation")]
    pub model_pipeline_tag: Option<String>,
    /// Router Parameters
    #[schema(example = "128")]
    pub max_concurrent_requests: usize,
    #[schema(example = "2")]
    pub max_best_of: usize,
    #[schema(example = "4")]
    pub max_stop_sequences: usize,
    #[schema(example = "1024")]
    pub max_input_length: usize,
    #[schema(example = "2048")]
    pub max_total_tokens: usize,
    #[schema(example = "1.2")]
    pub waiting_served_ratio: f32,
    #[schema(example = "32000")]
    pub max_batch_total_tokens: u32,
    #[schema(example = "20")]
    pub max_waiting_tokens: usize,
    #[schema(nullable = true, example = "null")]
    pub max_batch_size: Option<usize>,
    #[schema(example = "2")]
    pub validation_workers: usize,
    /// Router Info
    #[schema(example = "0.5.0")]
    pub version: &'static str,
    #[schema(nullable = true, example = "null")]
    pub sha: Option<&'static str>,
    #[schema(nullable = true, example = "null")]
    pub docker_label: Option<&'static str>,
}

#[derive(Clone, Debug, Deserialize, ToSchema, Default)]
pub(crate) struct GenerateParameters {
    #[serde(default)]
    #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 1)]
    pub best_of: Option<usize>,
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0.0,
        nullable = true,
        default = "null",
        example = 0.5
    )]
    pub temperature: Option<f32>,
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0.0,
        nullable = true,
        default = "null",
        example = 1.03
    )]
    pub repetition_penalty: Option<f32>,
    #[serde(default)]
    #[schema(
        exclusive_minimum = -2.0,
        nullable = true,
        default = "null",
        example = 0.1
    )]
    pub frequency_penalty: Option<f32>,
    #[serde(default)]
    #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 10)]
    pub top_k: Option<i32>,
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0.0,
        maximum = 1.0,
        nullable = true,
        default = "null",
        example = 0.95
    )]
    pub top_p: Option<f32>,
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0.0,
        maximum = 1.0,
        nullable = true,
        default = "null",
        example = 0.95
    )]
    pub typical_p: Option<f32>,
    #[serde(default)]
    #[schema(default = "false", example = true)]
    pub do_sample: bool,
    #[serde(default = "default_max_new_tokens")]
    #[schema(nullable = true, default = "100", example = "20")]
    pub max_new_tokens: Option<u32>,
    #[serde(default)]
    #[schema(nullable = true, default = "null", example = false)]
    pub return_full_text: Option<bool>,
    #[serde(default)]
    #[schema(inline, max_items = 4, example = json!(["photographer"]))]
    pub stop: Vec<String>,
    #[serde(default)]
    #[schema(nullable = true, default = "null", example = "null")]
    pub truncate: Option<usize>,
    #[serde(default)]
    #[schema(default = "false", example = true)]
    pub watermark: bool,
    #[serde(default)]
    #[schema(default = "true")]
    pub details: bool,
    #[serde(default)]
    #[schema(default = "true")]
    pub decoder_input_details: bool,
    #[serde(default)]
    #[schema(
        exclusive_minimum = 0,
        nullable = true,
        default = "null",
        example = "null"
    )]
    pub seed: Option<u64>,
    #[serde(default)]
    #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 5)]
    pub top_n_tokens: Option<u32>,
    #[serde(default)]
    pub grammar: Option<GrammarType>,
}

fn default_max_new_tokens() -> Option<u32> {
    Some(100)
}

fn default_parameters() -> GenerateParameters {
    GenerateParameters {
        best_of: None,
        temperature: None,
        repetition_penalty: None,
        frequency_penalty: None,
        top_k: None,
        top_p: None,
        typical_p: None,
        do_sample: true,
        max_new_tokens: default_max_new_tokens(),
        return_full_text: None,
        stop: Vec::new(),
        truncate: None,
        watermark: false,
        details: false,
        decoder_input_details: false,
        seed: None,
        top_n_tokens: None,
        grammar: None,
    }
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Debug)]
pub struct CompletionRequest {
    /// UNUSED
    #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
    /// ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
    pub model: String,

    /// The prompt to generate completions for.
    #[schema(example = "What is Deep Learning?")]
    pub prompt: String,

    /// The maximum number of tokens that can be generated in the chat completion.
    #[serde(default)]
    #[schema(default = "32")]
    pub max_tokens: Option<u32>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while
    /// lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
    #[serde(default)]
    #[schema(nullable = true, example = 1.0)]
    pub temperature: Option<f32>,

    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the
    /// tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    #[serde(default)]
    #[schema(nullable = true, example = 0.95)]
    pub top_p: Option<f32>,

    #[serde(default = "bool::default")]
    pub stream: bool,

    #[schema(nullable = true, example = 42)]
    pub seed: Option<u64>,

    /// The text to append to the prompt. This is useful for completing sentences or generating a paragraph of text.
    /// Please see the completion_template field in the model's tokenizer_config.json file for the completion template.
    #[serde(default)]
    pub suffix: Option<String>,

    #[serde(default)]
    pub repetition_penalty: Option<f32>,

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,
    /// decreasing the model's likelihood to repeat the same line verbatim.
    #[serde(default)]
    #[schema(example = "1.0")]
    pub frequency_penalty: Option<f32>,
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Default)]
pub(crate) struct Completion {
    pub id: String,
    pub object: String,
    #[schema(example = "1706270835")]
    pub created: u64,
    #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
    pub model: String,
    pub system_fingerprint: String,
    pub choices: Vec<CompletionComplete>,
    pub usage: Usage,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct CompletionComplete {
    pub index: u32,
    pub text: String,
    pub logprobs: Option<Vec<f32>>,
    pub finish_reason: String,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletion {
    pub id: String,
    pub object: String,
    #[schema(example = "1706270835")]
    pub created: u64,
    #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
    pub model: String,
    pub system_fingerprint: String,
    pub choices: Vec<ChatCompletionComplete>,
    pub usage: Usage,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionComplete {
    pub index: u32,
    pub message: Message,
    pub logprobs: Option<ChatCompletionLogprobs>,
    pub finish_reason: String,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionLogprobs {
    content: Vec<ChatCompletionLogprob>,
}

impl From<(Token, Vec<Token>)> for ChatCompletionLogprobs {
    fn from(value: (Token, Vec<Token>)) -> Self {
        let (token, top_tokens) = value;

        Self {
            content: vec![ChatCompletionLogprob {
                token: token.text,
                logprob: token.logprob,
                top_logprobs: top_tokens
                    .into_iter()
                    .map(|t| ChatCompletionTopLogprob {
                        token: t.text,
                        logprob: t.logprob,
                    })
                    .collect(),
            }],
        }
    }
}

impl From<(Vec<Token>, Vec<Vec<Token>>)> for ChatCompletionLogprobs {
    fn from(value: (Vec<Token>, Vec<Vec<Token>>)) -> Self {
        let (tokens, top_tokens) = value;
        Self {
            content: tokens
                .into_iter()
                .zip(top_tokens)
                .map(|(t, top_t)| ChatCompletionLogprob {
                    token: t.text,
                    logprob: t.logprob,
                    top_logprobs: top_t
                        .into_iter()
                        .map(|t| ChatCompletionTopLogprob {
                            token: t.text,
                            logprob: t.logprob,
                        })
                        .collect(),
                })
                .collect(),
        }
    }
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionLogprob {
    token: String,
    logprob: f32,
    top_logprobs: Vec<ChatCompletionTopLogprob>,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionTopLogprob {
    token: String,
    logprob: f32,
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Default)]
pub(crate) struct Usage {
    pub prompt_tokens: u32,
    pub completion_tokens: u32,
    pub total_tokens: u32,
}

impl ChatCompletion {
    pub(crate) fn new(
        model: String,
        system_fingerprint: String,
        output: Option<String>,
        created: u64,
        details: Details,
        return_logprobs: bool,
        tool_calls: Option<ToolCall>,
    ) -> Self {
        Self {
            id: String::new(),
            object: "text_completion".into(),
            created,
            model,
            system_fingerprint,
            choices: vec![ChatCompletionComplete {
                index: 0,
                message: Message {
                    role: "assistant".into(),
                    content: output,
                    name: None,
                    tool_calls,
                },
                logprobs: return_logprobs
                    .then(|| ChatCompletionLogprobs::from((details.tokens, details.top_tokens))),
                finish_reason: details.finish_reason.to_string(),
            }],
            usage: Usage {
                prompt_tokens: details.prefill.len() as u32,
                completion_tokens: details.generated_tokens,
                total_tokens: details.prefill.len() as u32 + details.generated_tokens,
            },
        }
    }
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct CompletionCompleteChunk {
    pub id: String,
    pub object: String,
    pub created: u64,
    pub choices: Vec<CompletionComplete>,
    pub model: String,
    pub system_fingerprint: String,
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionChunk {
    pub id: String,
    pub object: String,
    #[schema(example = "1706270978")]
    pub created: u64,
    #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
    pub model: String,
    pub system_fingerprint: String,
    pub choices: Vec<ChatCompletionChoice>,
}

#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionChoice {
    pub index: u32,
    pub delta: ChatCompletionDelta,
    pub logprobs: Option<ChatCompletionLogprobs>,
    pub finish_reason: Option<String>,
}

#[derive(Clone, Debug, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionDelta {
    #[schema(example = "user")]
    pub role: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[schema(example = "What is Deep Learning?")]
    pub content: Option<String>,
    // default to None
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tool_calls: Option<DeltaToolCall>,
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Debug)]
pub(crate) struct DeltaToolCall {
    pub index: u32,
    pub id: String,
    pub r#type: String,
    pub function: Function,
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Debug)]
pub(crate) struct Function {
    pub name: Option<String>,
    pub arguments: String,
}

#[allow(clippy::too_many_arguments)]
impl ChatCompletionChunk {
    pub(crate) fn new(
        model: String,
        system_fingerprint: String,
        delta: Option<String>,
        tool_calls: Option<Vec<String>>,
        created: u64,
        logprobs: Option<ChatCompletionLogprobs>,
        finish_reason: Option<String>,
    ) -> Self {
        Self {
            id: String::new(),
            object: "text_completion".to_string(),
            created,
            model,
            system_fingerprint,
            choices: vec![ChatCompletionChoice {
                index: 0,
                delta: ChatCompletionDelta {
                    role: "assistant".to_string(),
                    content: delta,
                    tool_calls: tool_calls.map(|tc| DeltaToolCall {
                        index: 0,
                        id: String::new(),
                        r#type: "function".to_string(),
                        function: Function {
                            name: None,
                            arguments: tc[0].to_string(),
                        },
                    }),
                },
                logprobs,
                finish_reason,
            }],
        }
    }
}

#[derive(Clone, Deserialize, ToSchema, Serialize)]
pub(crate) struct ChatRequest {
    #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
    /// [UNUSED] ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
    pub model: String,

    /// A list of messages comprising the conversation so far.
    #[schema(example = "[{\"role\": \"user\", \"content\": \"What is Deep Learning?\"}]")]
    pub messages: Vec<Message>,

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,
    /// decreasing the model's likelihood to repeat the same line verbatim.
    #[serde(default)]
    #[schema(example = "1.0")]
    pub frequency_penalty: Option<f32>,

    /// UNUSED
    /// Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens
    /// (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,
    /// the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,
    /// but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should
    /// result in a ban or exclusive selection of the relevant token.
    #[serde(default)]
    pub logit_bias: Option<Vec<f32>>,

    /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each
    /// output token returned in the content of message.
    #[serde(default)]
    #[schema(example = "false")]
    pub logprobs: Option<bool>,

    /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with
    /// an associated log probability. logprobs must be set to true if this parameter is used.
    #[serde(default)]
    #[schema(example = "5")]
    pub top_logprobs: Option<u32>,

    /// The maximum number of tokens that can be generated in the chat completion.
    #[serde(default)]
    #[schema(example = "32")]
    pub max_tokens: Option<u32>,

    /// UNUSED
    /// How many chat completion choices to generate for each input message. Note that you will be charged based on the
    /// number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
    #[serde(default)]
    #[schema(nullable = true, example = "2")]
    pub n: Option<u32>,

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,
    /// increasing the model's likelihood to talk about new topics.
    #[serde(default)]
    #[schema(nullable = true, example = 0.1)]
    pub presence_penalty: Option<f32>,

    /// Up to 4 sequences where the API will stop generating further tokens.
    #[serde(default)]
    #[schema(nullable = true, example = "null")]
    pub stop: Option<Vec<String>>,

    #[serde(default = "bool::default")]
    pub stream: bool,

    #[schema(nullable = true, example = 42)]
    pub seed: Option<u64>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while
    /// lower values like 0.2 will make it more focused and deterministic.
    ///
    /// We generally recommend altering this or `top_p` but not both.
    #[serde(default)]
    #[schema(nullable = true, example = 1.0)]
    pub temperature: Option<f32>,

    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the
    /// tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    #[serde(default)]
    #[schema(nullable = true, example = 0.95)]
    pub top_p: Option<f32>,

    /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of
    /// functions the model may generate JSON inputs for.
    #[serde(default)]
    #[schema(nullable = true, example = "null")]
    pub tools: Option<Vec<Tool>>,

    /// A prompt to be appended before the tools
    #[serde(default = "default_tool_prompt")]
    #[schema(
        nullable = true,
        example = "\"Based on the conversation, please choose the most appropriate tool to use: \""
    )]
    pub tool_prompt: Option<String>,

    /// A specific tool to use. If not provided, the model will default to use any of the tools provided in the tools parameter.
    #[serde(default)]
    #[schema(nullable = true, example = "null")]
    #[serde(deserialize_with = "deserialize_tool_choice::deserialize")]
    pub tool_choice: Option<ToolType>,
}

fn default_tool_prompt() -> Option<String> {
    Some(
        "\nBased on the conversation, please choose the most appropriate tool to use: ".to_string(),
    )
}
#[derive(Clone, Deserialize, ToSchema, Serialize)]
enum ToolType {
    FunctionName(String),
    OneOf,
}

/// Deserialize the tool choice from the JSON input or from the function name ("none" is allowed but mapped to None)
mod deserialize_tool_choice {
    use super::*;
    use serde::de;
    use serde::Deserializer;
    use serde_json::Value;

    pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<ToolType>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let value = Value::deserialize(deserializer)?;

        match value {
            Value::String(s) => match s.as_str() {
                "none" => Ok(None),
                "auto" => Ok(Some(ToolType::OneOf)),
                _ => Ok(Some(ToolType::FunctionName(s))),
            },
            Value::Object(map) => {
                if let Some(content) = map
                    .get("function")
                    .and_then(|v| v.get("name"))
                    .and_then(|v| v.as_str())
                {
                    Ok(Some(ToolType::FunctionName(content.to_string())))
                } else {
                    Err(de::Error::custom("function key not found in tool choice"))
                }
            }
            Value::Null => Ok(Some(ToolType::OneOf)),
            _ => Err(de::Error::custom("invalid tool choice format")),
        }
    }
}

#[derive(Debug, Deserialize, Serialize, ToSchema)]
pub struct Tools {
    #[serde(flatten)]
    functions_map: FunctionsMap,
    properties: Properties,
}

#[derive(Debug, Serialize, Deserialize)]
struct FunctionsMap {
    #[serde(rename = "$functions")]
    functions: std::collections::HashMap<String, serde_json::Value>,
}

#[derive(Debug, Serialize, Deserialize)]
struct FunctionRef {
    #[serde(rename = "$ref")]
    ref_path: String,
}

#[derive(Debug, Serialize, Deserialize)]
struct Properties {
    #[serde(serialize_with = "serialize_function")]
    function: Vec<FunctionRef>,
}

fn serialize_function<S>(functions: &Vec<FunctionRef>, serializer: S) -> Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    use serde::ser::SerializeStruct;
    let mut state = serializer.serialize_struct("Function", 1)?;
    state.serialize_field("anyOf", functions)?;
    state.end()
}

#[derive(Clone, Debug, Deserialize, Serialize, ToSchema, Default)]
pub(crate) struct FunctionDefinition {
    #[serde(default)]
    pub description: Option<String>,
    pub name: String,
    pub parameters: serde_json::Value,
}

#[derive(Clone, Debug, Deserialize, Serialize, ToSchema)]
pub(crate) struct Tool {
    // The type of the tool. Currently, only 'function' is supported.
    #[schema(example = "function")]
    pub r#type: String,
    // Grab the tool as generic JSON for debugging purposes.
    pub function: FunctionDefinition,
}

#[derive(Clone, Serialize, Deserialize)]
pub(crate) struct ChatTemplateInputs<'a> {
    messages: Vec<Message>,
    bos_token: Option<&'a str>,
    eos_token: Option<&'a str>,
    add_generation_prompt: bool,
}

#[derive(Clone, Deserialize, Serialize, ToSchema, Default, Debug)]
pub(crate) struct ToolCall {
    pub id: u32,
    pub r#type: String,
    pub function: FunctionDefinition,
}

#[derive(Clone, Deserialize, ToSchema, Serialize)]
pub(crate) struct Message {
    #[schema(example = "user")]
    pub role: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[schema(example = "My name is David and I")]
    pub content: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[schema(example = "\"David\"")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tool_calls: Option<ToolCall>,
}

#[derive(Clone, Debug, Deserialize, ToSchema)]
pub(crate) struct GenerateRequest {
    #[schema(example = "My name is Olivier and I")]
    pub inputs: String,
    #[serde(default = "default_parameters")]
    pub parameters: GenerateParameters,
}

#[derive(Clone, Debug, Deserialize, ToSchema)]
pub(crate) struct CompatGenerateRequest {
    #[schema(example = "My name is Olivier and I")]
    pub inputs: String,
    #[serde(default = "default_parameters")]
    pub parameters: GenerateParameters,
    #[serde(default)]
    #[schema(default = "false")]
    pub stream: bool,
}

impl From<CompatGenerateRequest> for GenerateRequest {
    fn from(req: CompatGenerateRequest) -> Self {
        Self {
            inputs: req.inputs,
            parameters: req.parameters,
        }
    }
}

#[derive(Debug, Serialize, ToSchema)]
pub struct PrefillToken {
    #[schema(example = 0)]
    id: u32,
    #[schema(example = "test")]
    text: String,
    #[schema(nullable = true, example = -0.34)]
    logprob: f32,
}

#[derive(Debug, Serialize, ToSchema, Clone)]
pub struct Token {
    #[schema(example = 0)]
    id: u32,
    #[schema(example = "test")]
    text: String,
    #[schema(nullable = true, example = -0.34)]
    logprob: f32,
    #[schema(example = "false")]
    special: bool,
}

#[derive(Debug, Serialize, ToSchema)]
pub struct SimpleToken {
    #[schema(example = 0)]
    id: u32,
    #[schema(example = "test")]
    text: String,
    #[schema(example = 0)]
    start: usize,
    #[schema(example = 2)]
    stop: usize,
}

#[derive(Serialize, ToSchema)]
#[serde(rename_all(serialize = "snake_case"))]
#[schema(example = "Length")]
pub(crate) enum FinishReason {
    #[schema(rename = "length")]
    Length,
    #[serde(rename = "eos_token")]
    #[schema(rename = "eos_token")]
    EndOfSequenceToken,
    #[schema(rename = "stop_sequence")]
    StopSequence,
}

impl std::fmt::Display for FinishReason {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            FinishReason::Length => write!(f, "length"),
            FinishReason::EndOfSequenceToken => write!(f, "eos_token"),
            FinishReason::StopSequence => write!(f, "stop_sequence"),
        }
    }
}

#[derive(Serialize, ToSchema)]
pub(crate) struct BestOfSequence {
    #[schema(example = "test")]
    pub generated_text: String,
    #[schema(example = "length")]
    pub finish_reason: FinishReason,
    #[schema(example = 1)]
    pub generated_tokens: u32,
    #[schema(nullable = true, example = 42)]
    pub seed: Option<u64>,
    pub prefill: Vec<PrefillToken>,
    pub tokens: Vec<Token>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub top_tokens: Vec<Vec<Token>>,
}

#[derive(Serialize, ToSchema)]
pub(crate) struct Details {
    #[schema(example = "length")]
    pub finish_reason: FinishReason,
    #[schema(example = 1)]
    pub generated_tokens: u32,
    #[schema(nullable = true, example = 42)]
    pub seed: Option<u64>,
    pub prefill: Vec<PrefillToken>,
    pub tokens: Vec<Token>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub best_of_sequences: Option<Vec<BestOfSequence>>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub top_tokens: Vec<Vec<Token>>,
}

#[derive(Serialize, ToSchema)]
pub(crate) struct GenerateResponse {
    #[schema(example = "test")]
    pub generated_text: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub details: Option<Details>,
}

#[derive(Serialize, ToSchema)]
#[serde(transparent)]
pub(crate) struct TokenizeResponse(Vec<SimpleToken>);

#[derive(Serialize, ToSchema)]
pub(crate) struct StreamDetails {
    #[schema(example = "length")]
    pub finish_reason: FinishReason,
    #[schema(example = 1)]
    pub generated_tokens: u32,
    #[schema(nullable = true, example = 42)]
    pub seed: Option<u64>,
}

#[derive(Serialize, ToSchema)]
pub(crate) struct StreamResponse {
    pub index: u32,
    pub token: Token,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub top_tokens: Vec<Token>,
    #[schema(nullable = true, default = "null", example = "test")]
    pub generated_text: Option<String>,
    #[schema(nullable = true, default = "null")]
    pub details: Option<StreamDetails>,
}

#[derive(Serialize, ToSchema)]
pub(crate) struct ErrorResponse {
    pub error: String,
    pub error_type: String,
}

#[cfg(test)]
mod tests {
    use super::*;

    use tokenizers::Tokenizer;

    pub(crate) async fn get_tokenizer() -> Tokenizer {
        let api = hf_hub::api::sync::Api::new().unwrap();
        let repo = api.model("gpt2".to_string());
        let filename = repo.get("tokenizer.json").unwrap();
        Tokenizer::from_file(filename).unwrap()
    }

    #[test]
    fn test_hub_nested_tokens_tokenizer_config() {
        // this is a subset of the tokenizer.json file
        // in this case we expect the tokens to be encoded as simple strings
        let json_content = r#"{
            "chat_template": "test",
            "bos_token": "<|begin▁of▁sentence|>",
            "eos_token": "<|end▁of▁sentence|>"
        }"#;

        let config: HubTokenizerConfig = serde_json::from_str(json_content).unwrap();

        // check that we successfully parsed the tokens
        assert_eq!(config.chat_template, Some("test".to_string()));
        assert_eq!(
            config.bos_token,
            Some("<|begin▁of▁sentence|>".to_string())
        );
        assert_eq!(config.eos_token, Some("<|end▁of▁sentence|>".to_string()));

        // in this case we expect the tokens to be encoded as structured tokens
        // we want the content of the structured token
        let json_content = r#"{
            "chat_template": "test",
            "bos_token": {
              "__type": "AddedToken",
              "content": "<|begin▁of▁sentence|>",
              "lstrip": false,
              "normalized": true,
              "rstrip": false,
              "single_word": false
            },
            "eos_token": {
              "__type": "AddedToken",
              "content": "<|end▁of▁sentence|>",
              "lstrip": false,
              "normalized": true,
              "rstrip": false,
              "single_word": false
            }
        }"#;

        let config: HubTokenizerConfig = serde_json::from_str(json_content).unwrap();

        // check that we successfully parsed the tokens
        assert_eq!(config.chat_template, Some("test".to_string()));
        assert_eq!(
            config.bos_token,
            Some("<|begin▁of▁sentence|>".to_string())
        );
        assert_eq!(config.eos_token, Some("<|end▁of▁sentence|>".to_string()));
    }
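
    #[test]
    fn test_tool_choice_deserialization() {
        // Small sketch exercising `deserialize_tool_choice` as written above:
        // "auto" (and JSON null) map to ToolType::OneOf, "none" maps to None,
        // any other string is treated as a function name, and objects are
        // searched for `function.name`. "get_weather" is just an illustrative value.
        let auto = deserialize_tool_choice::deserialize(serde_json::json!("auto")).unwrap();
        assert!(matches!(auto, Some(ToolType::OneOf)));

        let none = deserialize_tool_choice::deserialize(serde_json::json!("none")).unwrap();
        assert!(none.is_none());

        let by_name = deserialize_tool_choice::deserialize(
            serde_json::json!({"function": {"name": "get_weather"}}),
        )
        .unwrap();
        assert!(
            matches!(by_name, Some(ToolType::FunctionName(ref name)) if name.as_str() == "get_weather")
        );
    }

    #[test]
    fn test_grammar_type_deserialization() {
        // Small sketch of the adjacently tagged GrammarType representation:
        // `type` selects the variant and `value` carries its payload.
        let grammar: GrammarType =
            serde_json::from_str(r#"{"type": "regex", "value": "[a-z]+"}"#).unwrap();
        assert!(matches!(grammar, GrammarType::Regex(ref pattern) if pattern.as_str() == "[a-z]+"));
    }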
}