/// HTTP Server logic
use crate::infer::{InferError, InferResponse, InferStreamResponse};
use crate::validation::ValidationError;
use crate::{
    BestOfSequence, CompatGenerateRequest, Details, ErrorResponse, FinishReason,
    GenerateParameters, GenerateRequest, GenerateResponse, HubModelInfo, Infer, Info, PrefillToken,
    StreamDetails, StreamResponse, Token, Validation,
};
use axum::extract::Extension;
use axum::http::{HeaderMap, Method, StatusCode};
use axum::response::sse::{Event, KeepAlive, Sse};
use axum::response::{IntoResponse, Response};
use axum::routing::{get, post};
use axum::{http, Json, Router};
use axum_tracing_opentelemetry::opentelemetry_tracing_layer;
use futures::stream::StreamExt;
use futures::Stream;
use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle};
use std::convert::Infallible;
use std::net::SocketAddr;
use text_generation_client::{ShardInfo, ShardedClient};
use tokenizers::Tokenizer;
use tokio::signal;
use tokio::time::Instant;
use tower_http::cors::{AllowOrigin, CorsLayer};
use tracing::{info_span, instrument, Instrument};
use utoipa::OpenApi;
use utoipa_swagger_ui::SwaggerUi;

/// Generate tokens if `stream == false` or a stream of tokens if `stream == true`
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/",
    request_body = CompatGenerateRequest,
    responses(
        (status = 200, description = "See /generate or /generate_stream"),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"})),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"})),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"})),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"})),
    )
)]
#[instrument(skip(infer))]
async fn compat_generate(
    default_return_full_text: Extension<bool>,
    infer: Extension<Infer>,
    req: Json<CompatGenerateRequest>,
) -> Result<Response, (StatusCode, Json<ErrorResponse>)> {
    let mut req = req.0;

    // default return_full_text given the pipeline_tag
    if req.parameters.return_full_text.is_none() {
        req.parameters.return_full_text = Some(default_return_full_text.0)
    }

    // switch on stream
    if req.stream {
        Ok(generate_stream(infer, Json(req.into()))
            .await
            .into_response())
    } else {
        let (headers, generation) = generate(infer, Json(req.into())).await?;
        // wrap generation inside a Vec to match api-inference
        Ok((headers, Json(vec![generation.0])).into_response())
    }
}

/// Text Generation Inference endpoint info
#[utoipa::path(
    get,
    tag = "Text Generation Inference",
    path = "/info",
    responses((status = 200, description = "Served model info", body = Info))
)]
#[instrument]
async fn get_model_info(
    model_info: Extension<HubModelInfo>,
    shard_info: Extension<ShardInfo>,
) -> Json<Info> {
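    // Combine static Hub metadata with the runtime info reported by the model shards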
    let model_info = model_info.0;
    let shard_info = shard_info.0;
    let info = Info {
        version: env!("CARGO_PKG_VERSION"),
        sha: option_env!("VERGEN_GIT_SHA"),
        model_id: model_info.model_id,
        model_sha: model_info.sha,
        model_dtype: shard_info.dtype,
        model_device_type: shard_info.device_type,
        model_pipeline_tag: model_info.pipeline_tag,
    };
    Json(info)
}

/// Health check method
#[instrument(skip(infer))]
async fn health(infer: Extension<Infer>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
    // TODO: while this is the best health check we can do, it is a bit on the heavy side and might
    //       be a bit too slow for a health check.
    //       What we should do instead is check if the gRPC channels are still healthy.

    // Send a small inference request
    infer
        .generate(GenerateRequest {
            inputs: "liveness".to_string(),
            parameters: GenerateParameters {
                best_of: None,
                temperature: None,
                repetition_penalty: None,
                top_k: None,
                top_p: None,
                typical_p: None,
                do_sample: false,
                max_new_tokens: 1,
                return_full_text: None,
                stop: Vec::new(),
                truncate: None,
                watermark: false,
                details: false,
                seed: None,
            },
        })
        .await?;
    Ok(())
}

/// Generate tokens
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = GenerateResponse),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"})),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"})),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"})),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"})),
    )
)]
#[instrument(
    skip(infer),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed,
    )
)]
async fn generate(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> Result<(HeaderMap, Json<GenerateResponse>), (StatusCode, Json<ErrorResponse>)> {
    let span = tracing::Span::current();
    let start_time = Instant::now();
    metrics::increment_counter!("tgi_request_count");

    let compute_characters = req.0.inputs.chars().count();
    let mut add_prompt = None;
    if req.0.parameters.return_full_text.unwrap_or(false) {
        add_prompt = Some(req.0.inputs.clone());
    }

    let details = req.0.parameters.details;

    // Inference
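    // `best_of > 1` produces several candidate generations; the extra sequences are surfaced in the response details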
    let (response, best_of_responses) = match req.0.parameters.best_of {
        Some(best_of) if best_of > 1 => {
            let (response, best_of_responses) = infer.generate_best_of(req.0, best_of).await?;
            (response, Some(best_of_responses))
        }
        _ => (infer.generate(req.0).await?, None),
    };

    // Token details
    let details = match details {
        true => {
            // convert best_of_responses
            let best_of_sequences = best_of_responses.map(|responses: Vec<InferResponse>| {
                responses
                    .into_iter()
                    .map(|response: InferResponse| {
                        // Add prompt if return_full_text
                        let mut output_text = response.generated_text.text;
                        if let Some(prompt) = &add_prompt {
                            output_text = prompt.clone() + &output_text;
                        }

                        BestOfSequence {
                            generated_text: output_text,
                            finish_reason: FinishReason::from(
                                response.generated_text.finish_reason,
                            ),
                            generated_tokens: response.generated_text.generated_tokens,
                            prefill: response.prefill,
                            tokens: response.tokens,
                            seed: response.generated_text.seed,
                        }
                    })
                    .collect()
            });

            Some(Details {
                finish_reason: FinishReason::from(response.generated_text.finish_reason),
                generated_tokens: response.generated_text.generated_tokens,
                prefill: response.prefill,
                tokens: response.tokens,
                seed: response.generated_text.seed,
                best_of_sequences,
            })
        }
        false => None,
    };

    // Timings
    let total_time = start_time.elapsed();
    let validation_time = response.queued - start_time;
    let queue_time = response.start - response.queued;
    let inference_time = Instant::now() - response.start;
    let time_per_token = inference_time / response.generated_text.generated_tokens;

    // Tracing metadata
    span.record("total_time", format!("{total_time:?}"));
    span.record("validation_time", format!("{validation_time:?}"));
    span.record("queue_time", format!("{queue_time:?}"));
    span.record("inference_time", format!("{inference_time:?}"));
    span.record("time_per_token", format!("{time_per_token:?}"));
    span.record("seed", format!("{:?}", response.generated_text.seed));

    // Headers
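    // Expose compute and timing information to callers through custom `x-*` response headers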
    let mut headers = HeaderMap::new();
    headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
    headers.insert(
        "x-compute-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-compute-characters",
        compute_characters.to_string().parse().unwrap(),
    );
    headers.insert(
        "x-total-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-validation-time",
        validation_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-queue-time",
        queue_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-inference-time",
        inference_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-time-per-token",
        time_per_token.as_millis().to_string().parse().unwrap(),
    );

    // Metrics
    metrics::increment_counter!("tgi_request_success");
    metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
    metrics::histogram!(
        "tgi_request_validation_duration",
        validation_time.as_secs_f64()
    );
    metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
    metrics::histogram!(
        "tgi_request_inference_duration",
        inference_time.as_secs_f64()
    );
    metrics::histogram!(
        "tgi_request_mean_time_per_token_duration",
        time_per_token.as_secs_f64()
    );
    metrics::histogram!(
        "tgi_request_generated_tokens",
        response.generated_text.generated_tokens as f64
    );

    // Send response
    let mut output_text = response.generated_text.text;
    if let Some(prompt) = add_prompt {
        output_text = prompt + &output_text;
    }

    tracing::info!("Output: {}", output_text);

    let response = GenerateResponse {
        generated_text: output_text,
        details,
    };
    Ok((headers, Json(response)))
}

/// Generate a stream of tokens using Server-Sent Events
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate_stream",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = StreamResponse,
            content_type = "text/event-stream"),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"}),
            content_type = "text/event-stream"),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"}),
            content_type = "text/event-stream"),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"}),
            content_type = "text/event-stream"),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"}),
            content_type = "text/event-stream"),
    )
)]
#[instrument(
    skip(infer),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed,
    )
)]
async fn generate_stream(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> (
    HeaderMap,
    Sse<impl Stream<Item = Result<Event, Infallible>>>,
) {
    let span = tracing::Span::current();
    let start_time = Instant::now();
    metrics::increment_counter!("tgi_request_count");

    let compute_characters = req.0.inputs.chars().count();

    let mut headers = HeaderMap::new();
    headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
    headers.insert(
        "x-compute-characters",
        compute_characters.to_string().parse().unwrap(),
    );

    let stream = async_stream::stream! {
        // Inference
        let mut end_reached = false;
        let mut error = false;

        let mut add_prompt = None;
        if req.0.parameters.return_full_text.unwrap_or(false) {
            add_prompt = Some(req.0.inputs.clone());
        }
        let details = req.0.parameters.details;

        let best_of = req.0.parameters.best_of.unwrap_or(1);
        if best_of == 1 {
            match infer.generate_stream(req.0).instrument(info_span!(parent: &span, "async_stream")).await {
                // Keep permit as long as generate_stream lives
                Ok((_permit, mut response_stream)) => {
                    // Server-Sent Event stream
                    while let Some(response) = response_stream.next().await {
                        match response {
                            Ok(response) => {
                                match response {
                                    // Prefill is ignored
                                    InferStreamResponse::Prefill(_) => {}
                                    // Yield event for every new token
                                    InferStreamResponse::Token(token) => {
                                        // StreamResponse
                                        let stream_token = StreamResponse {
                                            token,
                                            generated_text: None,
                                            details: None,
                                        };

                                        yield Ok(Event::default().json_data(stream_token).unwrap())
                                    }
                                    // Yield event for last token and compute timings
                                    InferStreamResponse::End {
                                        token,
                                        generated_text,
                                        start,
                                        queued,
                                    } => {
                                        // Token details
                                        let details = match details {
                                            true => Some(StreamDetails {
                                                finish_reason: FinishReason::from(generated_text.finish_reason),
                                                generated_tokens: generated_text.generated_tokens,
                                                seed: generated_text.seed,
                                            }),
                                            false => None,
                                        };

                                        // Timings
                                        let total_time = start_time.elapsed();
                                        let validation_time = queued - start_time;
                                        let queue_time = start - queued;
                                        let inference_time = Instant::now() - start;
                                        let time_per_token = inference_time / generated_text.generated_tokens;

                                        // Tracing metadata
                                        span.record("total_time", format!("{total_time:?}"));
                                        span.record("validation_time", format!("{validation_time:?}"));
                                        span.record("queue_time", format!("{queue_time:?}"));
                                        span.record("inference_time", format!("{inference_time:?}"));
                                        span.record("time_per_token", format!("{time_per_token:?}"));
                                        span.record("seed", format!("{:?}", generated_text.seed));

                                        // Metrics
                                        metrics::increment_counter!("tgi_request_success");
                                        metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_validation_duration", validation_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_inference_duration", inference_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token.as_secs_f64());
                                        metrics::histogram!("tgi_request_generated_tokens", generated_text.generated_tokens as f64);

                                        // StreamResponse
                                        end_reached = true;

                                        let mut output_text = generated_text.text;
                                        if let Some(prompt) = add_prompt {
                                            output_text = prompt + &output_text;
                                        }

                                        tracing::info!(parent: &span, "Output: {}", output_text);

                                        let stream_token = StreamResponse {
                                            token,
                                            generated_text: Some(output_text),
                                            details
                                        };

                                        yield Ok(Event::default().json_data(stream_token).unwrap());
                                        break;
                                    }
                                }
                            }
                            // yield error
                            Err(err) => {
                                error = true;
                                yield Ok(Event::from(err));
                                break;
                            }
                        }
                    }
                },
                // yield error
                Err(err) => {
                    error = true;
                    yield Ok(Event::from(err));
                }
            }
            // Check if generation reached the end
            // Skip if we already sent an error
            if !end_reached && !error {
                let err = InferError::IncompleteGeneration;
                metrics::increment_counter!("tgi_request_failure", "err" => "incomplete");
                tracing::error!("{err}");
                yield Ok(Event::from(err));
            }
        } else {
            let err = InferError::from(ValidationError::BestOfStream);
            metrics::increment_counter!("tgi_request_failure", "err" => "validation");
            tracing::error!("{err}");
            yield Ok(Event::from(err));
        }
    };

    (headers, Sse::new(stream).keep_alive(KeepAlive::default()))
}

/// Prometheus metrics scrape endpoint
#[utoipa::path(
    get,
    tag = "Text Generation Inference",
    path = "/metrics",
    responses((status = 200, description = "Prometheus Metrics", body = String))
)]
async fn metrics(prom_handle: Extension<PrometheusHandle>) -> String {
    prom_handle.render()
}

/// Serving method
#[allow(clippy::too_many_arguments)]
pub async fn run(
    model_info: HubModelInfo,
    shard_info: ShardInfo,
    compat_return_full_text: bool,
    max_concurrent_requests: usize,
    max_best_of: usize,
    max_stop_sequences: usize,
    max_input_length: usize,
    max_total_tokens: usize,
    waiting_served_ratio: f32,
    max_batch_total_tokens: u32,
    max_waiting_tokens: usize,
    client: ShardedClient,
    tokenizer: Option<Tokenizer>,
    validation_workers: usize,
    addr: SocketAddr,
    allow_origin: Option<AllowOrigin>,
) {
    // OpenAPI documentation
    #[derive(OpenApi)]
    #[openapi(
        paths(
            get_model_info,
            compat_generate,
            generate,
            generate_stream,
            metrics,
        ),
        components(
            schemas(
                Info,
                CompatGenerateRequest,
                GenerateRequest,
                GenerateParameters,
                PrefillToken,
                Token,
                GenerateResponse,
                BestOfSequence,
                Details,
                FinishReason,
                StreamResponse,
                StreamDetails,
                ErrorResponse,
            )
        ),
        tags(
            (name = "Text Generation Inference", description = "Hugging Face Text Generation Inference API")
        ),
        info(
            title = "Text Generation Inference",
            license(
                name = "Apache 2.0",
                url = "https://www.apache.org/licenses/LICENSE-2.0"
            )
        )
    )]
    struct ApiDoc;

    // Create state
    let validation = Validation::new(
        validation_workers,
        tokenizer,
        max_best_of,
        max_stop_sequences,
        max_input_length,
        max_total_tokens,
    );
    let infer = Infer::new(
        client,
        validation,
        waiting_served_ratio,
        max_batch_total_tokens,
        max_waiting_tokens,
        max_concurrent_requests,
        shard_info.requires_padding,
    );

    // Duration buckets
    let duration_matcher = Matcher::Suffix(String::from("duration"));
    let n_duration_buckets = 35;
    let mut duration_buckets = Vec::with_capacity(n_duration_buckets);
    // Minimum duration in seconds
    let mut value = 0.0001;
    for _ in 0..n_duration_buckets {
        // geometric sequence
        value *= 1.5;
        duration_buckets.push(value);
    }
    // Input Length buckets
    let input_length_matcher = Matcher::Full(String::from("tgi_request_input_length"));
    let input_length_buckets: Vec<f64> = (0..100)
        .map(|x| (max_input_length as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Generated tokens buckets
    let generated_tokens_matcher = Matcher::Full(String::from("tgi_request_generated_tokens"));
    let generated_tokens_buckets: Vec<f64> = (0..100)
        .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Max new tokens buckets
    let max_new_tokens_matcher = Matcher::Full(String::from("tgi_request_max_new_tokens"));
    let max_new_tokens_buckets: Vec<f64> = (0..100)
        .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Batch size buckets
    let batch_size_matcher = Matcher::Full(String::from("tgi_batch_next_size"));
    let batch_size_buckets: Vec<f64> = (0..1024).map(|x| (x + 1) as f64).collect();

    // Prometheus handler
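    // Register the custom bucket layouts above before installing the global recorder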
    let builder = PrometheusBuilder::new()
        .set_buckets_for_metric(duration_matcher, &duration_buckets)
        .unwrap()
        .set_buckets_for_metric(input_length_matcher, &input_length_buckets)
        .unwrap()
        .set_buckets_for_metric(generated_tokens_matcher, &generated_tokens_buckets)
        .unwrap()
        .set_buckets_for_metric(max_new_tokens_matcher, &max_new_tokens_buckets)
        .unwrap()
        .set_buckets_for_metric(batch_size_matcher, &batch_size_buckets)
        .unwrap();
    let prom_handle = builder
        .install_recorder()
        .expect("failed to install metrics recorder");

    // CORS layer
    let allow_origin = allow_origin.unwrap_or(AllowOrigin::any());
    let cors_layer = CorsLayer::new()
        .allow_methods([Method::GET, Method::POST])
        .allow_headers([http::header::CONTENT_TYPE])
        .allow_origin(allow_origin);

    // Create router
    let app = Router::new()
        .merge(SwaggerUi::new("/docs").url("/api-doc/openapi.json", ApiDoc::openapi()))
        // Base routes
        .route("/", post(compat_generate))
        .route("/info", get(get_model_info))
        .route("/generate", post(generate))
        .route("/generate_stream", post(generate_stream))
        // AWS Sagemaker route
        .route("/invocations", post(compat_generate))
        // Base Health route
        .route("/health", get(health))
        // Inference API health route
        .route("/", get(health))
        // AWS Sagemaker health route
        .route("/ping", get(health))
        // Prometheus metrics route
        .route("/metrics", get(metrics))
        .layer(Extension(model_info))
        .layer(Extension(shard_info))
        .layer(Extension(compat_return_full_text))
        .layer(Extension(infer))
        .layer(Extension(prom_handle))
        .layer(opentelemetry_tracing_layer())
        .layer(cors_layer);

    // Run server
    axum::Server::bind(&addr)
        .serve(app.into_make_service())
        // Wait until all requests are finished to shut down
        .with_graceful_shutdown(shutdown_signal())
        .await
        .unwrap();
}

/// Shutdown signal handler
async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }

    tracing::info!("signal received, starting graceful shutdown");
    opentelemetry::global::shutdown_tracer_provider();
}

/// Convert the protobuf finish reason returned by the shards into the public API enum
impl From<i32> for FinishReason {
    fn from(finish_reason: i32) -> Self {
        let finish_reason = text_generation_client::FinishReason::from_i32(finish_reason).unwrap();
        match finish_reason {
            text_generation_client::FinishReason::Length => FinishReason::Length,
            text_generation_client::FinishReason::EosToken => FinishReason::EndOfSequenceToken,
            text_generation_client::FinishReason::StopSequence => FinishReason::StopSequence,
        }
    }
}

/// Convert to Axum supported formats
impl From<InferError> for (StatusCode, Json<ErrorResponse>) {
    fn from(err: InferError) -> Self {
        let status_code = match err {
            InferError::GenerationError(_) => StatusCode::FAILED_DEPENDENCY,
            InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS,
            InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY,
            InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR,
        };

        (
            status_code,
            Json(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            }),
        )
    }
}

impl From<InferError> for Event {
    fn from(err: InferError) -> Self {
        Event::default()
            .json_data(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            })
            .unwrap()
    }
}