/// HTTP Server logic

use crate::infer::{InferError, InferResponse, InferStreamResponse};
use crate::validation::ValidationError;
use crate::{
    BestOfSequence, CompatGenerateRequest, Details, ErrorResponse, FinishReason,
    GenerateParameters, GenerateRequest, GenerateResponse, Infer, Info, ModelInfo, PrefillToken,
    StreamDetails, StreamResponse, Token, Validation,
};
use axum::extract::Extension;
use axum::http::{HeaderMap, Method, StatusCode};
use axum::response::sse::{Event, KeepAlive, Sse};
use axum::response::{IntoResponse, Response};
use axum::routing::{get, post};
use axum::{http, Json, Router};
use axum_tracing_opentelemetry::opentelemetry_tracing_layer;
use futures::stream::StreamExt;
use futures::Stream;
use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle};
use std::convert::Infallible;
use std::net::SocketAddr;
use text_generation_client::ShardedClient;
use tokenizers::Tokenizer;
use tokio::signal;
use tokio::time::Instant;
use tower_http::cors::{AllowOrigin, CorsLayer};
use tracing::{info_span, instrument, Instrument};
use utoipa::OpenApi;
use utoipa_swagger_ui::SwaggerUi;

/// Generate tokens if `stream == false` or a stream of tokens if `stream == true`
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/",
    request_body = CompatGenerateRequest,
    responses(
        (status = 200, description = "See /generate or /generate_stream"),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"})),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"})),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"})),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"})),
    )
)]
#[instrument(skip(infer))]
async fn compat_generate(
    default_return_full_text: Extension<bool>,
    infer: Extension<Infer>,
    req: Json<CompatGenerateRequest>,
) -> Result<Response, (StatusCode, Json<ErrorResponse>)> {
    let mut req = req.0;

    // default return_full_text given the pipeline_tag
    if req.parameters.return_full_text.is_none() {
        req.parameters.return_full_text = Some(default_return_full_text.0)
    }

    // switch on stream
    if req.stream {
        Ok(generate_stream(infer, Json(req.into()))
            .await
            .into_response())
    } else {
        let (headers, generation) = generate(infer, Json(req.into())).await?;
        // wrap generation inside a Vec to match api-inference
        Ok((headers, Json(vec![generation.0])).into_response())
    }
}
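
// Illustrative usage of the compat route (not part of the original source; the
// port and payload values below are placeholders). The body is the same as for
// /generate, plus a `stream` flag:
//
//   curl localhost:8080/ \
//     -H 'Content-Type: application/json' \
//     -d '{"inputs": "Hello", "parameters": {"max_new_tokens": 20}, "stream": false}'
//
// With `stream: false` the response is a JSON array wrapping a single
// GenerateResponse (matching api-inference); with `stream: true` it is the
// Server-Sent Events stream produced by generate_stream below.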

/// Text Generation Inference endpoint info
#[utoipa::path(
    get,
    tag = "Text Generation Inference",
    path = "/info",
    responses((status = 200, description = "Served model info", body = Info))
)]
#[instrument]
async fn get_model_info(model_info: Extension<ModelInfo>) -> Json<Info> {
    let model_info = model_info.0;
    let info = Info {
        version: env!("CARGO_PKG_VERSION"),
        sha: option_env!("VERGEN_GIT_SHA"),
        model_id: model_info.model_id,
        model_sha: model_info.sha,
        model_pipeline_tag: model_info.pipeline_tag,
    };
    Json(info)
}
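
// Example /info response shape (illustrative sketch; assumes Info serializes
// under its Rust field names, and every value below is a placeholder):
//
//   {"model_id": "bigscience/bloom", "model_sha": "...",
//    "model_pipeline_tag": "text-generation", "version": "0.5.0", "sha": "..."}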

/// Health check method
#[instrument(skip(infer))]
async fn health(infer: Extension<Infer>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
    // TODO: while this is the best health check we can do, it is rather heavy and
    //       might be too slow for a health check. What we should do instead is
    //       check if the gRPC channels are still healthy.

    // Send a small inference request
    infer
        .generate(GenerateRequest {
            inputs: "liveness".to_string(),
            parameters: GenerateParameters {
                best_of: None,
                temperature: None,
                repetition_penalty: None,
                top_k: None,
                top_p: None,
                typical_p: None,
                do_sample: false,
                max_new_tokens: 1,
                return_full_text: None,
                stop: Vec::new(),
                truncate: None,
                watermark: false,
                details: false,
                seed: None,
            },
        })
        .await?;
    Ok(())
}

/// Generate tokens
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = GenerateResponse),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"})),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"})),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"})),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"})),
    )
)]
#[instrument(
    skip(infer),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed,
    )
)]
async fn generate(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> Result<(HeaderMap, Json<GenerateResponse>), (StatusCode, Json<ErrorResponse>)> {
    let span = tracing::Span::current();
    let start_time = Instant::now();
    metrics::increment_counter!("tgi_request_count");

    let compute_characters = req.0.inputs.chars().count();
    let mut add_prompt = None;
    if req.0.parameters.return_full_text.unwrap_or(false) {
        add_prompt = Some(req.0.inputs.clone());
    }

    let details = req.0.parameters.details;

    // Inference
    let (response, best_of_responses) = match req.0.parameters.best_of {
        Some(best_of) if best_of > 1 => {
            let (response, best_of_responses) = infer.generate_best_of(req.0, best_of).await?;
            (response, Some(best_of_responses))
        }
        _ => (infer.generate(req.0).await?, None),
    };

    // Token details
    let details = match details {
        true => {
            // convert best_of_responses
            let best_of_sequences = best_of_responses.map(|responses: Vec<InferResponse>| {
                responses
                    .into_iter()
                    .map(|response: InferResponse| {
                        // Add prompt if return_full_text
                        let mut output_text = response.generated_text.text;
                        if let Some(prompt) = &add_prompt {
                            output_text = prompt.clone() + &output_text;
                        }

                        BestOfSequence {
                            generated_text: output_text,
                            finish_reason: FinishReason::from(
                                response.generated_text.finish_reason,
                            ),
                            generated_tokens: response.generated_text.generated_tokens,
                            prefill: response.prefill,
                            tokens: response.tokens,
                            seed: response.generated_text.seed,
                        }
                    })
                    .collect()
            });

            Some(Details {
                finish_reason: FinishReason::from(response.generated_text.finish_reason),
                generated_tokens: response.generated_text.generated_tokens,
                prefill: response.prefill,
                tokens: response.tokens,
                seed: response.generated_text.seed,
                best_of_sequences,
            })
        }
        false => None,
    };

    // Timings
    let total_time = start_time.elapsed();
    let validation_time = response.queued - start_time;
    let queue_time = response.start - response.queued;
    let inference_time = Instant::now() - response.start;
    let time_per_token = inference_time / response.generated_text.generated_tokens;

    // Tracing metadata
    span.record("total_time", format!("{total_time:?}"));
    span.record("validation_time", format!("{validation_time:?}"));
    span.record("queue_time", format!("{queue_time:?}"));
    span.record("inference_time", format!("{inference_time:?}"));
    span.record("time_per_token", format!("{time_per_token:?}"));
    span.record("seed", format!("{:?}", response.generated_text.seed));

    // Headers
    let mut headers = HeaderMap::new();
    headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
    headers.insert(
        "x-compute-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-compute-characters",
        compute_characters.to_string().parse().unwrap(),
    );
    headers.insert(
        "x-total-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-validation-time",
        validation_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-queue-time",
        queue_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-inference-time",
        inference_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-time-per-token",
        time_per_token.as_millis().to_string().parse().unwrap(),
    );

    // Metrics
    metrics::increment_counter!("tgi_request_success");
    metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
    metrics::histogram!(
        "tgi_request_validation_duration",
        validation_time.as_secs_f64()
    );
    metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
    metrics::histogram!(
        "tgi_request_inference_duration",
        inference_time.as_secs_f64()
    );
    metrics::histogram!(
        "tgi_request_mean_time_per_token_duration",
        time_per_token.as_secs_f64()
    );
    metrics::histogram!(
        "tgi_request_generated_tokens",
        response.generated_text.generated_tokens as f64
    );

    // Send response
    let mut output_text = response.generated_text.text;
    if let Some(prompt) = add_prompt {
        output_text = prompt + &output_text;
    }

    tracing::info!("Output: {}", output_text);

    let response = GenerateResponse {
        generated_text: output_text,
        details,
    };
    Ok((headers, Json(response)))
}
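
// Illustrative response headers set by generate above (values are placeholders;
// every timing header is reported in milliseconds via `as_millis`):
//
//   x-compute-type: gpu+optimized
//   x-compute-time: 412
//   x-compute-characters: 5
//   x-total-time: 412
//   x-validation-time: 1
//   x-queue-time: 3
//   x-inference-time: 408
//   x-time-per-token: 20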

/// Generate a stream of tokens using Server-Sent Events
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate_stream",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = StreamResponse,
            content_type = "text/event-stream"),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"}),
            content_type = "text/event-stream"),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"}),
            content_type = "text/event-stream"),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"}),
            content_type = "text/event-stream"),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"}),
            content_type = "text/event-stream"),
    )
)]
#[instrument(
    skip(infer),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed,
    )
)]
async fn generate_stream(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> (
    HeaderMap,
    Sse<impl Stream<Item = Result<Event, Infallible>>>,
) {
    let span = tracing::Span::current();
    let start_time = Instant::now();
    metrics::increment_counter!("tgi_request_count");

    let compute_characters = req.0.inputs.chars().count();

    let mut headers = HeaderMap::new();
    headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
    headers.insert(
        "x-compute-characters",
        compute_characters.to_string().parse().unwrap(),
    );

    let stream = async_stream::stream! {
        // Inference
        let mut end_reached = false;
        let mut error = false;

        let mut add_prompt = None;
        if req.0.parameters.return_full_text.unwrap_or(false) {
            add_prompt = Some(req.0.inputs.clone());
        }
        let details = req.0.parameters.details;

        let best_of = req.0.parameters.best_of.unwrap_or(1);
        if best_of == 1 {
            match infer.generate_stream(req.0).instrument(info_span!(parent: &span, "async_stream")).await {
                // Keep permit as long as generate_stream lives
                Ok((_permit, mut response_stream)) => {
                    // Server-Sent Event stream
                    while let Some(response) = response_stream.next().await {
                        match response {
                            Ok(response) => {
                                match response {
                                    // Prefill is ignored
                                    InferStreamResponse::Prefill(_) => {}
                                    // Yield event for every new token
                                    InferStreamResponse::Token(token) => {
                                        // StreamResponse
                                        let stream_token = StreamResponse {
                                            token,
                                            generated_text: None,
                                            details: None,
                                        };

                                        yield Ok(Event::default().json_data(stream_token).unwrap())
                                    }
                                    // Yield event for last token and compute timings
                                    InferStreamResponse::End {
                                        token,
                                        generated_text,
                                        start,
                                        queued,
                                    } => {
                                        // Token details
                                        let details = match details {
                                            true => Some(StreamDetails {
                                                finish_reason: FinishReason::from(generated_text.finish_reason),
                                                generated_tokens: generated_text.generated_tokens,
                                                seed: generated_text.seed,
                                            }),
                                            false => None,
                                        };

                                        // Timings
                                        let total_time = start_time.elapsed();
                                        let validation_time = queued - start_time;
                                        let queue_time = start - queued;
                                        let inference_time = Instant::now() - start;
                                        let time_per_token = inference_time / generated_text.generated_tokens;

                                        // Tracing metadata
                                        span.record("total_time", format!("{total_time:?}"));
                                        span.record("validation_time", format!("{validation_time:?}"));
                                        span.record("queue_time", format!("{queue_time:?}"));
                                        span.record("inference_time", format!("{inference_time:?}"));
                                        span.record("time_per_token", format!("{time_per_token:?}"));
                                        span.record("seed", format!("{:?}", generated_text.seed));

                                        // Metrics
                                        metrics::increment_counter!("tgi_request_success");
                                        metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_validation_duration", validation_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_inference_duration", inference_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token.as_secs_f64());
                                        metrics::histogram!("tgi_request_generated_tokens", generated_text.generated_tokens as f64);

                                        // StreamResponse
                                        end_reached = true;

                                        let mut output_text = generated_text.text;
                                        if let Some(prompt) = add_prompt {
                                            output_text = prompt + &output_text;
                                        }

                                        tracing::info!(parent: &span, "Output: {}", output_text);

                                        let stream_token = StreamResponse {
                                            token,
                                            generated_text: Some(output_text),
                                            details
                                        };

                                        yield Ok(Event::default().json_data(stream_token).unwrap());
                                        break;
                                    }
                                }
                            }
                            // yield error
                            Err(err) => {
                                error = true;
                                yield Ok(Event::from(err));
                                break;
                            }
                        }
                    }
                },
                // yield error
                Err(err) => {
                    error = true;
                    yield Ok(Event::from(err));
                }
            }
            // Check if generation reached the end
            // Skip if we already sent an error
            if !end_reached && !error {
                let err = InferError::IncompleteGeneration;
                metrics::increment_counter!("tgi_request_failure", "err" => "incomplete");
                tracing::error!("{err}");
                yield Ok(Event::from(err));
            }
        } else {
            let err = InferError::from(ValidationError::BestOfStream);
            metrics::increment_counter!("tgi_request_failure", "err" => "validation");
            tracing::error!("{err}");
            yield Ok(Event::from(err));
        }
    };

    (headers, Sse::new(stream).keep_alive(KeepAlive::default()))
}
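
// Illustrative SSE wire format for this endpoint (token ids, logprobs and text
// are placeholders). Every intermediate token arrives as a `data:` event with
// `generated_text` and `details` set to null; the final event carries the full
// generated_text and, when details were requested, the StreamDetails:
//
//   data: {"token":{"id":3,"text":" a","logprob":-0.6,"special":false},"generated_text":null,"details":null}
//   data: {"token":{"id":8,"text":" b","logprob":-0.4,"special":false},"generated_text":" a b","details":null}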

/// Prometheus metrics scrape endpoint
#[utoipa::path(
    get,
    tag = "Text Generation Inference",
    path = "/metrics",
    responses((status = 200, description = "Prometheus Metrics", body = String))
)]
async fn metrics(prom_handle: Extension<PrometheusHandle>) -> String {
    prom_handle.render()
}
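
// Illustrative /metrics scrape output (metric names match the ones recorded in
// this file; values and bucket boundaries are placeholders):
//
//   tgi_request_count 128
//   tgi_request_success 125
//   tgi_request_duration_bucket{le="0.00015"} 0
//   tgi_request_duration_sum 97.3
//   tgi_request_duration_count 125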

/// Serving method
#[allow(clippy::too_many_arguments)]
pub async fn run(
    model_info: ModelInfo,
    compat_return_full_text: bool,
    max_concurrent_requests: usize,
    max_best_of: usize,
    max_stop_sequences: usize,
    max_input_length: usize,
    max_total_tokens: usize,
    max_batch_size: usize,
    max_waiting_tokens: usize,
    client: ShardedClient,
    tokenizer: Option<Tokenizer>,
    validation_workers: usize,
    addr: SocketAddr,
    allow_origin: Option<AllowOrigin>,
) {
    // OpenAPI documentation
    #[derive(OpenApi)]
    #[openapi(
        paths(
            get_model_info,
            compat_generate,
            generate,
            generate_stream,
            metrics,
        ),
        components(
            schemas(
                Info,
                CompatGenerateRequest,
                GenerateRequest,
                GenerateParameters,
                PrefillToken,
                Token,
                GenerateResponse,
                BestOfSequence,
                Details,
                FinishReason,
                StreamResponse,
                StreamDetails,
                ErrorResponse,
            )
        ),
        tags(
            (name = "Text Generation Inference", description = "Hugging Face Text Generation Inference API")
        ),
        info(
            title = "Text Generation Inference",
            license(
                name = "Apache 2.0",
                url = "https://www.apache.org/licenses/LICENSE-2.0"
            )
        )
    )]
    struct ApiDoc;

    // Create state
    let validation = Validation::new(
        validation_workers,
        tokenizer,
        max_best_of,
        max_stop_sequences,
        max_input_length,
        max_total_tokens,
    );
    let infer = Infer::new(
        client,
        validation,
        max_batch_size,
        max_waiting_tokens,
        max_concurrent_requests,
    );

    // Duration buckets
    let duration_matcher = Matcher::Suffix(String::from("duration"));
    let n_duration_buckets = 35;
    let mut duration_buckets = Vec::with_capacity(n_duration_buckets);
    // Minimum duration in seconds
    let mut value = 0.0001;
    for _ in 0..n_duration_buckets {
        // geometric sequence
        value *= 1.5;
        duration_buckets.push(value);
    }
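    // With 35 buckets starting from 0.0001s and a ratio of 1.5, the boundaries
    // span roughly 0.15ms (0.0001 * 1.5) up to ~146s (0.0001 * 1.5^35), covering
    // fast per-token timings as well as long end-to-end requests.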
    // Input Length buckets
    let input_length_matcher = Matcher::Full(String::from("tgi_request_input_length"));
    let input_length_buckets: Vec<f64> = (0..100)
        .map(|x| (max_input_length as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Generated tokens buckets
    let generated_tokens_matcher = Matcher::Full(String::from("tgi_request_generated_tokens"));
    let generated_tokens_buckets: Vec<f64> = (0..100)
        .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Max new tokens buckets
    let max_new_tokens_matcher = Matcher::Full(String::from("tgi_request_max_new_tokens"));
    let max_new_tokens_buckets: Vec<f64> = (0..100)
        .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Batch size buckets
    let batch_size_matcher = Matcher::Full(String::from("tgi_batch_next_size"));
    let batch_size_buckets: Vec<f64> = (0..max_batch_size).map(|x| (x + 1) as f64).collect();

    // Prometheus handler
    let builder = PrometheusBuilder::new()
        .set_buckets_for_metric(duration_matcher, &duration_buckets)
        .unwrap()
        .set_buckets_for_metric(input_length_matcher, &input_length_buckets)
        .unwrap()
        .set_buckets_for_metric(generated_tokens_matcher, &generated_tokens_buckets)
        .unwrap()
        .set_buckets_for_metric(max_new_tokens_matcher, &max_new_tokens_buckets)
        .unwrap()
        .set_buckets_for_metric(batch_size_matcher, &batch_size_buckets)
        .unwrap();
    let prom_handle = builder
        .install_recorder()
        .expect("failed to install metrics recorder");

    // CORS layer
    let allow_origin = allow_origin.unwrap_or(AllowOrigin::any());
    let cors_layer = CorsLayer::new()
        .allow_methods([Method::GET, Method::POST])
        .allow_headers([http::header::CONTENT_TYPE])
        .allow_origin(allow_origin);

    // Create router
    let app = Router::new()
        .merge(SwaggerUi::new("/docs").url("/api-doc/openapi.json", ApiDoc::openapi()))
        // Base routes
        .route("/", post(compat_generate))
        .route("/info", get(get_model_info))
        .route("/generate", post(generate))
        .route("/generate_stream", post(generate_stream))
        // AWS Sagemaker route
        .route("/invocations", post(compat_generate))
        // Base Health route
        .route("/health", get(health))
        // Inference API health route
        .route("/", get(health))
        // AWS Sagemaker health route
        .route("/ping", get(health))
        // Prometheus metrics route
        .route("/metrics", get(metrics))
        .layer(Extension(model_info))
        .layer(Extension(compat_return_full_text))
        .layer(Extension(infer))
        .layer(Extension(prom_handle))
        .layer(opentelemetry_tracing_layer())
        .layer(cors_layer);

    // Run server
    axum::Server::bind(&addr)
        .serve(app.into_make_service())
        // Wait until all requests are finished to shut down
        .with_graceful_shutdown(shutdown_signal())
        .await
        .unwrap();
}

/// Shutdown signal handler
async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }

    tracing::info!("signal received, starting graceful shutdown");
    opentelemetry::global::shutdown_tracer_provider();
}

impl From<i32> for FinishReason {
    fn from(finish_reason: i32) -> Self {
        let finish_reason = text_generation_client::FinishReason::from_i32(finish_reason).unwrap();
        match finish_reason {
            text_generation_client::FinishReason::Length => FinishReason::Length,
            text_generation_client::FinishReason::EosToken => FinishReason::EndOfSequenceToken,
            text_generation_client::FinishReason::StopSequence => FinishReason::StopSequence,
        }
    }
}

/// Convert to Axum supported formats
impl From<InferError> for (StatusCode, Json<ErrorResponse>) {
    fn from(err: InferError) -> Self {
        let status_code = match err {
            InferError::GenerationError(_) => StatusCode::FAILED_DEPENDENCY,
            InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS,
            InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY,
            InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR,
        };

        (
            status_code,
            Json(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            }),
        )
    }
}

impl From<InferError> for Event {
    fn from(err: InferError) -> Self {
        Event::default()
            .json_data(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            })
            .unwrap()
    }
}