"vscode:/vscode.git/clone" did not exist on "1138d63b519e37f0ce04e027b9f4a3261d27c628"
/// HTTP Server logic
use crate::infer::{InferError, InferResponse, InferStreamResponse};
use crate::validation::ValidationError;
use crate::{
    BestOfSequence, CompatGenerateRequest, Details, ErrorResponse, FinishReason,
    GenerateParameters, GenerateRequest, GenerateResponse, Infer, PrefillToken, StreamDetails,
    StreamResponse, Token, Validation,
};
use axum::extract::Extension;
use axum::http::{HeaderMap, Method, StatusCode};
use axum::response::sse::{Event, KeepAlive, Sse};
use axum::response::{IntoResponse, Response};
use axum::routing::{get, post};
use axum::{http, Json, Router};
use axum_tracing_opentelemetry::opentelemetry_tracing_layer;
use futures::Stream;
use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle};
use std::convert::Infallible;
use std::net::SocketAddr;
use text_generation_client::ShardedClient;
use tokenizers::Tokenizer;
use tokio::signal;
use tokio::time::Instant;
use tokio_stream::StreamExt;
use tower_http::cors::{AllowOrigin, CorsLayer};
use tracing::{info_span, instrument, Instrument};
use utoipa::OpenApi;
use utoipa_swagger_ui::SwaggerUi;

/// Compatibility route with api-inference and AzureML
#[instrument(skip(infer))]
async fn compat_generate(
    default_return_full_text: Extension<bool>,
    infer: Extension<Infer>,
    req: Json<CompatGenerateRequest>,
) -> Result<Response, (StatusCode, Json<ErrorResponse>)> {
    let mut req = req.0;

    // default return_full_text given the pipeline_tag
    if req.parameters.return_full_text.is_none() {
        req.parameters.return_full_text = Some(default_return_full_text.0)
    }

    // switch on stream
    if req.stream {
        Ok(generate_stream(infer, Json(req.into()))
            .await
            .into_response())
    } else {
        let (headers, generation) = generate(infer, Json(req.into())).await?;
        // wrap generation inside a Vec to match api-inference
        Ok((headers, Json(vec![generation.0])).into_response())
    }
}

/// Health check method
#[instrument(skip(infer))]
async fn health(infer: Extension<Infer>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
    // TODO: while this is the best health check we can do, it is a bit on the heavy side and might
    //       be a bit too slow for a health check.
    //       What we should do instead is check if the gRPC channels are still healthy.

    // Send a small inference request
    infer
        .generate(GenerateRequest {
            inputs: "liveness".to_string(),
            parameters: GenerateParameters {
                best_of: None,
                temperature: None,
                repetition_penalty: None,
                top_k: None,
                top_p: None,
                typical_p: None,
                do_sample: false,
                max_new_tokens: 1,
                return_full_text: None,
                stop: Vec::new(),
                truncate: None,
                watermark: false,
                details: false,
                seed: None,
            },
        })
        .await?;
    Ok(())
}

/// Generate tokens
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = GenerateResponse),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"})),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"})),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"})),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"})),
    )
)]
#[instrument(
    skip(infer),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed,
    )
)]
async fn generate(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> Result<(HeaderMap, Json<GenerateResponse>), (StatusCode, Json<ErrorResponse>)> {
    let span = tracing::Span::current();
    let start_time = Instant::now();
    metrics::increment_counter!("tgi_request_count");

    let compute_characters = req.0.inputs.chars().count();
    let mut add_prompt = None;
    if req.0.parameters.return_full_text.unwrap_or(false) {
        add_prompt = Some(req.0.inputs.clone());
    }

    let details = req.0.parameters.details;

    // Inference
    let (response, best_of_responses) = match req.0.parameters.best_of {
        Some(best_of) if best_of > 1 => {
            let (response, best_of_responses) = infer.generate_best_of(req.0, best_of).await?;
            (response, Some(best_of_responses))
        }
        _ => (infer.generate(req.0).await?, None),
    };

    // Token details
    let details = match details {
        true => {
            // convert best_of_responses
            let best_of_sequences = best_of_responses.map(|responses: Vec<InferResponse>| {
                responses
                    .into_iter()
                    .map(|response: InferResponse| {
                        // Add prompt if return_full_text
                        let mut output_text = response.generated_text.text;
                        if let Some(prompt) = &add_prompt {
                            output_text = prompt.clone() + &output_text;
                        }

                        BestOfSequence {
                            generated_text: output_text,
                            finish_reason: FinishReason::from(
                                response.generated_text.finish_reason,
                            ),
                            generated_tokens: response.generated_text.generated_tokens,
                            prefill: response.prefill,
                            tokens: response.tokens,
                            seed: response.generated_text.seed,
                        }
                    })
                    .collect()
            });

            Some(Details {
                finish_reason: FinishReason::from(response.generated_text.finish_reason),
                generated_tokens: response.generated_text.generated_tokens,
                prefill: response.prefill,
                tokens: response.tokens,
                seed: response.generated_text.seed,
                best_of_sequences,
            })
        }
        false => None,
    };

    // Timings
    let total_time = start_time.elapsed();
    let validation_time = response.queued - start_time;
    let queue_time = response.start - response.queued;
    let inference_time = Instant::now() - response.start;
    let time_per_token = inference_time / response.generated_text.generated_tokens;

    // Tracing metadata
    span.record("total_time", format!("{total_time:?}"));
    span.record("validation_time", format!("{validation_time:?}"));
    span.record("queue_time", format!("{queue_time:?}"));
    span.record("inference_time", format!("{inference_time:?}"));
    span.record("time_per_token", format!("{time_per_token:?}"));
    span.record("seed", format!("{:?}", response.generated_text.seed));

    // Headers
    let mut headers = HeaderMap::new();
    headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
    headers.insert(
        "x-compute-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-compute-characters",
        compute_characters.to_string().parse().unwrap(),
    );
    headers.insert(
        "x-total-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-validation-time",
        validation_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-queue-time",
        queue_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-inference-time",
        inference_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-time-per-token",
        time_per_token.as_millis().to_string().parse().unwrap(),
    );

    // Metrics
    metrics::increment_counter!("tgi_request_success");
    metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
    metrics::histogram!(
        "tgi_request_validation_duration",
        validation_time.as_secs_f64()
    );
    metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
    metrics::histogram!(
        "tgi_request_inference_duration",
        inference_time.as_secs_f64()
    );
    metrics::histogram!(
        "tgi_request_mean_time_per_token_duration",
        time_per_token.as_secs_f64()
    );
    metrics::histogram!(
        "tgi_request_generated_tokens",
        response.generated_text.generated_tokens as f64
    );

    // Send response
    let mut output_text = response.generated_text.text;
    if let Some(prompt) = add_prompt {
        output_text = prompt + &output_text;
    }

    tracing::info!("Output: {}", output_text);

    let response = GenerateResponse {
        generated_text: output_text,
        details,
    };
    Ok((headers, Json(response)))
}

/// Generate a stream of tokens using Server-Sent Events
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate_stream",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = StreamResponse,
            content_type = "text/event-stream"),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"}),
            content_type = "text/event-stream"),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"}),
            content_type = "text/event-stream"),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"}),
            content_type = "text/event-stream"),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"}),
            content_type = "text/event-stream"),
    )
)]
#[instrument(
    skip(infer),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed,
    )
)]
async fn generate_stream(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> (
    HeaderMap,
    Sse<impl Stream<Item = Result<Event, Infallible>>>,
) {
    let span = tracing::Span::current();
    let start_time = Instant::now();
    metrics::increment_counter!("tgi_request_count");

    let compute_characters = req.0.inputs.chars().count();

    let mut headers = HeaderMap::new();
    headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
    headers.insert(
        "x-compute-characters",
        compute_characters.to_string().parse().unwrap(),
    );

    let stream = async_stream::stream! {
        // Inference
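        // `end_reached` and `error` track whether the stream terminated normally, so that an
        // IncompleteGeneration error can be emitted at the end if it did not.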
        let mut end_reached = false;
        let mut error = false;

        let mut add_prompt = None;
        if req.0.parameters.return_full_text.unwrap_or(false) {
            add_prompt = Some(req.0.inputs.clone());
        }
        let details = req.0.parameters.details;

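        // Streaming does not support best_of > 1; any other value is rejected below with a
        // validation error.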
        let best_of = req.0.parameters.best_of.unwrap_or(1);
        if best_of == 1 {
            match infer.generate_stream(req.0).instrument(info_span!(parent: &span, "async_stream")).await {
                Ok(mut response_stream) => {
                    // Server-Sent Event stream
                    while let Some(response) = response_stream.next().await {
                        match response {
                            Ok(response) => {
                                match response {
                                    // Prefill is ignored
                                    InferStreamResponse::Prefill(_) => {}
                                    // Yield event for every new token
                                    InferStreamResponse::Token(token) => {
                                        // StreamResponse
                                        let stream_token = StreamResponse {
                                            token,
                                            generated_text: None,
                                            details: None,
                                        };

                                        yield Ok(Event::default().json_data(stream_token).unwrap())
                                    }
                                    // Yield event for last token and compute timings
                                    InferStreamResponse::End {
                                        token,
                                        generated_text,
                                        start,
                                        queued,
                                    } => {
                                        // Token details
                                        let details = match details {
                                            true => Some(StreamDetails {
                                                finish_reason: FinishReason::from(generated_text.finish_reason),
                                                generated_tokens: generated_text.generated_tokens,
                                                seed: generated_text.seed,
                                            }),
                                            false => None,
                                        };

                                        // Timings
                                        let total_time = start_time.elapsed();
                                        let validation_time = queued - start_time;
                                        let queue_time = start - queued;
                                        let inference_time = Instant::now() - start;
                                        let time_per_token = inference_time / generated_text.generated_tokens;

                                        // Tracing metadata
                                        span.record("total_time", format!("{total_time:?}"));
                                        span.record("validation_time", format!("{validation_time:?}"));
                                        span.record("queue_time", format!("{queue_time:?}"));
                                        span.record("inference_time", format!("{inference_time:?}"));
                                        span.record("time_per_token", format!("{time_per_token:?}"));
                                        span.record("seed", format!("{:?}", generated_text.seed));

                                        // Metrics
                                        metrics::increment_counter!("tgi_request_success");
                                        metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_validation_duration", validation_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_inference_duration", inference_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token.as_secs_f64());
                                        metrics::histogram!("tgi_request_generated_tokens", generated_text.generated_tokens as f64);

                                        // StreamResponse
                                        end_reached = true;

                                        let mut output_text = generated_text.text;
                                        if let Some(prompt) = add_prompt {
                                            output_text = prompt + &output_text;
                                        }

                                        tracing::info!(parent: &span, "Output: {}", output_text);

                                        let stream_token = StreamResponse {
                                            token,
                                            generated_text: Some(output_text),
                                            details
                                        };

                                        yield Ok(Event::default().json_data(stream_token).unwrap());
                                        break;
                                    }
                                }
                            }
                            // yield error
                            Err(err) => {
                                error = true;
                                yield Ok(Event::from(err));
                                break;
                            }
                        }
                    }
                },
                // yield error
                Err(err) => {
                    error = true;
                    yield Ok(Event::from(err));
                }
            }
            // Check if generation reached the end
            // Skip if we already sent an error
            if !end_reached && !error {
                let err = InferError::IncompleteGeneration;
                metrics::increment_counter!("tgi_request_failure", "err" => "incomplete");
                tracing::error!("{err}");
                yield Ok(Event::from(err));
            }
        } else {
            let err = InferError::from(ValidationError::BestOfStream);
            metrics::increment_counter!("tgi_request_failure", "err" => "validation");
            tracing::error!("{err}");
            yield Ok(Event::from(err));
        }
    };

    (headers, Sse::new(stream).keep_alive(KeepAlive::default()))
}

/// Prometheus metrics scrape endpoint
#[utoipa::path(
    get,
    tag = "Text Generation Inference",
    path = "/metrics",
    responses((status = 200, description = "Prometheus Metrics", body = String))
)]
async fn metrics(prom_handle: Extension<PrometheusHandle>) -> String {
    prom_handle.render()
}

/// Serving method
#[allow(clippy::too_many_arguments)]
pub async fn run(
    compat_return_full_text: bool,
    max_concurrent_requests: usize,
    max_best_of: usize,
    max_stop_sequences: usize,
    max_input_length: usize,
    max_total_tokens: usize,
    max_batch_size: usize,
    max_waiting_tokens: usize,
    client: ShardedClient,
    tokenizer: Tokenizer,
    validation_workers: usize,
    addr: SocketAddr,
    allow_origin: Option<AllowOrigin>,
) {
    // OpenAPI documentation
    #[derive(OpenApi)]
    #[openapi(
        paths(
            generate,
            generate_stream,
            metrics,
        ),
        components(
            schemas(
                GenerateRequest,
                GenerateParameters,
                PrefillToken,
                Token,
                GenerateResponse,
                BestOfSequence,
                Details,
                FinishReason,
                StreamResponse,
                StreamDetails,
                ErrorResponse,
            )
        ),
        tags(
            (name = "Text Generation Inference", description = "Hugging Face Text Generation Inference API")
        ),
        info(
            title = "Text Generation Inference",
            license(
                name = "Apache 2.0",
                url = "https://www.apache.org/licenses/LICENSE-2.0"
            )
        )
    )]
    struct ApiDoc;

    // Create state
    let validation = Validation::new(
        validation_workers,
        tokenizer,
        max_best_of,
        max_stop_sequences,
        max_input_length,
        max_total_tokens,
    );
    let infer = Infer::new(
        client,
        validation,
        max_batch_size,
        max_waiting_tokens,
        max_concurrent_requests,
    );

    // Duration buckets
    let duration_matcher = Matcher::Suffix(String::from("duration"));
    let n_duration_buckets = 35;
    let mut duration_buckets = Vec::with_capacity(n_duration_buckets);
    // Minimum duration in seconds
    let mut value = 0.0001;
    for _ in 0..n_duration_buckets {
        // geometric sequence
        value *= 1.5;
        duration_buckets.push(value);
    }
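    // With a ratio of 1.5 over 35 steps, these buckets span roughly 0.15 ms up to ~145 s.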
    // Input Length buckets
    let input_length_matcher = Matcher::Full(String::from("tgi_request_input_length"));
    let input_length_buckets: Vec<f64> = (0..100)
        .map(|x| (max_input_length as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Generated tokens buckets
    let generated_tokens_matcher = Matcher::Full(String::from("tgi_request_generated_tokens"));
    let generated_tokens_buckets: Vec<f64> = (0..100)
        .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Max new tokens buckets
    let max_new_tokens_matcher = Matcher::Full(String::from("tgi_request_max_new_tokens"));
    let max_new_tokens_buckets: Vec<f64> = (0..100)
        .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Batch size buckets
    let batch_size_matcher = Matcher::Full(String::from("tgi_batch_next_size"));
    let batch_size_buckets: Vec<f64> = (0..max_batch_size).map(|x| (x + 1) as f64).collect();

    // Prometheus handler
    let builder = PrometheusBuilder::new()
        .set_buckets_for_metric(duration_matcher, &duration_buckets)
        .unwrap()
        .set_buckets_for_metric(input_length_matcher, &input_length_buckets)
        .unwrap()
        .set_buckets_for_metric(generated_tokens_matcher, &generated_tokens_buckets)
        .unwrap()
        .set_buckets_for_metric(max_new_tokens_matcher, &max_new_tokens_buckets)
        .unwrap()
        .set_buckets_for_metric(batch_size_matcher, &batch_size_buckets)
        .unwrap();
    let prom_handle = builder
        .install_recorder()
        .expect("failed to install metrics recorder");

    // CORS layer
    let allow_origin = allow_origin.unwrap_or(AllowOrigin::any());
    let cors_layer = CorsLayer::new()
        .allow_methods([Method::GET, Method::POST])
        .allow_headers([http::header::CONTENT_TYPE])
        .allow_origin(allow_origin);

    // Create router
    let app = Router::new()
        .merge(SwaggerUi::new("/docs").url("/api-doc/openapi.json", ApiDoc::openapi()))
        // Base routes
        .route("/", post(compat_generate))
        .route("/generate", post(generate))
        .route("/generate_stream", post(generate_stream))
        // AWS Sagemaker route
        .route("/invocations", post(compat_generate))
        // Base Health route
        .route("/health", get(health))
        // Inference API health route
        .route("/", get(health))
        // AWS Sagemaker health route
        .route("/ping", get(health))
        // Prometheus metrics route
        .route("/metrics", get(metrics))
        .layer(Extension(compat_return_full_text))
        .layer(Extension(infer))
        .layer(Extension(prom_handle))
        .layer(opentelemetry_tracing_layer())
        .layer(cors_layer);

    // Run server
    axum::Server::bind(&addr)
        .serve(app.into_make_service())
        // Wait until all requests are finished to shut down
        .with_graceful_shutdown(shutdown_signal())
        .await
        .unwrap();
}

/// Shutdown signal handler
async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }

    tracing::info!("signal received, starting graceful shutdown");
    opentelemetry::global::shutdown_tracer_provider();
}

impl From<i32> for FinishReason {
    fn from(finish_reason: i32) -> Self {
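        // `from_i32` returns `None` only for values outside the protobuf enum; the sharded client
        // is expected to always send valid values, hence the unwrap.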
        let finish_reason = text_generation_client::FinishReason::from_i32(finish_reason).unwrap();
        match finish_reason {
            text_generation_client::FinishReason::Length => FinishReason::Length,
            text_generation_client::FinishReason::EosToken => FinishReason::EndOfSequenceToken,
            text_generation_client::FinishReason::StopSequence => FinishReason::StopSequence,
        }
    }
}

/// Convert to Axum supported formats
impl From<InferError> for (StatusCode, Json<ErrorResponse>) {
    fn from(err: InferError) -> Self {
        let status_code = match err {
            InferError::GenerationError(_) => StatusCode::FAILED_DEPENDENCY,
            InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS,
            InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY,
            InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR,
        };

        (
            status_code,
            Json(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            }),
        )
    }
}

impl From<InferError> for Event {
    fn from(err: InferError) -> Self {
        Event::default()
            .json_data(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            })
            .unwrap()
    }
}