/// HTTP Server logic
use crate::health::Health;
use crate::infer::{InferError, InferResponse, InferStreamResponse};
use crate::validation::ValidationError;
use crate::{
    BestOfSequence, CompatGenerateRequest, Details, ErrorResponse, FinishReason,
    GenerateParameters, GenerateRequest, GenerateResponse, HubModelInfo, Infer, Info, PrefillToken,
    StreamDetails, StreamResponse, Token, Validation,
};
use axum::extract::Extension;
use axum::http::{HeaderMap, Method, StatusCode};
use axum::response::sse::{Event, KeepAlive, Sse};
use axum::response::{IntoResponse, Response};
use axum::routing::{get, post};
use axum::{http, Json, Router};
use axum_tracing_opentelemetry::opentelemetry_tracing_layer;
use futures::stream::StreamExt;
use futures::Stream;
use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle};
use std::convert::Infallible;
use std::net::SocketAddr;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use text_generation_client::{ShardInfo, ShardedClient};
use tokenizers::Tokenizer;
use tokio::signal;
use tokio::time::Instant;
use tower_http::cors::{AllowOrigin, CorsLayer};
use tracing::{info_span, instrument, Instrument};
use utoipa::OpenApi;
use utoipa_swagger_ui::SwaggerUi;

/// Generate tokens if `stream == false` or a stream of tokens if `stream == true`
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/",
request_body = CompatGenerateRequest,
responses(
(status = 200, description = "Generated Text",
content(
("application/json" = GenerateResponse),
("text/event-stream" = StreamResponse),
)),
(status = 424, description = "Generation Error", body = ErrorResponse,
example = json ! ({"error": "Request failed during generation"})),
(status = 429, description = "Model is overloaded", body = ErrorResponse,
example = json ! ({"error": "Model is overloaded"})),
(status = 422, description = "Input validation error", body = ErrorResponse,
example = json ! ({"error": "Input validation error"})),
(status = 500, description = "Incomplete generation", body = ErrorResponse,
example = json ! ({"error": "Incomplete generation"})),
)
)]
#[instrument(skip(infer, req))]
async fn compat_generate(
    Extension(default_return_full_text): Extension<bool>,
    infer: Extension<Infer>,
    Json(mut req): Json<CompatGenerateRequest>,
) -> Result<Response, (StatusCode, Json<ErrorResponse>)> {
    // default return_full_text given the pipeline_tag
    if req.parameters.return_full_text.is_none() {
        req.parameters.return_full_text = Some(default_return_full_text)
    }

    // switch on stream
    if req.stream {
        Ok(generate_stream(infer, Json(req.into()))
            .await
            .into_response())
    } else {
        let (headers, Json(generation)) = generate(infer, Json(req.into())).await?;
        // wrap generation inside a Vec to match api-inference
        Ok((headers, Json(vec![generation])).into_response())
    }
}
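// Rough sketch of how the compat route above behaves (only `stream`, `inputs` and
// `parameters` are referenced in this file; any other request fields are assumptions):
//
//   POST /  {"inputs": "...", "stream": false}  ->  200, JSON array: [{"generated_text": "...", ...}]
//   POST /  {"inputs": "...", "stream": true}   ->  200, `text/event-stream` of StreamResponse events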

/// Text Generation Inference endpoint info
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/info",
responses((status = 200, description = "Served model info", body = Info))
)]
#[instrument]
async fn get_model_info(info: Extension<Info>) -> Json<Info> {
    Json(info.0)
}

#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/health",
responses(
(status = 200, description = "Everything is working fine"),
(status = 503, description = "Text generation inference is down", body = ErrorResponse,
example = json ! ({"error": "unhealthy", "error_type": "healthcheck"})),
)
)]
#[instrument(skip(health))]
/// Health check method
async fn health(mut health: Extension<Health>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
    match health.check().await {
        true => Ok(()),
        false => Err((
            StatusCode::SERVICE_UNAVAILABLE,
            Json(ErrorResponse {
                error: "unhealthy".to_string(),
                error_type: "healthcheck".to_string(),
            }),
        )),
    }
}
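// In practice a health probe against this route looks like:
//   GET /health -> 200 with an empty body when the check passes,
//   GET /health -> 503 with {"error": "unhealthy", "error_type": "healthcheck"} otherwise.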

/// Generate tokens
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/generate",
request_body = GenerateRequest,
responses(
(status = 200, description = "Generated Text", body = GenerateResponse),
(status = 424, description = "Generation Error", body = ErrorResponse,
example = json ! ({"error": "Request failed during generation"})),
(status = 429, description = "Model is overloaded", body = ErrorResponse,
example = json ! ({"error": "Model is overloaded"})),
(status = 422, description = "Input validation error", body = ErrorResponse,
example = json ! ({"error": "Input validation error"})),
(status = 500, description = "Incomplete generation", body = ErrorResponse,
example = json ! ({"error": "Incomplete generation"})),
)
)]
#[instrument(
skip_all,
fields(
parameters = ? req.parameters,
total_time,
validation_time,
queue_time,
inference_time,
time_per_token,
seed,
)
)]
async fn generate(
    infer: Extension<Infer>,
    Json(req): Json<GenerateRequest>,
) -> Result<(HeaderMap, Json<GenerateResponse>), (StatusCode, Json<ErrorResponse>)> {
    let span = tracing::Span::current();
    let start_time = Instant::now();
    metrics::increment_counter!("tgi_request_count");

    tracing::debug!("Input: {}", req.inputs);

    let compute_characters = req.inputs.chars().count();
    let mut add_prompt = None;
    if req.parameters.return_full_text.unwrap_or(false) {
        add_prompt = Some(req.inputs.clone());
    }

    let details = req.parameters.details || req.parameters.decoder_input_details;

    // Inference
    let (response, best_of_responses) = match req.parameters.best_of {
        Some(best_of) if best_of > 1 => {
            let (response, best_of_responses) = infer.generate_best_of(req, best_of).await?;
            (response, Some(best_of_responses))
        }
        _ => (infer.generate(req).await?, None),
    };

    // Token details
    let details = match details {
        true => {
            // convert best_of_responses
            let best_of_sequences = best_of_responses.map(|responses: Vec<InferResponse>| {
                responses
                    .into_iter()
                    .map(|response: InferResponse| {
                        // Add prompt if return_full_text
                        let mut output_text = response.generated_text.text;
                        if let Some(prompt) = &add_prompt {
                            output_text = prompt.clone() + &output_text;
                        }

                        BestOfSequence {
                            generated_text: output_text,
                            finish_reason: FinishReason::from(
                                response.generated_text.finish_reason,
                            ),
                            generated_tokens: response.generated_text.generated_tokens,
                            prefill: response.prefill,
                            tokens: response.tokens,
                            seed: response.generated_text.seed,
                        }
                    })
                    .collect()
            });

            Some(Details {
                finish_reason: FinishReason::from(response.generated_text.finish_reason),
                generated_tokens: response.generated_text.generated_tokens,
                prefill: response.prefill,
                tokens: response.tokens,
                seed: response.generated_text.seed,
                best_of_sequences,
            })
        }
        false => None,
    };

    // Timings
    let total_time = start_time.elapsed();
    let validation_time = response.queued - start_time;
    let queue_time = response.start - response.queued;
    let inference_time = Instant::now() - response.start;
    let time_per_token = inference_time / response.generated_text.generated_tokens;

    // Tracing metadata
    span.record("total_time", format!("{total_time:?}"));
    span.record("validation_time", format!("{validation_time:?}"));
    span.record("queue_time", format!("{queue_time:?}"));
    span.record("inference_time", format!("{inference_time:?}"));
    span.record("time_per_token", format!("{time_per_token:?}"));
    span.record("seed", format!("{:?}", response.generated_text.seed));

    // Headers
    let mut headers = HeaderMap::new();
    headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
    headers.insert(
        "x-compute-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-compute-characters",
        compute_characters.to_string().parse().unwrap(),
    );
    headers.insert(
        "x-total-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-validation-time",
        validation_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-queue-time",
        queue_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-inference-time",
        inference_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-time-per-token",
        time_per_token.as_millis().to_string().parse().unwrap(),
    );

    // Metrics
    metrics::increment_counter!("tgi_request_success");
    metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
    metrics::histogram!(
        "tgi_request_validation_duration",
        validation_time.as_secs_f64()
    );
    metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
    metrics::histogram!(
        "tgi_request_inference_duration",
        inference_time.as_secs_f64()
    );
    metrics::histogram!(
        "tgi_request_mean_time_per_token_duration",
        time_per_token.as_secs_f64()
    );
    metrics::histogram!(
        "tgi_request_generated_tokens",
        response.generated_text.generated_tokens as f64
    );

    // Send response
    let mut output_text = response.generated_text.text;
    if let Some(prompt) = add_prompt {
        output_text = prompt + &output_text;
    }

    tracing::debug!("Output: {}", output_text);
    tracing::info!("Success");

    let response = GenerateResponse {
        generated_text: output_text,
        details,
    };
    Ok((headers, Json(response)))
}
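// Example exchange against `/generate` (a sketch; `inputs`, `parameters`, `generated_text`
// and `details` appear above, while `max_new_tokens` is an assumed parameter name that is
// not defined in this file):
//
//   POST /generate
//   {"inputs": "What is Deep Learning?", "parameters": {"max_new_tokens": 20, "details": true}}
//
//   200 OK
//   {"generated_text": "...", "details": {"finish_reason": "length", "generated_tokens": 20, "seed": null, ...}}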

/// Generate a stream of tokens using Server-Sent Events
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/generate_stream",
request_body = GenerateRequest,
responses(
(status = 200, description = "Generated Text", body = StreamResponse,
content_type = "text/event-stream"),
(status = 424, description = "Generation Error", body = ErrorResponse,
example = json ! ({"error": "Request failed during generation"}),
content_type = "text/event-stream"),
(status = 429, description = "Model is overloaded", body = ErrorResponse,
example = json ! ({"error": "Model is overloaded"}),
content_type = "text/event-stream"),
(status = 422, description = "Input validation error", body = ErrorResponse,
example = json ! ({"error": "Input validation error"}),
content_type = "text/event-stream"),
(status = 500, description = "Incomplete generation", body = ErrorResponse,
example = json ! ({"error": "Incomplete generation"}),
content_type = "text/event-stream"),
)
)]
#[instrument(
skip_all,
fields(
parameters = ? req.parameters,
total_time,
validation_time,
queue_time,
inference_time,
time_per_token,
seed,
)
)]
async fn generate_stream(
    Extension(infer): Extension<Infer>,
    Json(req): Json<GenerateRequest>,
) -> (
    HeaderMap,
    Sse<impl Stream<Item = Result<Event, Infallible>>>,
) {
    let span = tracing::Span::current();
    let start_time = Instant::now();
    metrics::increment_counter!("tgi_request_count");

    tracing::debug!("Input: {}", req.inputs);

    let compute_characters = req.inputs.chars().count();

    let mut headers = HeaderMap::new();
    headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
    headers.insert(
        "x-compute-characters",
        compute_characters.to_string().parse().unwrap(),
    );
    headers.insert("X-Accel-Buffering", "no".parse().unwrap());

    let stream = async_stream::stream! {
        // Inference
        let mut end_reached = false;
        let mut error = false;

        let mut add_prompt = None;
        if req.parameters.return_full_text.unwrap_or(false) {
            add_prompt = Some(req.inputs.clone());
        }
        let details = req.parameters.details;

        let best_of = req.parameters.best_of.unwrap_or(1);
        if best_of != 1 {
            let err = InferError::from(ValidationError::BestOfStream);
            metrics::increment_counter!("tgi_request_failure", "err" => "validation");
            tracing::error!("{err}");
            yield Ok(Event::from(err));
        } else if req.parameters.decoder_input_details {
            let err = InferError::from(ValidationError::PrefillDetailsStream);
            metrics::increment_counter!("tgi_request_failure", "err" => "validation");
            tracing::error!("{err}");
            yield Ok(Event::from(err));
        } else {
            match infer.generate_stream(req).instrument(info_span!(parent: &span, "async_stream")).await {
                // Keep permit as long as generate_stream lives
                Ok((_permit, mut response_stream)) => {
                    // Server-Sent Event stream
                    while let Some(response) = response_stream.next().await {
                        match response {
                            Ok(response) => {
                                match response {
                                    // Prefill is ignored
                                    InferStreamResponse::Prefill(_) => {}
                                    // Yield event for every new token
                                    InferStreamResponse::Token(token) => {
                                        tracing::debug!(parent: &span, "Token: {:?}", token);

                                        // StreamResponse
                                        let stream_token = StreamResponse {
                                            token,
                                            generated_text: None,
                                            details: None,
                                        };

                                        yield Ok(Event::default().json_data(stream_token).unwrap())
                                    }
                                    // Yield event for last token and compute timings
                                    InferStreamResponse::End {
                                        token,
                                        generated_text,
                                        start,
                                        queued,
                                    } => {
                                        // Token details
                                        let details = match details {
                                            true => Some(StreamDetails {
                                                finish_reason: FinishReason::from(generated_text.finish_reason),
                                                generated_tokens: generated_text.generated_tokens,
                                                seed: generated_text.seed,
                                            }),
                                            false => None,
                                        };

                                        // Timings
                                        let total_time = start_time.elapsed();
                                        let validation_time = queued - start_time;
                                        let queue_time = start - queued;
                                        let inference_time = Instant::now() - start;
                                        let time_per_token = inference_time / generated_text.generated_tokens;

                                        // Tracing metadata
                                        span.record("total_time", format!("{total_time:?}"));
                                        span.record("validation_time", format!("{validation_time:?}"));
                                        span.record("queue_time", format!("{queue_time:?}"));
                                        span.record("inference_time", format!("{inference_time:?}"));
                                        span.record("time_per_token", format!("{time_per_token:?}"));
                                        span.record("seed", format!("{:?}", generated_text.seed));

                                        // Metrics
                                        metrics::increment_counter!("tgi_request_success");
                                        metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_validation_duration", validation_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_inference_duration", inference_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token.as_secs_f64());
                                        metrics::histogram!("tgi_request_generated_tokens", generated_text.generated_tokens as f64);

                                        // StreamResponse
                                        end_reached = true;

                                        let mut output_text = generated_text.text;
                                        if let Some(prompt) = add_prompt {
                                            output_text = prompt + &output_text;
                                        }

                                        tracing::debug!(parent: &span, "Output: {}", output_text);
                                        tracing::info!(parent: &span, "Success");

                                        let stream_token = StreamResponse {
                                            token,
                                            generated_text: Some(output_text),
                                            details
                                        };

                                        yield Ok(Event::default().json_data(stream_token).unwrap());
                                        break;
                                    }
                                }
                            }
                            // yield error
                            Err(err) => {
                                error = true;
                                yield Ok(Event::from(err));
                                break;
                            }
                        }
                    }
                },
                // yield error
                Err(err) => {
                    error = true;
                    yield Ok(Event::from(err));
                }
            }
            // Check if generation reached the end
            // Skip if we already sent an error
            if !end_reached && !error {
                let err = InferError::IncompleteGeneration;
                metrics::increment_counter!("tgi_request_failure", "err" => "incomplete");
                tracing::error!("{err}");
                yield Ok(Event::from(err));
            }
        }
    };

    (headers, Sse::new(stream).keep_alive(KeepAlive::default()))
}
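// Shape of the resulting `/generate_stream` event stream (a sketch; field names follow
// `StreamResponse` and `StreamDetails` above, token contents abbreviated):
//
//   data: {"token": {...}, "generated_text": null, "details": null}
//   data: {"token": {...}, "generated_text": null, "details": null}
//   data: {"token": {...}, "generated_text": "<full text>", "details": {"finish_reason": "...", "generated_tokens": N, "seed": ...}}
//
// Each `data:` line is one Server-Sent Event produced by `Event::json_data`.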

/// Prometheus metrics scrape endpoint
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/metrics",
responses((status = 200, description = "Prometheus Metrics", body = String))
)]
async fn metrics(prom_handle: Extension<PrometheusHandle>) -> String {
    prom_handle.render()
}

/// Serving method
#[allow(clippy::too_many_arguments)]
pub async fn run(
    model_info: HubModelInfo,
    shard_info: ShardInfo,
    compat_return_full_text: bool,
    max_concurrent_requests: usize,
    max_best_of: usize,
    max_stop_sequences: usize,
    max_input_length: usize,
    max_total_tokens: usize,
    waiting_served_ratio: f32,
    max_batch_prefill_tokens: u32,
    max_batch_total_tokens: u32,
    max_waiting_tokens: usize,
    client: ShardedClient,
    tokenizer: Option<Tokenizer>,
    validation_workers: usize,
    addr: SocketAddr,
    allow_origin: Option<AllowOrigin>,
    ngrok: bool,
    ngrok_authtoken: Option<String>,
    ngrok_edge: Option<String>,
) -> Result<(), axum::BoxError> {
    // OpenAPI documentation
    #[derive(OpenApi)]
    #[openapi(
    paths(
    health,
    get_model_info,
    compat_generate,
    generate,
    generate_stream,
    metrics,
    ),
    components(
    schemas(
    Info,
    CompatGenerateRequest,
    GenerateRequest,
    GenerateParameters,
    PrefillToken,
    Token,
    GenerateResponse,
    BestOfSequence,
    Details,
    FinishReason,
    StreamResponse,
    StreamDetails,
    ErrorResponse,
    )
    ),
    tags(
    (name = "Text Generation Inference", description = "Hugging Face Text Generation Inference API")
    ),
    info(
    title = "Text Generation Inference",
    license(
    name = "Apache 2.0",
    url = "https://www.apache.org/licenses/LICENSE-2.0"
    )
    )
    )]
    struct ApiDoc;

    // Create state
    let validation = Validation::new(
        validation_workers,
        tokenizer,
        max_best_of,
        max_stop_sequences,
        max_input_length,
        max_total_tokens,
    );
    let generation_health = Arc::new(AtomicBool::new(false));
    let health_ext = Health::new(client.clone(), generation_health.clone());
    let infer = Infer::new(
        client,
        validation,
        waiting_served_ratio,
        max_batch_prefill_tokens,
        max_batch_total_tokens,
        max_waiting_tokens,
        max_concurrent_requests,
        shard_info.requires_padding,
        generation_health,
    );

    // Duration buckets
    let duration_matcher = Matcher::Suffix(String::from("duration"));
    let n_duration_buckets = 35;
    let mut duration_buckets = Vec::with_capacity(n_duration_buckets);
    // Minimum duration in seconds
    let mut value = 0.0001;
    for _ in 0..n_duration_buckets {
        // geometric sequence
        value *= 1.5;
        duration_buckets.push(value);
    }
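    // Back-of-the-envelope: starting from 1e-4 s and multiplying by 1.5 thirty-five times,
    // the buckets span roughly 0.00015 s up to ~0.0001 * 1.5^35 ≈ 146 s.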
    // Input Length buckets
    let input_length_matcher = Matcher::Full(String::from("tgi_request_input_length"));
    let input_length_buckets: Vec<f64> = (0..100)
        .map(|x| (max_input_length as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Generated tokens buckets
    let generated_tokens_matcher = Matcher::Full(String::from("tgi_request_generated_tokens"));
    let generated_tokens_buckets: Vec<f64> = (0..100)
        .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Max new tokens buckets
    let max_new_tokens_matcher = Matcher::Full(String::from("tgi_request_max_new_tokens"));
    let max_new_tokens_buckets: Vec<f64> = (0..100)
        .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Batch size buckets
    let batch_size_matcher = Matcher::Full(String::from("tgi_batch_next_size"));
    let batch_size_buckets: Vec<f64> = (0..1024).map(|x| (x + 1) as f64).collect();

    // Prometheus handler
    let builder = PrometheusBuilder::new()
        .set_buckets_for_metric(duration_matcher, &duration_buckets)
        .unwrap()
        .set_buckets_for_metric(input_length_matcher, &input_length_buckets)
        .unwrap()
        .set_buckets_for_metric(generated_tokens_matcher, &generated_tokens_buckets)
        .unwrap()
        .set_buckets_for_metric(max_new_tokens_matcher, &max_new_tokens_buckets)
        .unwrap()
        .set_buckets_for_metric(batch_size_matcher, &batch_size_buckets)
        .unwrap();
    let prom_handle = builder
        .install_recorder()
        .expect("failed to install metrics recorder");

    // CORS layer
    let allow_origin = allow_origin.unwrap_or(AllowOrigin::any());
    let cors_layer = CorsLayer::new()
        .allow_methods([Method::GET, Method::POST])
        .allow_headers([http::header::CONTENT_TYPE])
        .allow_origin(allow_origin);

    // Endpoint info
    let info = Info {
        model_id: model_info.model_id,
        model_sha: model_info.sha,
        model_dtype: shard_info.dtype,
        model_device_type: shard_info.device_type,
        model_pipeline_tag: model_info.pipeline_tag,
        max_concurrent_requests,
        max_best_of,
        max_stop_sequences,
        max_input_length,
        max_total_tokens,
        waiting_served_ratio,
        max_batch_total_tokens,
        max_waiting_tokens,
        validation_workers,
        version: env!("CARGO_PKG_VERSION"),
        sha: option_env!("VERGEN_GIT_SHA"),
        docker_label: option_env!("DOCKER_LABEL"),
    };

    // Create router
    let app = Router::new()
        .merge(SwaggerUi::new("/docs").url("/api-doc/openapi.json", ApiDoc::openapi()))
        // Base routes
        .route("/", post(compat_generate))
        .route("/info", get(get_model_info))
        .route("/generate", post(generate))
        .route("/generate_stream", post(generate_stream))
        // AWS Sagemaker route
        .route("/invocations", post(compat_generate))
        // Base Health route
        .route("/health", get(health))
        // Inference API health route
        .route("/", get(health))
        // AWS Sagemaker health route
        .route("/ping", get(health))
        // Prometheus metrics route
        .route("/metrics", get(metrics))
        .layer(Extension(info))
        .layer(Extension(health_ext.clone()))
        .layer(Extension(compat_return_full_text))
        .layer(Extension(infer))
        .layer(Extension(prom_handle.clone()))
        .layer(opentelemetry_tracing_layer())
        .layer(cors_layer);

    if ngrok {
        #[cfg(feature = "ngrok")]
        {
            use ngrok::config::TunnelBuilder;

            let _ = addr;

            let authtoken =
                ngrok_authtoken.expect("`ngrok-authtoken` must be set when using ngrok tunneling");

            let edge = ngrok_edge.expect("`ngrok-edge` must be set when using ngrok tunneling");

            let tunnel = ngrok::Session::builder()
                .authtoken(authtoken)
                .connect()
                .await
                .unwrap()
                .labeled_tunnel()
                .label("edge", edge);

            let listener = tunnel.listen().await.unwrap();

            // Run prom metrics and health locally too
            tokio::spawn(
                axum::Server::bind(&addr)
                    .serve(
                        Router::new()
                            .route("/health", get(health))
                            .route("/metrics", get(metrics))
                            .layer(Extension(health_ext))
                            .layer(Extension(prom_handle))
                            .into_make_service(),
                    )
                    //Wait until all requests are finished to shut down
                    .with_graceful_shutdown(shutdown_signal()),
            );

            // Run server
            axum::Server::builder(listener)
                .serve(app.into_make_service())
                //Wait until all requests are finished to shut down
                .with_graceful_shutdown(shutdown_signal())
                .await?;
        }
        #[cfg(not(feature = "ngrok"))]
        {
            let _ngrok_authtoken = ngrok_authtoken;
            let _ngrok_edge = ngrok_edge;

            panic!("`text-generation-router` was compiled without the `ngrok` feature");
        }
    } else {
        // Run server
        axum::Server::bind(&addr)
            .serve(app.into_make_service())
            // Wait until all requests are finished to shut down
            .with_graceful_shutdown(shutdown_signal())
            .await?;
    }
    Ok(())
}

/// Shutdown signal handler
async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }

    tracing::info!("signal received, starting graceful shutdown");
    opentelemetry::global::shutdown_tracer_provider();
}

impl From<i32> for FinishReason {
    fn from(finish_reason: i32) -> Self {
        let finish_reason = text_generation_client::FinishReason::from_i32(finish_reason).unwrap();
        match finish_reason {
            text_generation_client::FinishReason::Length => FinishReason::Length,
            text_generation_client::FinishReason::EosToken => FinishReason::EndOfSequenceToken,
            text_generation_client::FinishReason::StopSequence => FinishReason::StopSequence,
        }
    }
}

/// Convert to Axum supported formats
impl From<InferError> for (StatusCode, Json<ErrorResponse>) {
    fn from(err: InferError) -> Self {
        let status_code = match err {
            InferError::GenerationError(_) => StatusCode::FAILED_DEPENDENCY,
            InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS,
            InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY,
            InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR,
        };

        (
            status_code,
            Json(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            }),
        )
    }
}

impl From<InferError> for Event {
    fn from(err: InferError) -> Self {
        Event::default()
            .json_data(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            })
            .unwrap()
    }
}