/// HTTP Server logic
use crate::health::Health;
use crate::infer::{InferError, InferResponse, InferStreamResponse};
use crate::validation::ValidationError;
use crate::{
    BestOfSequence, CompatGenerateRequest, Details, ErrorResponse, FinishReason,
    GenerateParameters, GenerateRequest, GenerateResponse, HubModelInfo, Infer, Info, PrefillToken,
    StreamDetails, StreamResponse, Token, Validation,
};
use axum::extract::Extension;
use axum::http::{HeaderMap, Method, StatusCode};
use axum::response::sse::{Event, KeepAlive, Sse};
use axum::response::{IntoResponse, Response};
use axum::routing::{get, post};
use axum::{http, Json, Router};
use axum_tracing_opentelemetry::middleware::OtelAxumLayer;
use futures::stream::StreamExt;
use futures::Stream;
use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle};
use std::convert::Infallible;
use std::net::SocketAddr;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use text_generation_client::{ShardInfo, ShardedClient};
use tokenizers::Tokenizer;
use tokio::signal;
use tokio::time::Instant;
use tower_http::cors::{AllowOrigin, CorsLayer};
use tracing::{info_span, instrument, Instrument};
use utoipa::OpenApi;
use utoipa_swagger_ui::SwaggerUi;

/// Generate tokens if `stream == false` or a stream of tokens if `stream == true`
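///
/// An illustrative (non-authoritative) request body, assuming typical
/// `GenerateParameters` fields such as `max_new_tokens`:
/// `{"inputs": "My name is", "parameters": {"max_new_tokens": 20}, "stream": false}`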
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/",
request_body = CompatGenerateRequest,
responses(
(status = 200, description = "Generated Text",
content(
("application/json" = GenerateResponse),
("text/event-stream" = StreamResponse),
)),
(status = 424, description = "Generation Error", body = ErrorResponse,
example = json ! ({"error": "Request failed during generation"})),
(status = 429, description = "Model is overloaded", body = ErrorResponse,
example = json ! ({"error": "Model is overloaded"})),
(status = 422, description = "Input validation error", body = ErrorResponse,
example = json ! ({"error": "Input validation error"})),
(status = 500, description = "Incomplete generation", body = ErrorResponse,
example = json ! ({"error": "Incomplete generation"})),
)
)]
#[instrument(skip(infer, req))]
async fn compat_generate(
    Extension(default_return_full_text): Extension<bool>,
    infer: Extension<Infer>,
    Json(mut req): Json<CompatGenerateRequest>,
) -> Result<Response, (StatusCode, Json<ErrorResponse>)> {
    // default return_full_text given the pipeline_tag
    if req.parameters.return_full_text.is_none() {
        req.parameters.return_full_text = Some(default_return_full_text)
    }

    // switch on stream
    if req.stream {
        Ok(generate_stream(infer, Json(req.into()))
            .await
            .into_response())
    } else {
        let (headers, Json(generation)) = generate(infer, Json(req.into())).await?;
        // wrap generation inside a Vec to match api-inference
        Ok((headers, Json(vec![generation])).into_response())
    }
}

/// Text Generation Inference endpoint info
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/info",
responses((status = 200, description = "Served model info", body = Info))
)]
#[instrument]
async fn get_model_info(info: Extension<Info>) -> Json<Info> {
    Json(info.0)
}

#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/health",
responses(
(status = 200, description = "Everything is working fine"),
(status = 503, description = "Text generation inference is down", body = ErrorResponse,
example = json ! ({"error": "unhealthy", "error_type": "healthcheck"})),
)
)]
#[instrument(skip(health))]
/// Health check method
async fn health(mut health: Extension<Health>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
    match health.check().await {
        true => Ok(()),
        false => Err((
            StatusCode::SERVICE_UNAVAILABLE,
            Json(ErrorResponse {
                error: "unhealthy".to_string(),
                error_type: "healthcheck".to_string(),
            }),
        )),
    }
}

/// Generate tokens
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/generate",
request_body = GenerateRequest,
responses(
(status = 200, description = "Generated Text", body = GenerateResponse),
(status = 424, description = "Generation Error", body = ErrorResponse,
example = json ! ({"error": "Request failed during generation"})),
(status = 429, description = "Model is overloaded", body = ErrorResponse,
example = json ! ({"error": "Model is overloaded"})),
(status = 422, description = "Input validation error", body = ErrorResponse,
example = json ! ({"error": "Input validation error"})),
(status = 500, description = "Incomplete generation", body = ErrorResponse,
example = json ! ({"error": "Incomplete generation"})),
)
)]
#[instrument(
skip_all,
fields(
parameters = ? req.parameters,
total_time,
validation_time,
queue_time,
inference_time,
time_per_token,
seed,
)
)]
async fn generate(
    infer: Extension<Infer>,
    Json(req): Json<GenerateRequest>,
) -> Result<(HeaderMap, Json<GenerateResponse>), (StatusCode, Json<ErrorResponse>)> {
    let span = tracing::Span::current();
    let start_time = Instant::now();
    metrics::increment_counter!("tgi_request_count");

    tracing::debug!("Input: {}", req.inputs);

    let compute_characters = req.inputs.chars().count();
    let mut add_prompt = None;
    if req.parameters.return_full_text.unwrap_or(false) {
        add_prompt = Some(req.inputs.clone());
    }

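    // Requesting decoder input details implies the full details payload, since
    // prefill tokens are only reported there.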
    let details: bool = req.parameters.details || req.parameters.decoder_input_details;

    // Inference
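    // With best_of > 1, several candidate sequences are generated; the
    // non-selected candidates are surfaced via `Details::best_of_sequences`.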
    let (response, best_of_responses) = match req.parameters.best_of {
        Some(best_of) if best_of > 1 => {
            let (response, best_of_responses) = infer.generate_best_of(req, best_of).await?;
            (response, Some(best_of_responses))
        }
        _ => (infer.generate(req).await?, None),
    };

    // Token details
    let input_length = response._input_length;
    let details = match details {
        true => {
            // convert best_of_responses
            let best_of_sequences = best_of_responses.map(|responses: Vec<InferResponse>| {
                responses
                    .into_iter()
                    .map(|response: InferResponse| {
                        // Add prompt if return_full_text
                        let mut output_text = response.generated_text.text;
                        if let Some(prompt) = &add_prompt {
                            output_text = prompt.clone() + &output_text;
                        }

                        BestOfSequence {
                            generated_text: output_text,
                            finish_reason: FinishReason::from(
                                response.generated_text.finish_reason,
                            ),
                            generated_tokens: response.generated_text.generated_tokens,
                            prefill: response.prefill,
                            tokens: response.tokens,
                            top_tokens: response.top_tokens,
                            seed: response.generated_text.seed,
                        }
                    })
                    .collect()
            });

            Some(Details {
                finish_reason: FinishReason::from(response.generated_text.finish_reason),
                generated_tokens: response.generated_text.generated_tokens,
                prefill: response.prefill,
                tokens: response.tokens,
                seed: response.generated_text.seed,
                best_of_sequences,
                top_tokens: response.top_tokens,
            })
        }
        false => None,
    };

    // Timings
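    // Timeline: start_time (request received) -> response.queued (validation
    // done, request enqueued) -> response.start (inference began) -> now.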
    let total_time = start_time.elapsed();
    let validation_time = response.queued - start_time;
    let queue_time = response.start - response.queued;
    let inference_time = Instant::now() - response.start;
    let time_per_token = inference_time / response.generated_text.generated_tokens;

    // Tracing metadata
    span.record("total_time", format!("{total_time:?}"));
    span.record("validation_time", format!("{validation_time:?}"));
    span.record("queue_time", format!("{queue_time:?}"));
    span.record("inference_time", format!("{inference_time:?}"));
    span.record("time_per_token", format!("{time_per_token:?}"));
    span.record("seed", format!("{:?}", response.generated_text.seed));

    // Headers
    let mut headers = HeaderMap::new();
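    // Header values below are ASCII digits or plain tokens, so parsing into
    // `HeaderValue` cannot fail and the unwraps are safe.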
    headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
    headers.insert(
        "x-compute-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-compute-characters",
        compute_characters.to_string().parse().unwrap(),
    );
    headers.insert(
        "x-total-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-validation-time",
        validation_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-queue-time",
        queue_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-inference-time",
        inference_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-time-per-token",
        time_per_token.as_millis().to_string().parse().unwrap(),
    );
    headers.insert("x-prompt-tokens", input_length.into());
    headers.insert(
        "x-generated-tokens",
        response.generated_text.generated_tokens.into(),
    );

    // Metrics
    metrics::increment_counter!("tgi_request_success");
    metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
    metrics::histogram!(
        "tgi_request_validation_duration",
        validation_time.as_secs_f64()
    );
    metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
    metrics::histogram!(
        "tgi_request_inference_duration",
        inference_time.as_secs_f64()
    );
    metrics::histogram!(
        "tgi_request_mean_time_per_token_duration",
        time_per_token.as_secs_f64()
    );
    metrics::histogram!(
        "tgi_request_generated_tokens",
        response.generated_text.generated_tokens as f64
    );

    // Send response
    let mut output_text = response.generated_text.text;
    if let Some(prompt) = add_prompt {
        output_text = prompt + &output_text;
    }

    tracing::debug!("Output: {}", output_text);
    tracing::info!("Success");

    let response = GenerateResponse {
        generated_text: output_text,
        details,
    };
    Ok((headers, Json(response)))
}

/// Generate a stream of tokens using Server-Sent Events
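///
/// Each SSE frame carries a JSON-serialized `StreamResponse`; an illustrative
/// (not verbatim) event: `data: {"token": {...}, "generated_text": null, "details": null}`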
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/generate_stream",
request_body = GenerateRequest,
responses(
(status = 200, description = "Generated Text", body = StreamResponse,
content_type = "text/event-stream"),
(status = 424, description = "Generation Error", body = ErrorResponse,
example = json ! ({"error": "Request failed during generation"}),
content_type = "text/event-stream"),
(status = 429, description = "Model is overloaded", body = ErrorResponse,
example = json ! ({"error": "Model is overloaded"}),
content_type = "text/event-stream"),
(status = 422, description = "Input validation error", body = ErrorResponse,
example = json ! ({"error": "Input validation error"}),
content_type = "text/event-stream"),
(status = 500, description = "Incomplete generation", body = ErrorResponse,
example = json ! ({"error": "Incomplete generation"}),
content_type = "text/event-stream"),
)
)]
#[instrument(
skip_all,
fields(
parameters = ? req.parameters,
total_time,
validation_time,
queue_time,
inference_time,
time_per_token,
seed,
)
)]
async fn generate_stream(
    Extension(infer): Extension<Infer>,
    Json(req): Json<GenerateRequest>,
) -> (
    HeaderMap,
    Sse<impl Stream<Item = Result<Event, Infallible>>>,
) {
    let span = tracing::Span::current();
    let start_time = Instant::now();
    metrics::increment_counter!("tgi_request_count");

    tracing::debug!("Input: {}", req.inputs);

    let compute_characters = req.inputs.chars().count();

    let mut headers = HeaderMap::new();
    headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
    headers.insert(
        "x-compute-characters",
        compute_characters.to_string().parse().unwrap(),
    );
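    // Ask reverse proxies (e.g. nginx) not to buffer the event stream, so
    // tokens are flushed to the client as soon as they are produced.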
    headers.insert("X-Accel-Buffering", "no".parse().unwrap());

    let stream = async_stream::stream! {
        // Inference
        let mut end_reached = false;
        let mut error = false;

        let mut add_prompt = None;
        if req.parameters.return_full_text.unwrap_or(false) {
            add_prompt = Some(req.inputs.clone());
        }
        let details = req.parameters.details;

        let best_of = req.parameters.best_of.unwrap_or(1);
        if best_of != 1 {
            let err = InferError::from(ValidationError::BestOfStream);
            metrics::increment_counter!("tgi_request_failure", "err" => "validation");
            tracing::error!("{err}");
            yield Ok(Event::from(err));
        } else if req.parameters.decoder_input_details {
            let err = InferError::from(ValidationError::PrefillDetailsStream);
            metrics::increment_counter!("tgi_request_failure", "err" => "validation");
            tracing::error!("{err}");
            yield Ok(Event::from(err));
        } else {
            match infer.generate_stream(req).instrument(info_span!(parent: &span, "async_stream")).await {
                // Keep permit as long as generate_stream lives
                Ok((_permit, _input_length, mut response_stream)) => {
                    // Server-Sent Event stream
                    while let Some(response) = response_stream.next().await {
                        match response {
                            Ok(response) => {
                                match response {
                                    // Prefill is ignored
                                    InferStreamResponse::Prefill(_) => {}
                                    // Yield event for every new token
                                    InferStreamResponse::Intermediate {
                                        token,
                                        top_tokens,
                                    } => {
                                        tracing::debug!(parent: &span, "Token: {:?}", token);

                                        // StreamResponse
                                        let stream_token = StreamResponse {
                                            token,
                                            top_tokens,
                                            generated_text: None,
                                            details: None,
                                        };

                                        yield Ok(Event::default().json_data(stream_token).unwrap())
                                    }
                                    // Yield event for last token and compute timings
                                    InferStreamResponse::End {
                                        token,
                                        generated_text,
                                        start,
                                        queued,
                                        top_tokens,
                                    } => {
                                        // Token details
                                        let details = match details {
                                            true => Some(StreamDetails {
                                                finish_reason: FinishReason::from(generated_text.finish_reason),
                                                generated_tokens: generated_text.generated_tokens,
                                                seed: generated_text.seed,
                                            }),
                                            false => None,
                                        };

                                        // Timings
                                        let total_time = start_time.elapsed();
                                        let validation_time = queued - start_time;
                                        let queue_time = start - queued;
                                        let inference_time = Instant::now() - start;
                                        let time_per_token = inference_time / generated_text.generated_tokens;

                                        // Tracing metadata
                                        span.record("total_time", format!("{total_time:?}"));
                                        span.record("validation_time", format!("{validation_time:?}"));
                                        span.record("queue_time", format!("{queue_time:?}"));
                                        span.record("inference_time", format!("{inference_time:?}"));
                                        span.record("time_per_token", format!("{time_per_token:?}"));
                                        span.record("seed", format!("{:?}", generated_text.seed));

                                        // Metrics
                                        metrics::increment_counter!("tgi_request_success");
                                        metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_validation_duration", validation_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_inference_duration", inference_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token.as_secs_f64());
                                        metrics::histogram!("tgi_request_generated_tokens", generated_text.generated_tokens as f64);

                                        // StreamResponse
                                        end_reached = true;

                                        let mut output_text = generated_text.text;
                                        if let Some(prompt) = add_prompt {
                                            output_text = prompt + &output_text;
                                        }

                                        tracing::debug!(parent: &span, "Output: {}", output_text);
                                        tracing::info!(parent: &span, "Success");

                                        let stream_token = StreamResponse {
                                            token,
                                            top_tokens,
                                            generated_text: Some(output_text),
                                            details
                                        };

                                        yield Ok(Event::default().json_data(stream_token).unwrap());
                                        break;
                                    }
                                }
                            }
                            // yield error
                            Err(err) => {
                                error = true;
                                yield Ok(Event::from(err));
                                break;
                            }
                        }
                    }
                },
                // yield error
                Err(err) => {
                    error = true;
                    yield Ok(Event::from(err));
                }
            }
            // Check if generation reached the end
            // Skip if we already sent an error
            if !end_reached && !error {
                let err = InferError::IncompleteGeneration;
                metrics::increment_counter!("tgi_request_failure", "err" => "incomplete");
                tracing::error!("{err}");
                yield Ok(Event::from(err));
            }
        }
    };

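    // Periodic keep-alive events prevent idle proxies from closing the stream.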
    (headers, Sse::new(stream).keep_alive(KeepAlive::default()))
}

/// Prometheus metrics scrape endpoint
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/metrics",
responses((status = 200, description = "Prometheus Metrics", body = String))
)]
async fn metrics(prom_handle: Extension<PrometheusHandle>) -> String {
    prom_handle.render()
}

/// Serving method
#[allow(clippy::too_many_arguments)]
pub async fn run(
    model_info: HubModelInfo,
    shard_info: ShardInfo,
    compat_return_full_text: bool,
    max_concurrent_requests: usize,
    max_best_of: usize,
    max_stop_sequences: usize,
    max_top_n_tokens: u32,
    max_input_length: usize,
    max_total_tokens: usize,
    waiting_served_ratio: f32,
    max_batch_prefill_tokens: u32,
    max_batch_total_tokens: u32,
    max_waiting_tokens: usize,
    client: ShardedClient,
    tokenizer: Option<Tokenizer>,
    validation_workers: usize,
    addr: SocketAddr,
    allow_origin: Option<AllowOrigin>,
    ngrok: bool,
    ngrok_authtoken: Option<String>,
    ngrok_edge: Option<String>,
) -> Result<(), axum::BoxError> {
    // OpenAPI documentation
    #[derive(OpenApi)]
    #[openapi(
    paths(
    health,
    get_model_info,
    compat_generate,
    generate,
    generate_stream,
    metrics,
    ),
    components(
    schemas(
    Info,
    CompatGenerateRequest,
    GenerateRequest,
    GenerateParameters,
    PrefillToken,
    Token,
    GenerateResponse,
    BestOfSequence,
    Details,
    FinishReason,
    StreamResponse,
    StreamDetails,
    ErrorResponse,
    )
    ),
    tags(
    (name = "Text Generation Inference", description = "Hugging Face Text Generation Inference API")
    ),
    info(
    title = "Text Generation Inference",
    license(
    name = "Apache 2.0",
    url = "https://www.apache.org/licenses/LICENSE-2.0"
    )
    )
    )]
    struct ApiDoc;

    // Create state
    let validation = Validation::new(
        validation_workers,
        tokenizer,
        max_best_of,
        max_stop_sequences,
        max_top_n_tokens,
        max_input_length,
        max_total_tokens,
    );
    let generation_health = Arc::new(AtomicBool::new(false));
    let health_ext = Health::new(client.clone(), generation_health.clone());
    let infer = Infer::new(
        client,
        validation,
        waiting_served_ratio,
        max_batch_prefill_tokens,
        max_batch_total_tokens,
        max_waiting_tokens,
        max_concurrent_requests,
        shard_info.requires_padding,
        shard_info.window_size,
        shard_info.speculate,
        generation_health,
    );

    // Duration buckets
    let duration_matcher = Matcher::Suffix(String::from("duration"));
    let n_duration_buckets = 35;
    let mut duration_buckets = Vec::with_capacity(n_duration_buckets);
    // Minimum duration in seconds
    let mut value = 0.0001;
    for _ in 0..n_duration_buckets {
        // geometric sequence
        value *= 1.5;
        duration_buckets.push(value);
    }
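    // With these constants, the 35 buckets span roughly 150µs (0.0001 * 1.5)
    // up to ~145s (0.0001 * 1.5^35).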
    // Input Length buckets
    let input_length_matcher = Matcher::Full(String::from("tgi_request_input_length"));
    let input_length_buckets: Vec<f64> = (0..100)
        .map(|x| (max_input_length as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Generated tokens buckets
    let generated_tokens_matcher = Matcher::Full(String::from("tgi_request_generated_tokens"));
    let generated_tokens_buckets: Vec<f64> = (0..100)
        .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Input Length buckets
    let max_new_tokens_matcher = Matcher::Full(String::from("tgi_request_max_new_tokens"));
    let max_new_tokens_buckets: Vec<f64> = (0..100)
        .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Batch size buckets
    let batch_size_matcher = Matcher::Full(String::from("tgi_batch_next_size"));
    let batch_size_buckets: Vec<f64> = (0..1024).map(|x| (x + 1) as f64).collect();
    // Speculated tokens buckets
    let skipped_matcher = Matcher::Full(String::from("tgi_request_skipped_tokens"));
    let skipped_buckets: Vec<f64> = (0..shard_info.speculate + 1).map(|x| x as f64).collect();

    // Prometheus handler
    let builder = PrometheusBuilder::new()
        .set_buckets_for_metric(duration_matcher, &duration_buckets)
        .unwrap()
        .set_buckets_for_metric(input_length_matcher, &input_length_buckets)
        .unwrap()
        .set_buckets_for_metric(generated_tokens_matcher, &generated_tokens_buckets)
        .unwrap()
        .set_buckets_for_metric(max_new_tokens_matcher, &max_new_tokens_buckets)
        .unwrap()
        .set_buckets_for_metric(batch_size_matcher, &batch_size_buckets)
        .unwrap()
        .set_buckets_for_metric(skipped_matcher, &skipped_buckets)
        .unwrap();
    let prom_handle = builder
        .install_recorder()
        .expect("failed to install metrics recorder");

    // CORS layer
    let allow_origin = allow_origin.unwrap_or(AllowOrigin::any());
    let cors_layer = CorsLayer::new()
        .allow_methods([Method::GET, Method::POST])
        .allow_headers([http::header::CONTENT_TYPE])
        .allow_origin(allow_origin);

    // Endpoint info
    let info = Info {
        model_id: model_info.model_id,
        model_sha: model_info.sha,
        model_dtype: shard_info.dtype,
        model_device_type: shard_info.device_type,
        model_pipeline_tag: model_info.pipeline_tag,
        max_concurrent_requests,
        max_best_of,
        max_stop_sequences,
        max_input_length,
        max_total_tokens,
        waiting_served_ratio,
        max_batch_total_tokens,
        max_waiting_tokens,
        validation_workers,
        version: env!("CARGO_PKG_VERSION"),
        sha: option_env!("VERGEN_GIT_SHA"),
        docker_label: option_env!("DOCKER_LABEL"),
    };

    // Create router
    let app = Router::new()
        .merge(SwaggerUi::new("/docs").url("/api-doc/openapi.json", ApiDoc::openapi()))
        // Base routes
        .route("/", post(compat_generate))
        .route("/info", get(get_model_info))
        .route("/generate", post(generate))
        .route("/generate_stream", post(generate_stream))
        // AWS Sagemaker route
        .route("/invocations", post(compat_generate))
        // Base Health route
        .route("/health", get(health))
        // Inference API health route
        .route("/", get(health))
        // AWS Sagemaker health route
        .route("/ping", get(health))
        // Prometheus metrics route
        .route("/metrics", get(metrics))
        .layer(Extension(info))
        .layer(Extension(health_ext.clone()))
        .layer(Extension(compat_return_full_text))
        .layer(Extension(infer))
        .layer(Extension(prom_handle.clone()))
        .layer(OtelAxumLayer::default())
        .layer(cors_layer);

    if ngrok {
        #[cfg(feature = "ngrok")]
        {
            use ngrok::config::TunnelBuilder;

            let _ = addr;

            let authtoken =
                ngrok_authtoken.expect("`ngrok-authtoken` must be set when using ngrok tunneling");

            let edge = ngrok_edge.expect("`ngrok-edge` must be set when using ngrok tunneling");

            let tunnel = ngrok::Session::builder()
                .authtoken(authtoken)
                .connect()
                .await
                .unwrap()
                .labeled_tunnel()
                .label("edge", edge);

            let listener = tunnel.listen().await.unwrap();

            // Run prom metrics and health locally too
            tokio::spawn(
                axum::Server::bind(&addr)
                    .serve(
                        Router::new()
                            .route("/health", get(health))
                            .route("/metrics", get(metrics))
                            .layer(Extension(health_ext))
                            .layer(Extension(prom_handle))
                            .into_make_service(),
                    )
                    //Wait until all requests are finished to shut down
                    .with_graceful_shutdown(shutdown_signal()),
            );

            // Run server
            axum::Server::builder(listener)
                .serve(app.into_make_service())
                //Wait until all requests are finished to shut down
                .with_graceful_shutdown(shutdown_signal())
                .await?;
        }
        #[cfg(not(feature = "ngrok"))]
        {
            let _ngrok_authtoken = ngrok_authtoken;
            let _ngrok_edge = ngrok_edge;

            panic!("`text-generation-router` was compiled without the `ngrok` feature");
        }
    } else {
        // Run server
        axum::Server::bind(&addr)
            .serve(app.into_make_service())
            // Wait until all requests are finished to shut down
            .with_graceful_shutdown(shutdown_signal())
            .await?;
    }
    Ok(())
}

/// Shutdown signal handler
async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }

    tracing::info!("signal received, starting graceful shutdown");
    opentelemetry::global::shutdown_tracer_provider();
}

impl From<i32> for FinishReason {
    fn from(finish_reason: i32) -> Self {
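        // The raw i32 comes from a protobuf-decoded message and is assumed to
        // hold a valid variant; an out-of-range value would panic here.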
        let finish_reason = text_generation_client::FinishReason::try_from(finish_reason).unwrap();
        match finish_reason {
            text_generation_client::FinishReason::Length => FinishReason::Length,
            text_generation_client::FinishReason::EosToken => FinishReason::EndOfSequenceToken,
            text_generation_client::FinishReason::StopSequence => FinishReason::StopSequence,
        }
    }
}

/// Convert to Axum supported formats
impl From<InferError> for (StatusCode, Json<ErrorResponse>) {
    fn from(err: InferError) -> Self {
        let status_code = match err {
            InferError::GenerationError(_) => StatusCode::FAILED_DEPENDENCY,
            InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS,
            InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY,
            InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR,
        };

        (
            status_code,
            Json(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            }),
        )
    }
}

impl From<InferError> for Event {
    fn from(err: InferError) -> Self {
        Event::default()
            .json_data(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            })
            .unwrap()
    }
}