/// HTTP Server logic
use crate::health::Health;
use crate::infer::{InferError, InferResponse, InferStreamResponse};
use crate::validation::ValidationError;
use crate::{
    BestOfSequence, CompatGenerateRequest, Details, ErrorResponse, FinishReason,
    GenerateParameters, GenerateRequest, GenerateResponse, HubModelInfo, Infer, Info, PrefillToken,
    StreamDetails, StreamResponse, Token, Validation,
};
use axum::extract::Extension;
use axum::http::{HeaderMap, Method, StatusCode};
use axum::response::sse::{Event, KeepAlive, Sse};
use axum::response::{IntoResponse, Response};
use axum::routing::{get, post};
use axum::{http, Json, Router};
use axum_tracing_opentelemetry::opentelemetry_tracing_layer;
use futures::stream::StreamExt;
use futures::Stream;
use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle};
use std::convert::Infallible;
use std::net::SocketAddr;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use text_generation_client::{ShardInfo, ShardedClient};
use tokenizers::Tokenizer;
use tokio::signal;
use tokio::time::Instant;
use tower_http::cors::{AllowOrigin, CorsLayer};
use tracing::{info_span, instrument, Instrument};
use utoipa::OpenApi;
use utoipa_swagger_ui::SwaggerUi;

/// Generate tokens if `stream == false` or a stream of tokens if `stream == true`
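///
/// A request sketch against this route; the host and port are assumptions, the
/// actual listen address comes from the launcher:
///
/// ```text
/// curl 127.0.0.1:3000/ \
///     -X POST \
///     -H 'Content-Type: application/json' \
///     -d '{"inputs": "What is Deep Learning?", "parameters": {"max_new_tokens": 20}, "stream": true}'
/// ```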
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/",
    request_body = CompatGenerateRequest,
    responses(
        (status = 200, description = "Generated Text",
            content(
                ("application/json" = GenerateResponse),
                ("text/event-stream" = StreamResponse),
            )),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"})),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"})),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"})),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"})),
    )
)]
#[instrument(skip(infer, req))]
async fn compat_generate(
    default_return_full_text: Extension<bool>,
    infer: Extension<Infer>,
    req: Json<CompatGenerateRequest>,
) -> Result<Response, (StatusCode, Json<ErrorResponse>)> {
    let mut req = req.0;

    // Default return_full_text based on the model's pipeline tag
    if req.parameters.return_full_text.is_none() {
        req.parameters.return_full_text = Some(default_return_full_text.0)
    }

    // switch on stream
    if req.stream {
        Ok(generate_stream(infer, Json(req.into()))
            .await
            .into_response())
    } else {
        let (headers, generation) = generate(infer, Json(req.into())).await?;
        // wrap generation inside a Vec to match api-inference
        Ok((headers, Json(vec![generation.0])).into_response())
    }
}

/// Text Generation Inference endpoint info
#[utoipa::path(
    get,
    tag = "Text Generation Inference",
    path = "/info",
    responses((status = 200, description = "Served model info", body = Info))
)]
#[instrument]
async fn get_model_info(info: Extension<Info>) -> Json<Info> {
    Json(info.0)
}

#[utoipa::path(
    get,
    tag = "Text Generation Inference",
    path = "/health",
    responses(
        (status = 200, description = "Everything is working fine"),
        (status = 503, description = "Text generation inference is down", body = ErrorResponse,
            example = json ! ({"error": "unhealthy", "error_type": "healthcheck"})),
    )
)]
#[instrument(skip(health))]
/// Health check method
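///
/// Probe sketch (the port is an assumption): `curl -i 127.0.0.1:3000/health`
/// answers 200 when the check passes and 503 with an `"unhealthy"` error body
/// otherwise.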
async fn health(mut health: Extension<Health>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
    match health.check().await {
        true => Ok(()),
        false => Err((
            StatusCode::SERVICE_UNAVAILABLE,
            Json(ErrorResponse {
                error: "unhealthy".to_string(),
                error_type: "healthcheck".to_string(),
            }),
        )),
    }
}

/// Generate tokens
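///
/// Minimal invocation sketch (host and port are assumptions, not defined here):
///
/// ```text
/// curl 127.0.0.1:3000/generate \
///     -X POST \
///     -H 'Content-Type: application/json' \
///     -d '{"inputs": "What is Deep Learning?", "parameters": {"max_new_tokens": 20}}'
/// ```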
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = GenerateResponse),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"})),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"})),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"})),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"})),
    )
)]
#[instrument(
    skip_all,
    fields(
        parameters = ?req.0.parameters,
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed,
    )
)]
async fn generate(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> Result<(HeaderMap, Json<GenerateResponse>), (StatusCode, Json<ErrorResponse>)> {
    let span = tracing::Span::current();
    let start_time = Instant::now();
    metrics::increment_counter!("tgi_request_count");

    tracing::debug!("Input: {}", req.0.inputs);

    let compute_characters = req.0.inputs.chars().count();
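
    // If requested, echo the prompt back in front of the generated text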
    let mut add_prompt = None;
    if req.0.parameters.return_full_text.unwrap_or(false) {
        add_prompt = Some(req.0.inputs.clone());
    }

    let details = req.0.parameters.details || req.0.parameters.decoder_input_details;

    // Inference
    let (response, best_of_responses) = match req.0.parameters.best_of {
        Some(best_of) if best_of > 1 => {
            let (response, best_of_responses) = infer.generate_best_of(req.0, best_of).await?;
            (response, Some(best_of_responses))
        }
        _ => (infer.generate(req.0).await?, None),
    };

    // Token details
    let details = match details {
        true => {
            // convert best_of_responses
            let best_of_sequences = best_of_responses.map(|responses: Vec<InferResponse>| {
                responses
                    .into_iter()
                    .map(|response: InferResponse| {
                        // Add prompt if return_full_text
                        let mut output_text = response.generated_text.text;
                        if let Some(prompt) = &add_prompt {
                            output_text = prompt.clone() + &output_text;
                        }

                        BestOfSequence {
                            generated_text: output_text,
                            finish_reason: FinishReason::from(
                                response.generated_text.finish_reason,
                            ),
                            generated_tokens: response.generated_text.generated_tokens,
                            prefill: response.prefill,
                            tokens: response.tokens,
                            seed: response.generated_text.seed,
                        }
                    })
                    .collect()
            });

            Some(Details {
                finish_reason: FinishReason::from(response.generated_text.finish_reason),
                generated_tokens: response.generated_text.generated_tokens,
                prefill: response.prefill,
                tokens: response.tokens,
                seed: response.generated_text.seed,
                best_of_sequences,
            })
        }
        false => None,
    };

    // Timings
    let total_time = start_time.elapsed();
    let validation_time = response.queued - start_time;
    let queue_time = response.start - response.queued;
    let inference_time = Instant::now() - response.start;
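    // `Duration / u32` splits the span evenly across tokens; generated_tokens is
    // expected to be >= 1 once generation ends, since a zero count would panic here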
    let time_per_token = inference_time / response.generated_text.generated_tokens;

    // Tracing metadata
    span.record("total_time", format!("{total_time:?}"));
    span.record("validation_time", format!("{validation_time:?}"));
    span.record("queue_time", format!("{queue_time:?}"));
    span.record("inference_time", format!("{inference_time:?}"));
    span.record("time_per_token", format!("{time_per_token:?}"));
    span.record("seed", format!("{:?}", response.generated_text.seed));

    // Headers
    let mut headers = HeaderMap::new();
    headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
    headers.insert(
        "x-compute-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-compute-characters",
        compute_characters.to_string().parse().unwrap(),
    );
    headers.insert(
        "x-total-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-validation-time",
        validation_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-queue-time",
        queue_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-inference-time",
        inference_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-time-per-token",
        time_per_token.as_millis().to_string().parse().unwrap(),
    );

    // Metrics
    metrics::increment_counter!("tgi_request_success");
    metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
    metrics::histogram!(
        "tgi_request_validation_duration",
        validation_time.as_secs_f64()
    );
    metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
    metrics::histogram!(
        "tgi_request_inference_duration",
        inference_time.as_secs_f64()
    );
    metrics::histogram!(
        "tgi_request_mean_time_per_token_duration",
        time_per_token.as_secs_f64()
    );
    metrics::histogram!(
        "tgi_request_generated_tokens",
        response.generated_text.generated_tokens as f64
    );

    // Send response
    let mut output_text = response.generated_text.text;
    if let Some(prompt) = add_prompt {
        output_text = prompt + &output_text;
    }

    tracing::debug!("Output: {}", output_text);
    tracing::info!("Success");

    let response = GenerateResponse {
        generated_text: output_text,
        details,
    };
    Ok((headers, Json(response)))
}

/// Generate a stream of tokens using Server-Sent Events
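///
/// Each frame carries one serialized `StreamResponse`; a sketch of the wire
/// format (the token values are illustrative, not real output):
///
/// ```text
/// data: {"token":{"id":5229,"text":" deep","logprob":-0.34,"special":false},"generated_text":null,"details":null}
/// ```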
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate_stream",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = StreamResponse,
            content_type = "text/event-stream"),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"}),
            content_type = "text/event-stream"),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"}),
            content_type = "text/event-stream"),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"}),
            content_type = "text/event-stream"),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"}),
            content_type = "text/event-stream"),
    )
)]
#[instrument(
    skip_all,
    fields(
        parameters = ?req.0.parameters,
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed,
    )
)]
async fn generate_stream(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> (
    HeaderMap,
    Sse<impl Stream<Item = Result<Event, Infallible>>>,
) {
    let span = tracing::Span::current();
    let start_time = Instant::now();
    metrics::increment_counter!("tgi_request_count");

    tracing::debug!("Input: {}", req.0.inputs);

    let compute_characters = req.0.inputs.chars().count();

    let mut headers = HeaderMap::new();
    headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
    headers.insert(
        "x-compute-characters",
        compute_characters.to_string().parse().unwrap(),
    );
    headers.insert("X-Accel-Buffering", "no".parse().unwrap());

    let stream = async_stream::stream! {
        // Inference
        let mut end_reached = false;
        let mut error = false;

        let mut add_prompt = None;
        if req.0.parameters.return_full_text.unwrap_or(false) {
            add_prompt = Some(req.0.inputs.clone());
        }
        let details = req.0.parameters.details;

        let best_of = req.0.parameters.best_of.unwrap_or(1);
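
        // Streaming is incompatible with best_of sampling and with prefill
        // (decoder input) details, so reject those combinations up front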
        if best_of != 1 {
            let err = InferError::from(ValidationError::BestOfStream);
            metrics::increment_counter!("tgi_request_failure", "err" => "validation");
            tracing::error!("{err}");
            yield Ok(Event::from(err));
        } else if req.0.parameters.decoder_input_details {
            let err = InferError::from(ValidationError::PrefillDetailsStream);
            metrics::increment_counter!("tgi_request_failure", "err" => "validation");
            tracing::error!("{err}");
            yield Ok(Event::from(err));
        } else {
            match infer.generate_stream(req.0).instrument(info_span!(parent: &span, "async_stream")).await {
                // Keep permit as long as generate_stream lives
                Ok((_permit, mut response_stream)) => {
                    // Server-Sent Event stream
                    while let Some(response) = response_stream.next().await {
                        match response {
                            Ok(response) => {
                                match response {
                                    // Prefill is ignored
                                    InferStreamResponse::Prefill(_) => {}
                                    // Yield event for every new token
                                    InferStreamResponse::Token(token) => {
                                        tracing::debug!(parent: &span, "Token: {:?}", token);

                                        // StreamResponse
                                        let stream_token = StreamResponse {
                                            token,
                                            generated_text: None,
                                            details: None,
                                        };

                                        yield Ok(Event::default().json_data(stream_token).unwrap())
                                    }
                                    // Yield event for last token and compute timings
                                    InferStreamResponse::End {
                                        token,
                                        generated_text,
                                        start,
                                        queued,
                                    } => {
                                        // Token details
                                        let details = match details {
                                            true => Some(StreamDetails {
                                                finish_reason: FinishReason::from(generated_text.finish_reason),
                                                generated_tokens: generated_text.generated_tokens,
                                                seed: generated_text.seed,
                                            }),
                                            false => None,
                                        };

                                        // Timings
                                        let total_time = start_time.elapsed();
                                        let validation_time = queued - start_time;
                                        let queue_time = start - queued;
                                        let inference_time = Instant::now() - start;
                                        let time_per_token = inference_time / generated_text.generated_tokens;

                                        // Tracing metadata
                                        span.record("total_time", format!("{total_time:?}"));
                                        span.record("validation_time", format!("{validation_time:?}"));
                                        span.record("queue_time", format!("{queue_time:?}"));
                                        span.record("inference_time", format!("{inference_time:?}"));
                                        span.record("time_per_token", format!("{time_per_token:?}"));
                                        span.record("seed", format!("{:?}", generated_text.seed));

                                        // Metrics
                                        metrics::increment_counter!("tgi_request_success");
                                        metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_validation_duration", validation_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_inference_duration", inference_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token.as_secs_f64());
                                        metrics::histogram!("tgi_request_generated_tokens", generated_text.generated_tokens as f64);

                                        // StreamResponse
                                        end_reached = true;

                                        let mut output_text = generated_text.text;
                                        if let Some(prompt) = add_prompt {
                                            output_text = prompt + &output_text;
                                        }

                                        tracing::debug!(parent: &span, "Output: {}", output_text);
                                        tracing::info!(parent: &span, "Success");

                                        let stream_token = StreamResponse {
                                            token,
                                            generated_text: Some(output_text),
                                            details
                                        };

                                        yield Ok(Event::default().json_data(stream_token).unwrap());
                                        break;
                                    }
                                }
                            }
                            // yield error
                            Err(err) => {
                                error = true;
                                yield Ok(Event::from(err));
                                break;
                            }
                        }
                    }
                },
                // yield error
                Err(err) => {
                    error = true;
                    yield Ok(Event::from(err));
                }
            }
            // Check if generation reached the end
            // Skip if we already sent an error
            if !end_reached && !error {
                let err = InferError::IncompleteGeneration;
                metrics::increment_counter!("tgi_request_failure", "err" => "incomplete");
                tracing::error!("{err}");
                yield Ok(Event::from(err));
            }
        }
    };

    (headers, Sse::new(stream).keep_alive(KeepAlive::default()))
}

/// Prometheus metrics scrape endpoint
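///
/// Renders every recorded counter and histogram in the Prometheus text
/// exposition format; point a scrape job (or a plain
/// `curl 127.0.0.1:3000/metrics`, the port being an assumption) at this route.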
#[utoipa::path(
    get,
    tag = "Text Generation Inference",
    path = "/metrics",
    responses((status = 200, description = "Prometheus Metrics", body = String))
)]
async fn metrics(prom_handle: Extension<PrometheusHandle>) -> String {
    prom_handle.render()
}

/// Serving method
#[allow(clippy::too_many_arguments)]
pub async fn run(
    model_info: HubModelInfo,
    shard_info: ShardInfo,
    compat_return_full_text: bool,
    max_concurrent_requests: usize,
    max_best_of: usize,
    max_stop_sequences: usize,
    max_input_length: usize,
    max_total_tokens: usize,
    waiting_served_ratio: f32,
    max_batch_prefill_tokens: u32,
    max_batch_total_tokens: u32,
    max_waiting_tokens: usize,
    client: ShardedClient,
    tokenizer: Option<Tokenizer>,
    validation_workers: usize,
    addr: SocketAddr,
    allow_origin: Option<AllowOrigin>,
    ngrok: bool,
    ngrok_authtoken: Option<String>,
    ngrok_domain: Option<String>,
    ngrok_username: Option<String>,
    ngrok_password: Option<String>,
) -> Result<(), axum::BoxError> {
    // OpenAPI documentation
    #[derive(OpenApi)]
    #[openapi(
        paths(
            health,
            get_model_info,
            compat_generate,
            generate,
            generate_stream,
            metrics,
        ),
        components(
            schemas(
                Info,
                CompatGenerateRequest,
                GenerateRequest,
                GenerateParameters,
                PrefillToken,
                Token,
                GenerateResponse,
                BestOfSequence,
                Details,
                FinishReason,
                StreamResponse,
                StreamDetails,
                ErrorResponse,
            )
        ),
        tags(
            (name = "Text Generation Inference", description = "Hugging Face Text Generation Inference API")
        ),
        info(
            title = "Text Generation Inference",
            license(
                name = "Apache 2.0",
                url = "https://www.apache.org/licenses/LICENSE-2.0"
            )
        )
    )]
    struct ApiDoc;

    // Create state
    let validation = Validation::new(
        validation_workers,
        tokenizer,
        max_best_of,
        max_stop_sequences,
        max_input_length,
        max_total_tokens,
    );
    let generation_health = Arc::new(AtomicBool::new(false));
    let health_ext = Health::new(client.clone(), generation_health.clone());
    let infer = Infer::new(
        client,
        validation,
        waiting_served_ratio,
        max_batch_prefill_tokens,
        max_batch_total_tokens,
        max_waiting_tokens,
        max_concurrent_requests,
        shard_info.requires_padding,
        generation_health,
    );

    // Duration buckets
    let duration_matcher = Matcher::Suffix(String::from("duration"));
    let n_duration_buckets = 35;
    let mut duration_buckets = Vec::with_capacity(n_duration_buckets);
    // Minimum duration in seconds
    let mut value = 0.0001;
    for _ in 0..n_duration_buckets {
        // geometric sequence
        value *= 1.5;
        duration_buckets.push(value);
    }
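    // Starting at 1e-4 and multiplying by 1.5 thirty-five times spans roughly
    // 0.15 ms to ~146 s, covering sub-millisecond validation up to multi-minute
    // generations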
    // Input Length buckets
    let input_length_matcher = Matcher::Full(String::from("tgi_request_input_length"));
    let input_length_buckets: Vec<f64> = (0..100)
        .map(|x| (max_input_length as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Generated tokens buckets
    let generated_tokens_matcher = Matcher::Full(String::from("tgi_request_generated_tokens"));
    let generated_tokens_buckets: Vec<f64> = (0..100)
        .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Max new tokens buckets
    let max_new_tokens_matcher = Matcher::Full(String::from("tgi_request_max_new_tokens"));
    let max_new_tokens_buckets: Vec<f64> = (0..100)
        .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Batch size buckets
    let batch_size_matcher = Matcher::Full(String::from("tgi_batch_next_size"));
    let batch_size_buckets: Vec<f64> = (0..1024).map(|x| (x + 1) as f64).collect();

    // Prometheus handler
    let builder = PrometheusBuilder::new()
        .set_buckets_for_metric(duration_matcher, &duration_buckets)
        .unwrap()
        .set_buckets_for_metric(input_length_matcher, &input_length_buckets)
        .unwrap()
        .set_buckets_for_metric(generated_tokens_matcher, &generated_tokens_buckets)
        .unwrap()
        .set_buckets_for_metric(max_new_tokens_matcher, &max_new_tokens_buckets)
        .unwrap()
        .set_buckets_for_metric(batch_size_matcher, &batch_size_buckets)
        .unwrap();
    let prom_handle = builder
        .install_recorder()
        .expect("failed to install metrics recorder");

    // CORS layer
    let allow_origin = allow_origin.unwrap_or(AllowOrigin::any());
    let cors_layer = CorsLayer::new()
        .allow_methods([Method::GET, Method::POST])
        .allow_headers([http::header::CONTENT_TYPE])
        .allow_origin(allow_origin);

    // Endpoint info
    let info = Info {
        model_id: model_info.model_id,
        model_sha: model_info.sha,
        model_dtype: shard_info.dtype,
        model_device_type: shard_info.device_type,
        model_pipeline_tag: model_info.pipeline_tag,
        max_concurrent_requests,
        max_best_of,
        max_stop_sequences,
        max_input_length,
        max_total_tokens,
        waiting_served_ratio,
        max_batch_total_tokens,
        max_waiting_tokens,
        validation_workers,
        version: env!("CARGO_PKG_VERSION"),
        sha: option_env!("VERGEN_GIT_SHA"),
        docker_label: option_env!("DOCKER_LABEL"),
    };

    // Create router
    let app = Router::new()
        .merge(SwaggerUi::new("/docs").url("/api-doc/openapi.json", ApiDoc::openapi()))
        // Base routes
        .route("/", post(compat_generate))
        .route("/info", get(get_model_info))
        .route("/generate", post(generate))
        .route("/generate_stream", post(generate_stream))
        // AWS Sagemaker route
        .route("/invocations", post(compat_generate))
        // Base Health route
        .route("/health", get(health))
        // Inference API health route
        .route("/", get(health))
        // AWS Sagemaker health route
        .route("/ping", get(health))
        // Prometheus metrics route
        .route("/metrics", get(metrics))
        .layer(Extension(info))
        .layer(Extension(health_ext))
        .layer(Extension(compat_return_full_text))
        .layer(Extension(infer))
        .layer(Extension(prom_handle))
        .layer(opentelemetry_tracing_layer())
        .layer(cors_layer);

    if ngrok {
        #[cfg(feature = "ngrok")]
        {
            use ngrok::config::TunnelBuilder;
            use ngrok::tunnel::UrlTunnel;

            let _ = addr;

            let authtoken =
                ngrok_authtoken.expect("`ngrok-authtoken` must be set when using ngrok tunneling");

            let mut tunnel = ngrok::Session::builder()
                .authtoken(authtoken)
                .connect()
                .await
                .unwrap()
                .http_endpoint();

            if let Some(domain) = ngrok_domain {
                tunnel = tunnel.domain(domain);
            }

            if let (Some(username), Some(password)) = (ngrok_username, ngrok_password) {
                tunnel = tunnel.basic_auth(username, password);
            }

            let listener = tunnel.listen().await.unwrap();

            // Run server
            tracing::info!("Ingress URL: {:?}", listener.url());
            axum::Server::builder(listener)
                .serve(app.into_make_service())
                // Wait until all requests are finished to shut down
                .with_graceful_shutdown(shutdown_signal())
                .await?;
        }
        #[cfg(not(feature = "ngrok"))]
        {
            let _ngrok_authtoken = ngrok_authtoken;
            let _ngrok_domain = ngrok_domain;
            let _ngrok_username = ngrok_username;
            let _ngrok_password = ngrok_password;

            panic!("`text-generation-router` was compiled without the `ngrok` feature");
        }
    } else {
        // Run server
        axum::Server::bind(&addr)
            .serve(app.into_make_service())
            // Wait until all requests are finished to shut down
            .with_graceful_shutdown(shutdown_signal())
            .await?;
    }
    Ok(())
}

/// Shutdown signal handler
async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }

    tracing::info!("signal received, starting graceful shutdown");
    opentelemetry::global::shutdown_tracer_provider();
}

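// Protobuf finish reasons arrive as `i32`; `from_i32` returns `None` for values
// outside the enum, so the `unwrap` below panics on an unknown variant.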
impl From<i32> for FinishReason {
    fn from(finish_reason: i32) -> Self {
        let finish_reason = text_generation_client::FinishReason::from_i32(finish_reason).unwrap();
        match finish_reason {
            text_generation_client::FinishReason::Length => FinishReason::Length,
            text_generation_client::FinishReason::EosToken => FinishReason::EndOfSequenceToken,
            text_generation_client::FinishReason::StopSequence => FinishReason::StopSequence,
        }
    }
}

/// Convert to Axum supported formats
impl From<InferError> for (StatusCode, Json<ErrorResponse>) {
    fn from(err: InferError) -> Self {
        let status_code = match err {
            InferError::GenerationError(_) => StatusCode::FAILED_DEPENDENCY,
            InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS,
            InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY,
            InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR,
        };

        (
            status_code,
            Json(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            }),
        )
    }
}

impl From<InferError> for Event {
    fn from(err: InferError) -> Self {
        Event::default()
            .json_data(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            })
            .unwrap()
    }
}