"vscode:/vscode.git/clone" did not exist on "8f335b5bd6e2656b6fbe03366b4bbcdab50e2cd8"
/// HTTP Server logic
use crate::infer::{InferError, InferStreamResponse};
use crate::{
    CompatGenerateRequest, Details, ErrorResponse, FinishReason, GenerateParameters,
    GenerateRequest, GenerateResponse, Infer, PrefillToken, StreamDetails, StreamResponse, Token,
    Validation,
};
use axum::extract::Extension;
use axum::http::{HeaderMap, Method, StatusCode};
use axum::response::sse::{Event, KeepAlive, Sse};
use axum::response::IntoResponse;
use axum::routing::{get, post};
use axum::{http, Json, Router};
use axum_tracing_opentelemetry::opentelemetry_tracing_layer;
use futures::Stream;
use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle};
use std::convert::Infallible;
use std::net::SocketAddr;
use text_generation_client::ShardedClient;
use tokenizers::Tokenizer;
use tokio::signal;
use tokio::time::Instant;
use tokio_stream::StreamExt;
use tower_http::cors::{AllowOrigin, CorsLayer};
use tracing::{info_span, instrument, Instrument};
use utoipa::OpenApi;
use utoipa_swagger_ui::SwaggerUi;

/// Compatibility route with api-inference and AzureML
#[instrument(skip(infer))]
async fn compat_generate(
    default_return_full_text: Extension<bool>,
    infer: Extension<Infer>,
    req: Json<CompatGenerateRequest>,
) -> Result<impl IntoResponse, (StatusCode, Json<ErrorResponse>)> {
    let mut req = req.0;

    // default return_full_text given the pipeline_tag
    if req.parameters.return_full_text.is_none() {
        req.parameters.return_full_text = Some(default_return_full_text.0)
    }

    // switch on stream
    if req.stream {
        Ok(generate_stream(infer, Json(req.into()))
            .await
            .into_response())
    } else {
        let (headers, generation) = generate(infer, Json(req.into())).await?;
        // wrap generation inside a Vec to match api-inference
        Ok((headers, Json(vec![generation.0])).into_response())
    }
}

/// Health check method
#[instrument(skip(infer))]
async fn health(infer: Extension<Infer>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
    // TODO: while this is the best health check we can do, it is a bit on the heavy side and might
    //       be a bit too slow for a health check.
    //       What we should do instead is check if the gRPC channels are still healthy.

    // Send a small inference request
    infer
        .generate(GenerateRequest {
            inputs: "liveness".to_string(),
            parameters: GenerateParameters {
                temperature: None,
                repetition_penalty: None,
                top_k: None,
                top_p: None,
                do_sample: false,
                max_new_tokens: 1,
                return_full_text: None,
                stop: Vec::new(),
                details: false,
                seed: None,
            },
        })
        .await?;
    Ok(())
}
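
// A failing "liveness" generation propagates through the `From<InferError>` conversion at the
// bottom of this file, so an unhealthy backend surfaces as a non-2xx status with a JSON error body.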

/// Generate tokens
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = GenerateResponse),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"})),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"})),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"})),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"})),
    )
)]
#[instrument(
    skip(infer),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed,
    )
)]
async fn generate(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> Result<(HeaderMap, Json<GenerateResponse>), (StatusCode, Json<ErrorResponse>)> {
    let span = tracing::Span::current();
    let start_time = Instant::now();

    let mut add_prompt = None;
    if req.0.parameters.return_full_text.unwrap_or(false) {
        add_prompt = Some(req.0.inputs.clone());
    }

    let details = req.0.parameters.details;

    // Inference
    let response = infer.generate(req.0).await?;

    // Token details
    let details = match details {
        true => Some(Details {
            finish_reason: FinishReason::from(response.generated_text.finish_reason),
            generated_tokens: response.generated_text.generated_tokens,
            prefill: Some(response.prefill),
            tokens: Some(response.tokens),
            seed: response.generated_text.seed,
        }),
        false => None,
    };

    // Timings
    let total_time = start_time.elapsed();
    let validation_time = response.queued - start_time;
    let queue_time = response.start - response.queued;
    let inference_time = Instant::now() - response.start;
    let time_per_token = inference_time / response.generated_text.generated_tokens;

    // Headers
    let mut headers = HeaderMap::new();
    headers.insert(
        "x-total-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-validation-time",
        validation_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-queue-time",
        queue_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-inference-time",
        inference_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-time-per-token",
        time_per_token.as_millis().to_string().parse().unwrap(),
    );

    // Tracing metadata
    span.record("total_time", format!("{total_time:?}"));
    span.record("validation_time", format!("{validation_time:?}"));
    span.record("queue_time", format!("{queue_time:?}"));
    span.record("inference_time", format!("{inference_time:?}"));
    span.record("time_per_token", format!("{time_per_token:?}"));
    span.record("seed", format!("{:?}", response.generated_text.seed));
    tracing::info!("Output: {}", response.generated_text.text);

    // Metrics
    metrics::increment_counter!("tgi_request_success");
    metrics::histogram!("tgi_request_duration", total_time);
    metrics::histogram!("tgi_request_validation_duration", validation_time);
    metrics::histogram!("tgi_request_queue_duration", queue_time);
    metrics::histogram!("tgi_request_inference_duration", inference_time);
    metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token);
    metrics::histogram!(
        "tgi_request_generated_tokens",
        response.generated_text.generated_tokens as f64
    );

    // Send response
    let mut output_text = response.generated_text.text;
    if let Some(prompt) = add_prompt {
        output_text = prompt + &output_text;
    }

    let response = GenerateResponse {
        generated_text: output_text,
        details,
    };
    Ok((headers, Json(response)))
}
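
// The timing headers above (`x-total-time`, `x-validation-time`, `x-queue-time`, `x-inference-time`,
// `x-time-per-token`) are reported in milliseconds; clients that only need the text can ignore them
// and read `generated_text` (and, when requested, `details`) from the JSON body.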

/// Generate a stream of tokens using Server-Sent Events
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate_stream",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = StreamResponse,
            content_type = "text/event-stream"),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"}),
            content_type = "text/event-stream"),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"}),
            content_type = "text/event-stream"),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"}),
            content_type = "text/event-stream"),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"}),
            content_type = "text/event-stream"),
    )
)]
#[instrument(
    skip(infer),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed,
    )
)]
async fn generate_stream(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> Sse<impl Stream<Item = Result<Event, Infallible>>> {
    let span = tracing::Span::current();
    let start_time = Instant::now();

    let stream = async_stream::stream! {
        // Inference
        let mut end_reached = false;
        let mut error = false;

        let mut add_prompt = None;
        if req.0.parameters.return_full_text.unwrap_or(false) {
            add_prompt = Some(req.0.inputs.clone());
        }
        let details = req.0.parameters.details;

        match infer.generate_stream(req.0).instrument(info_span!(parent: &span, "async_stream")).await {
            Ok(mut response_stream) => {
                // Server-Sent Event stream
                while let Some(response) = response_stream.next().await {
                    match response {
                        Ok(response) => {
                            match response {
                                // Prefill is ignored
                                InferStreamResponse::Prefill(_) => {}
                                // Yield event for every new token
                                InferStreamResponse::Token(token) => {
                                    // StreamResponse
                                    let stream_token = StreamResponse {
                                        token,
                                        generated_text: None,
                                        details: None,
                                    };

                                    yield Ok(Event::default().json_data(stream_token).unwrap())
                                }
                                // Yield event for last token and compute timings
                                InferStreamResponse::End {
                                    token,
                                    generated_text,
                                    start,
                                    queued,
                                } => {
                                    // Token details
                                    let details = match details {
                                        true => Some(StreamDetails {
                                            finish_reason: FinishReason::from(generated_text.finish_reason),
                                            generated_tokens: generated_text.generated_tokens,
                                            seed: generated_text.seed,
                                        }),
                                        false => None,
                                    };

                                    // Timings
                                    let total_time = start_time.elapsed();
                                    let validation_time = queued - start_time;
                                    let queue_time = start - queued;
                                    let inference_time = Instant::now() - start;
                                    let time_per_token = inference_time / generated_text.generated_tokens;

                                    // Tracing metadata
                                    span.record("total_time", format!("{total_time:?}"));
                                    span.record("validation_time", format!("{validation_time:?}"));
                                    span.record("queue_time", format!("{queue_time:?}"));
                                    span.record("inference_time", format!("{inference_time:?}"));
                                    span.record("time_per_token", format!("{time_per_token:?}"));
                                    span.record("seed", format!("{:?}", generated_text.seed));
                                    tracing::info!(parent: &span, "Output: {}", generated_text.text);

                                    // Metrics
                                    metrics::increment_counter!("tgi_request_success");
                                    metrics::histogram!("tgi_request_duration", total_time);
                                    metrics::histogram!("tgi_request_validation_duration", validation_time);
                                    metrics::histogram!("tgi_request_queue_duration", queue_time);
                                    metrics::histogram!("tgi_request_inference_duration", inference_time);
                                    metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token);
                                    metrics::histogram!("tgi_request_generated_tokens", generated_text.generated_tokens as f64);

                                    // StreamResponse
                                    end_reached = true;

                                    let mut output_text = generated_text.text;
                                    if let Some(prompt) = add_prompt {
                                        output_text = prompt + &output_text;
                                    }

                                    let stream_token = StreamResponse {
                                        token,
                                        generated_text: Some(output_text),
                                        details
                                    };

                                    yield Ok(Event::default().json_data(stream_token).unwrap());
                                    break;
                                }
                            }
                        }
                        // yield error
                        Err(err) => {
                            error = true;
                            yield Ok(Event::from(err));
                            break;
                        }
                    }
                }
            },
            // yield error
            Err(err) => {
                error = true;
                yield Ok(Event::from(err));
            }
        }
        // Check if generation reached the end
        // Skip if we already sent an error
        if !end_reached && !error {
            let err = InferError::IncompleteGeneration;
            metrics::increment_counter!("tgi_request_failure", "err" => "incomplete");
            tracing::error!("{err}");
            yield Ok(Event::from(err));
        }
    };

    Sse::new(stream).keep_alive(KeepAlive::default())
}
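
// Each SSE event above carries a JSON-encoded `StreamResponse`: intermediate events have
// `generated_text: null` and `details: null`, the final event carries the full text (optionally
// prefixed with the prompt) plus optional `StreamDetails`, and failures are sent as an
// `ErrorResponse` payload before the stream ends.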

/// Prometheus metrics scrape endpoint
#[utoipa::path(
    get,
    tag = "Text Generation Inference",
    path = "/metrics",
    responses((status = 200, description = "Prometheus Metrics", body = String))
)]
async fn metrics(prom_handle: Extension<PrometheusHandle>) -> String {
    prom_handle.render()
}
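
// `render` returns the `tgi_request_*` counters and histograms recorded in the handlers above,
// formatted in the Prometheus text exposition format for scraping via the `/metrics` route.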

/// Serving method
#[allow(clippy::too_many_arguments)]
pub async fn run(
    compat_return_full_text: bool,
    max_concurrent_requests: usize,
    max_stop_sequences: usize,
    max_input_length: usize,
    max_total_tokens: usize,
    max_batch_size: usize,
    max_waiting_tokens: usize,
    client: ShardedClient,
    tokenizer: Tokenizer,
    validation_workers: usize,
    addr: SocketAddr,
    allow_origin: Option<AllowOrigin>,
) {
    // OpenAPI documentation
    #[derive(OpenApi)]
    #[openapi(
        paths(
            generate,
            generate_stream,
            metrics,
        ),
        components(
            schemas(
                GenerateRequest,
                GenerateParameters,
                PrefillToken,
                Token,
                GenerateResponse,
                Details,
                FinishReason,
                StreamResponse,
                StreamDetails,
                ErrorResponse,
            )
        ),
        tags(
            (name = "Text Generation Inference", description = "Hugging Face Text Generation Inference API")
        ),
        info(
            title = "Text Generation Inference",
            license(
                name = "Apache 2.0",
                url = "https://www.apache.org/licenses/LICENSE-2.0"
            )
        )
    )]
    struct ApiDoc;

    // Create state
    let validation = Validation::new(
        validation_workers,
        tokenizer,
        max_stop_sequences,
        max_input_length,
        max_total_tokens,
    );
    let infer = Infer::new(
        client,
        validation,
        max_batch_size,
        max_waiting_tokens,
        max_concurrent_requests,
    );

    // Prometheus handler
    let builder = PrometheusBuilder::new();
    let prom_handle = builder
        .install_recorder()
        .expect("failed to install metrics recorder");

    // CORS layer
    let allow_origin = allow_origin.unwrap_or(AllowOrigin::any());
    let cors_layer = CorsLayer::new()
        .allow_methods([Method::GET, Method::POST])
        .allow_headers([http::header::CONTENT_TYPE])
        .allow_origin(allow_origin);

    // Create router
    let app = Router::new()
        .merge(SwaggerUi::new("/docs").url("/api-doc/openapi.json", ApiDoc::openapi()))
        .route("/", post(compat_generate))
        .route("/generate", post(generate))
        .route("/generate_stream", post(generate_stream))
        .route("/", get(health))
        .route("/health", get(health))
        .route("/metrics", get(metrics))
        .layer(Extension(compat_return_full_text))
        .layer(Extension(infer))
        .layer(Extension(prom_handle))
        .layer(opentelemetry_tracing_layer())
        .layer(cors_layer);
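
    // A request sketch against the routes above (the address is only an example; the actual bind
    // address comes from the `addr` argument, and the prompt/parameters are placeholders):
    //
    //   curl 127.0.0.1:3000/generate \
    //     -X POST \
    //     -H 'Content-Type: application/json' \
    //     -d '{"inputs": "What is Deep Learning?", "parameters": {"max_new_tokens": 20}}'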

    // Run server
    axum::Server::bind(&addr)
        .serve(app.into_make_service())
        // Wait until all requests are finished to shut down
        .with_graceful_shutdown(shutdown_signal())
        .await
        .unwrap();
}

/// Shutdown signal handler
async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }

    tracing::info!("signal received, starting graceful shutdown");
    opentelemetry::global::shutdown_tracer_provider();
}

impl From<i32> for FinishReason {
    fn from(finish_reason: i32) -> Self {
        let finish_reason = text_generation_client::FinishReason::from_i32(finish_reason).unwrap();
        match finish_reason {
            text_generation_client::FinishReason::Length => FinishReason::Length,
            text_generation_client::FinishReason::EosToken => FinishReason::EndOfSequenceToken,
            text_generation_client::FinishReason::StopSequence => FinishReason::StopSequence,
        }
    }
}

/// Convert to Axum supported formats
impl From<InferError> for (StatusCode, Json<ErrorResponse>) {
    fn from(err: InferError) -> Self {
        let status_code = match err {
            InferError::GenerationError(_) => StatusCode::FAILED_DEPENDENCY,
            InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS,
            InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY,
            InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR,
        };

        (
            status_code,
            Json(ErrorResponse {
                error: err.to_string(),
            }),
        )
    }
}

impl From<InferError> for Event {
    fn from(err: InferError) -> Self {
        Event::default()
            .json_data(ErrorResponse {
                error: err.to_string(),
            })
            .unwrap()
    }
}