/// HTTP Server logic
use crate::infer::{InferError, InferStreamResponse};
use crate::{
    Details, ErrorResponse, FinishReason, GenerateParameters, GenerateRequest, GenerateResponse,
    Infer, StreamDetails, StreamResponse, Token, Validation,
};
use axum::extract::Extension;
use axum::http::{HeaderMap, StatusCode};
use axum::response::sse::{Event, KeepAlive, Sse};
use axum::response::IntoResponse;
use axum::routing::{get, post};
use axum::{Json, Router};
use axum_tracing_opentelemetry::opentelemetry_tracing_layer;
use futures::Stream;
use std::convert::Infallible;
use std::net::SocketAddr;
use text_generation_client::ShardedClient;
use tokenizers::Tokenizer;
use tokio::signal;
use tokio::time::Instant;
use tokio_stream::StreamExt;
use tracing::{info_span, instrument, Instrument};
use utoipa::OpenApi;
use utoipa_swagger_ui::SwaggerUi;

/// Health check method
#[instrument(skip(infer))]
async fn health(infer: Extension<Infer>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
    // TODO: while this is the best health check we can do, it is a bit on the heavy side and might
    //       be a bit too slow for a health check.
    //       What we should do instead is check if the gRPC channels are still healthy.

    // Send a small inference request
    infer
        .generate(GenerateRequest {
            inputs: "liveness".to_string(),
            parameters: GenerateParameters {
                temperature: None,
                repetition_penalty: None,
                top_k: None,
                top_p: None,
                do_sample: false,
                max_new_tokens: 1,
                stop: Vec::new(),
                details: false,
                seed: None,
            },
        })
        .await?;
    Ok(())
}

/// Generate tokens
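///
/// Runs a full (non-streaming) generation for the request body, optionally attaching
/// token-level `Details`, and reports timings both as `x-*` response headers and on the
/// current tracing span.
///
/// Illustrative sketch of a request, built from the same `GenerateParameters` fields the
/// health check above uses; the values here are placeholders, not server defaults:
///
/// ```rust,ignore
/// let request = GenerateRequest {
///     inputs: "My name is".to_string(),
///     parameters: GenerateParameters {
///         temperature: None,
///         repetition_penalty: None,
///         top_k: None,
///         top_p: None,
///         do_sample: false,
///         max_new_tokens: 20,
///         stop: Vec::new(),
///         details: true,
///         seed: None,
///     },
/// };
/// ```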
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = [GenerateResponse]),
        (status = 424, description = "Generation Error", body = [ErrorResponse],
            example = json!({"error": "Request failed during generation"})),
        (status = 429, description = "Model is overloaded", body = [ErrorResponse],
            example = json!({"error": "Model is overloaded"})),
        (status = 422, description = "Input validation error", body = [ErrorResponse],
            example = json!({"error": "Input validation error"})),
        (status = 500, description = "Incomplete generation", body = [ErrorResponse],
            example = json!({"error": "Incomplete generation"})),
    )
)]
#[instrument(
    skip(infer),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed,
    )
)]
async fn generate(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> Result<impl IntoResponse, (StatusCode, Json<ErrorResponse>)> {
    let span = tracing::Span::current();
    let start_time = Instant::now();

    // Inference
    let details = req.0.parameters.details;
    let response = infer.generate(req.0).await?;

    // Token details
    let details = match details {
        true => Some(Details {
            finish_reason: FinishReason::from(response.generated_text.finish_reason),
            generated_tokens: response.generated_text.generated_tokens,
            prefill: Some(response.prefill),
            tokens: Some(response.tokens),
            seed: response.generated_text.seed,
        }),
        false => None,
    };

    // Timings
    let total_time = start_time.elapsed();
    let validation_time = response.queued - start_time;
    let queue_time = response.start - response.queued;
    let inference_time = Instant::now() - response.start;
    let time_per_token = inference_time / response.generated_text.generated_tokens;

    // Headers
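    // All timing headers below are reported as integer milliseconds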
    let mut headers = HeaderMap::new();
    headers.insert(
        "x-total-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-validation-time",
        validation_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-queue-time",
        queue_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-inference-time",
        inference_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-time-per-token",
        time_per_token.as_millis().to_string().parse().unwrap(),
    );

    // Tracing metadata
    span.record("total_time", format!("{total_time:?}"));
    span.record("validation_time", format!("{validation_time:?}"));
    span.record("queue_time", format!("{queue_time:?}"));
    span.record("inference_time", format!("{inference_time:?}"));
    span.record("time_per_token", format!("{time_per_token:?}"));
    span.record("seed", format!("{:?}", response.generated_text.seed));
    tracing::info!("Output: {}", response.generated_text.text);

    // Send response
    let response = GenerateResponse {
        generated_text: response.generated_text.text,
        details,
    };
    Ok((headers, Json(response)))
}

/// Generate a stream of tokens using Server-Sent Events
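///
/// Each event carries a JSON-encoded `StreamResponse`: intermediate events hold a single
/// `token` with `generated_text: None`, while the final event also carries the full
/// `generated_text` and, when `details` was requested, `StreamDetails`.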
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate_stream",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = [StreamResponse],
            content_type="text/event-stream"),
        (status = 424, description = "Generation Error", body = [ErrorResponse],
            example = json!({"error": "Request failed during generation"}),
            content_type="text/event-stream"),
        (status = 429, description = "Model is overloaded", body = [ErrorResponse],
            example = json!({"error": "Model is overloaded"}),
            content_type="text/event-stream"),
        (status = 422, description = "Input validation error", body = [ErrorResponse],
            example = json!({"error": "Input validation error"}),
            content_type="text/event-stream"),
        (status = 500, description = "Incomplete generation", body = [ErrorResponse],
            example = json!({"error": "Incomplete generation"}),
            content_type="text/event-stream"),
    )
)]
#[instrument(
    skip(infer),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed,
    )
)]
async fn generate_stream(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> Sse<impl Stream<Item = Result<Event, Infallible>>> {
    let span = tracing::Span::current();
    let start_time = Instant::now();

    let stream = async_stream::stream! {
        // Inference
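        // Track whether a final event or an error was emitted so that an
        // `IncompleteGeneration` error can be surfaced if the stream ends early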
        let mut end_reached = false;
        let mut error = false;
        let details = req.0.parameters.details;

        match infer.generate_stream(req.0).instrument(info_span!(parent: &span, "async_stream")).await {
            Ok(mut response_stream) => {
                // Server-Sent Event stream
                while let Some(response) = response_stream.next().await {
                    match response {
                        Ok(response) => {
                            match response {
                                // Prefill is ignored
                                InferStreamResponse::Prefill(_) => {}
                                // Yield event for every new token
                                InferStreamResponse::Token(token) => {
                                    // StreamResponse
                                    let stream_token = StreamResponse {
                                        token,
                                        generated_text: None,
                                        details: None,
                                    };

                                    yield Ok(Event::default().json_data(stream_token).unwrap())
                                }
                                // Yield event for last token and compute timings
                                InferStreamResponse::End {
                                    token,
                                    generated_text,
                                    start,
                                    queued,
                                } => {
                                    // Token details
                                    let details = match details {
                                        true => Some(StreamDetails {
                                            finish_reason: FinishReason::from(generated_text.finish_reason),
                                            generated_tokens: generated_text.generated_tokens,
                                            seed: generated_text.seed,
                                        }),
                                        false => None,
                                    };

                                    // Timings
                                    let total_time = start_time.elapsed();
                                    let validation_time = queued - start_time;
                                    let queue_time = start - queued;
                                    let inference_time = Instant::now() - start;
                                    let time_per_token = inference_time / generated_text.generated_tokens;

                                    // Tracing metadata
                                    span.record("total_time", format!("{:?}", total_time));
                                    span.record("validation_time", format!("{:?}", validation_time));
                                    span.record("queue_time", format!("{:?}", queue_time));
                                    span.record("inference_time", format!("{:?}", inference_time));
                                    span.record("time_per_token", format!("{:?}", time_per_token));
                                    span.record("seed", format!("{:?}", generated_text.seed));
                                    tracing::info!(parent: &span, "Output: {}", generated_text.text);

                                    // StreamResponse
                                    end_reached = true;
                                    let stream_token = StreamResponse {
                                        token,
                                        generated_text: Some(generated_text.text),
                                        details
                                    };

                                    yield Ok(Event::default().json_data(stream_token).unwrap())
                                }
                            }
                        }
                        // yield error
                        Err(err) => {
                            error = true;
                            yield Ok(Event::from(err))
                        }
                    }
                }
            },
            // yield error
            Err(err) => {
                error = true;
                yield Ok(Event::from(err))
            }
        }
        // Check if generation reached the end
        // Skip if we already sent an error
        if !end_reached && !error {
            let err = InferError::IncompleteGeneration;
            tracing::error!("{err}");
            yield Ok(Event::from(err))
        }
    };

    Sse::new(stream).keep_alive(KeepAlive::default())
}

/// Serving method
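/// Builds the validation and inference layers, wires up the REST and SSE routes plus the
/// Swagger UI, and serves on `addr` until a shutdown signal is received.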
#[allow(clippy::too_many_arguments)]
pub async fn run(
    max_concurrent_requests: usize,
    max_stop_sequences: usize,
    max_input_length: usize,
    max_total_tokens: usize,
    max_batch_size: usize,
    max_waiting_tokens: usize,
    client: ShardedClient,
    tokenizer: Tokenizer,
    validation_workers: usize,
    addr: SocketAddr,
) {
    // OpenAPI documentation
    #[derive(OpenApi)]
    #[openapi(
        paths(
            generate,
            generate_stream,
        ),
        components(
            schemas(
                GenerateRequest,
                GenerateParameters,
                Token,
                GenerateResponse,
                Details,
                FinishReason,
                StreamResponse,
                StreamDetails,
                ErrorResponse,
            )
        ),
        tags(
            (name = "Text Generation Inference", description = "Hugging Face Text Generation Inference API")
        ),
        info(
            title = "Text Generation Inference",
            license(
                name = "Apache 2.0",
                url = "https://www.apache.org/licenses/LICENSE-2.0"
            )
        )
    )]
    struct ApiDoc;

    // Create state
    let validation = Validation::new(
        validation_workers,
        tokenizer,
        max_stop_sequences,
        max_input_length,
        max_total_tokens,
    );
    let infer = Infer::new(
        client,
        validation,
        max_batch_size,
        max_waiting_tokens,
        max_concurrent_requests,
    );

    // Create router
    let app = Router::new()
        .merge(SwaggerUi::new("/docs").url("/api-doc/openapi.json", ApiDoc::openapi()))
        .route("/", post(generate))
        .route("/generate", post(generate))
        .route("/generate_stream", post(generate_stream))
        .route("/", get(health))
        .route("/health", get(health))
        .layer(Extension(infer))
        .layer(opentelemetry_tracing_layer());

    // Run server
    axum::Server::bind(&addr)
        .serve(app.into_make_service())
        // Wait until all requests are finished to shut down
        .with_graceful_shutdown(shutdown_signal())
        .await
        .unwrap();
}

/// Shutdown signal handler
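/// Resolves once Ctrl+C is received, or SIGTERM on Unix targets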
async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }

    tracing::info!("signal received, starting graceful shutdown");
    opentelemetry::global::shutdown_tracer_provider();
}
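
/// Map the protobuf `FinishReason` returned by the text generation client onto the
/// HTTP API `FinishReason`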
impl From<i32> for FinishReason {
    fn from(finish_reason: i32) -> Self {
        let finish_reason = text_generation_client::FinishReason::from_i32(finish_reason).unwrap();
        match finish_reason {
            text_generation_client::FinishReason::Length => FinishReason::Length,
            text_generation_client::FinishReason::EosToken => FinishReason::EndOfSequenceToken,
            text_generation_client::FinishReason::StopSequence => FinishReason::StopSequence,
        }
    }
}

/// Convert to Axum supported formats
impl From<InferError> for (StatusCode, Json<ErrorResponse>) {
    fn from(err: InferError) -> Self {
        let status_code = match err {
            InferError::GenerationError(_) => StatusCode::FAILED_DEPENDENCY,
            InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS,
            InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY,
            InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR,
        };

        (
            status_code,
            Json(ErrorResponse {
                error: err.to_string(),
            }),
        )
    }
}

impl From<InferError> for Event {
    fn from(err: InferError) -> Self {
        Event::default()
            .json_data(ErrorResponse {
                error: err.to_string(),
            })
            .unwrap()
    }
}