/// HTTP Server logic
use crate::infer::{InferError, InferStreamResponse};
use crate::{
    Details, ErrorResponse, FinishReason, GenerateParameters, GenerateRequest, GenerateResponse,
    Infer, StreamDetails, StreamResponse, Token, Validation,
};
use axum::extract::Extension;
use axum::http::{HeaderMap, StatusCode};
use axum::response::sse::{Event, KeepAlive, Sse};
use axum::response::IntoResponse;
use axum::routing::{get, post};
use axum::{Json, Router};
use futures::Stream;
use std::convert::Infallible;
use std::net::SocketAddr;
use text_generation_client::ShardedClient;
use tokenizers::Tokenizer;
use tokio::signal;
use tokio::time::Instant;
use tokio_stream::StreamExt;
use tracing::instrument;
use utoipa::OpenApi;
use utoipa_swagger_ui::SwaggerUi;

/// Health check method
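///
/// A minimal liveness probe sketch (assuming the router listens on port
/// 3000; the actual port is deployment-specific):
///
/// ```text
/// curl -s -o /dev/null -w "%{http_code}" http://localhost:3000/health
/// ```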
#[instrument(skip(infer))]
async fn health(infer: Extension<Infer>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
    // TODO: while this is the best health check we can do, it is a bit on the heavy side and might
    //       be a bit too slow for a health check.
    //       What we should do instead is check if the gRPC channels are still healthy.

    // Send a small inference request
    infer
        .generate(GenerateRequest {
            inputs: "liveness".to_string(),
            parameters: GenerateParameters {
                temperature: None,
                repetition_penalty: None,
                top_k: None,
                top_p: None,
                do_sample: false,
                max_new_tokens: 1,
                stop: Vec::new(),
                details: false,
                seed: None,
            },
        })
        .await?;
    Ok(())
}

/// Generate tokens
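///
/// Illustrative request body (omitted parameters fall back to their serde
/// defaults; the values here are examples, not recommendations):
///
/// ```json
/// {"inputs": "My name is", "parameters": {"max_new_tokens": 20, "do_sample": true, "details": true}}
/// ```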
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = [GenerateResponse]),
        (status = 424, description = "Generation Error", body = [ErrorResponse],
            example = json!({"error": "Request failed during generation"})),
        (status = 429, description = "Model is overloaded", body = [ErrorResponse],
            example = json!({"error": "Model is overloaded"})),
        (status = 422, description = "Input validation error", body = [ErrorResponse],
            example = json!({"error": "Input validation error"})),
        (status = 500, description = "Incomplete generation", body = [ErrorResponse],
            example = json!({"error": "Incomplete generation"})),
    )
)]
#[instrument(
    skip(infer),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed
    )
)]
async fn generate(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> Result<impl IntoResponse, (StatusCode, Json<ErrorResponse>)> {
    let span = tracing::Span::current();
    let start_time = Instant::now();

    // Inference
    let details = req.0.parameters.details;
    let response = infer.generate(req.0).await.map_err(|err| {
        tracing::error!("{}", err.to_string());
        err
    })?;

    // Token details
    let details = match details {
        true => Some(Details {
            finish_reason: FinishReason::from(response.generated_text.finish_reason),
            generated_tokens: response.generated_text.generated_tokens,
            prefill: Some(response.prefill),
            tokens: Some(response.tokens),
            seed: response.generated_text.seed,
        }),
        false => None,
    };

    // Timings
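    // Timeline: start_time (request received) -> response.queued (validation done)
    // -> response.start (inference began) -> now (inference finished).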
    let total_time = start_time.elapsed();
    let validation_time = response.queued - start_time;
    let queue_time = response.start - response.queued;
    let inference_time = Instant::now() - response.start;
    let time_per_token = inference_time / response.generated_text.generated_tokens;

    // Headers
    let mut headers = HeaderMap::new();
    headers.insert(
        "x-total-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-validation-time",
        validation_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-queue-time",
        queue_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-inference-time",
        inference_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-time-per-token",
        time_per_token.as_millis().to_string().parse().unwrap(),
    );

    // Tracing metadata
    span.record("total_time", format!("{:?}", total_time));
    span.record("validation_time", format!("{:?}", validation_time));
    span.record("queue_time", format!("{:?}", queue_time));
    span.record("inference_time", format!("{:?}", inference_time));
    span.record("time_per_token", format!("{:?}", time_per_token));
    span.record("seed", format!("{:?}", response.generated_text.seed));
    tracing::info!("Output: {}", response.generated_text.text);

    // Send response
    let response = GenerateResponse {
        generated_text: response.generated_text.text,
        details,
    };
    Ok((headers, Json(response)))
}

/// Generate a stream of tokens using Server-Sent Events
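///
/// Each event's payload is a JSON-encoded `StreamResponse`. A hypothetical
/// two-token stream might look like this (field values are illustrative):
///
/// ```text
/// data: {"token": {...}, "generated_text": null, "details": null}
/// data: {"token": {...}, "generated_text": "the full text", "details": {...}}
/// ```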
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate_stream",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = [StreamResponse],
            content_type = "text/event-stream"),
        (status = 424, description = "Generation Error", body = [ErrorResponse],
            example = json!({"error": "Request failed during generation"}),
            content_type = "text/event-stream"),
        (status = 429, description = "Model is overloaded", body = [ErrorResponse],
            example = json!({"error": "Model is overloaded"}),
            content_type = "text/event-stream"),
        (status = 422, description = "Input validation error", body = [ErrorResponse],
            example = json!({"error": "Input validation error"}),
            content_type = "text/event-stream"),
        (status = 500, description = "Incomplete generation", body = [ErrorResponse],
            example = json!({"error": "Incomplete generation"}),
            content_type = "text/event-stream"),
    )
)]
#[instrument(
    skip(infer),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token
    )
)]
async fn generate_stream(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> Sse<impl Stream<Item = Result<Event, Infallible>>> {
    let span = tracing::Span::current();
    let start_time = Instant::now();

    let stream = async_stream::stream! {
        // Inference
        let mut end_reached = false;
        let mut error = false;
        let details = req.0.parameters.details;

        match infer.generate_stream(req.0).await {
            Ok(mut response_stream) => {
                // Server-Sent Event stream
                while let Some(response) = response_stream.next().await {
                    match response {
                        Ok(response) => {
                            match response {
                                // Prefill is ignored
                                InferStreamResponse::Prefill(_) => {}
                                // Yield event for every new token
                                InferStreamResponse::Token(token) => {
                                    // StreamResponse
                                    let stream_token = StreamResponse {
                                        token,
                                        generated_text: None,
                                        details: None,
                                    };

                                    yield Ok(Event::default().json_data(stream_token).unwrap())
                                }
                                // Yield event for last token and compute timings
                                InferStreamResponse::End {
                                    token,
                                    generated_text,
                                    start,
                                    queued,
                                } => {
                                    // Token details
                                    let details = match details {
                                        true => Some(StreamDetails {
                                            finish_reason: FinishReason::from(generated_text.finish_reason),
                                            generated_tokens: generated_text.generated_tokens,
                                            seed: generated_text.seed,
                                        }),
                                        false => None,
                                    };

                                    // Timings
                                    let total_time = start_time.elapsed();
                                    let validation_time = queued - start_time;
                                    let queue_time = start - queued;
                                    let inference_time = Instant::now() - start;
                                    let time_per_token = inference_time / generated_text.generated_tokens;

                                    // Tracing metadata
                                    span.record("total_time", format!("{:?}", total_time));
                                    span
                                        .record("validation_time", format!("{:?}", validation_time));
                                    span.record("queue_time", format!("{:?}", queue_time));
                                    span
                                        .record("inference_time", format!("{:?}", inference_time));
                                    span
                                        .record("time_per_token", format!("{:?}", time_per_token));
                                    tracing::info!(parent: &span, "Output: {}", generated_text.text);

                                    // StreamResponse
                                    end_reached = true;
                                    let stream_token = StreamResponse {
                                        token,
                                        generated_text: Some(generated_text.text),
                                        details,
                                    };

                                    yield Ok(Event::default().json_data(stream_token).unwrap())
                                }
                            }
                        }
                        // Trace and yield error
                        Err(err) => {
                            error = true;
                            tracing::error!("{}", err.to_string());
                            yield Ok(Event::from(err))
                        }
                    }
                }
            },
            // Trace and yield error
            Err(err) => {
                error = true;
                tracing::error!("{}", err.to_string());
                yield Ok(Event::from(err))
            }
        }
        // Check if generation reached the end
        // Skip if we already sent an error
        if !end_reached && !error {
            let err = InferError::IncompleteGeneration;
            tracing::error!("{}", err.to_string());
            yield Ok(Event::from(err))
        }
    };

    Sse::new(stream).keep_alive(KeepAlive::default())
}

/// Serving method
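///
/// A hedged caller sketch (argument values are illustrative, not defaults):
///
/// ```ignore
/// run(
///     128,            // max_concurrent_requests
///     1000,           // max_input_length
///     32,             // max_batch_size
///     20,             // max_waiting_tokens
///     sharded_client, // text_generation_client::ShardedClient
///     tokenizer,      // tokenizers::Tokenizer
///     2,              // validation_workers
///     "0.0.0.0:3000".parse().unwrap(),
/// )
/// .await;
/// ```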
#[allow(clippy::too_many_arguments)]
pub async fn run(
    max_concurrent_requests: usize,
    max_input_length: usize,
    max_batch_size: usize,
    max_waiting_tokens: usize,
    client: ShardedClient,
    tokenizer: Tokenizer,
    validation_workers: usize,
    addr: SocketAddr,
) {
    // OpenAPI documentation
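    // The interactive docs are rendered at /docs by SwaggerUi below; the raw
    // spec is exposed at /api-doc/openapi.json (see the router setup).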
    #[derive(OpenApi)]
    #[openapi(
        paths(
            generate,
            generate_stream,
        ),
        components(
            schemas(
                GenerateRequest,
                GenerateParameters,
                Token,
                GenerateResponse,
                Details,
                FinishReason,
                StreamResponse,
                StreamDetails,
                ErrorResponse,
            )
        ),
        tags(
            (name = "Text Generation Inference", description = "Hugging Face Text Generation Inference API")
        ),
        info(
            title = "Text Generation Inference",
            license(
                name = "Apache 2.0",
                url = "https://www.apache.org/licenses/LICENSE-2.0"
            )
        )
    )]
    struct ApiDoc;

    // Create state
    let validation = Validation::new(validation_workers, tokenizer, max_input_length);
    let infer = Infer::new(
        client,
        validation,
        max_batch_size,
        max_waiting_tokens,
        max_concurrent_requests,
    );

    // Create router
    let app = Router::new()
        .merge(SwaggerUi::new("/docs").url("/api-doc/openapi.json", ApiDoc::openapi()))
        .route("/", post(generate))
        .route("/generate", post(generate))
        .route("/generate_stream", post(generate_stream))
        .route("/", get(health))
        .route("/health", get(health))
        .layer(Extension(infer));

    // Run server
    axum::Server::bind(&addr)
        .serve(app.into_make_service())
        // Wait until all requests are finished to shut down
        .with_graceful_shutdown(shutdown_signal())
        .await
        .unwrap();
}

/// Shutdown signal handler
async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }

    tracing::info!("signal received, starting graceful shutdown");
}

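/// Map the prost-encoded finish reason returned by the shards onto the public API enum.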
impl From<i32> for FinishReason {
    fn from(finish_reason: i32) -> Self {
        let finish_reason = text_generation_client::FinishReason::from_i32(finish_reason).unwrap();
        match finish_reason {
            text_generation_client::FinishReason::Length => FinishReason::Length,
            text_generation_client::FinishReason::EosToken => FinishReason::EndOfSequenceToken,
            text_generation_client::FinishReason::StopSequence => FinishReason::StopSequence,
        }
    }
}

/// Convert to Axum supported formats
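///
/// Status mapping: GenerationError -> 424, Overloaded -> 429,
/// ValidationError -> 422, IncompleteGeneration -> 500.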
impl From<InferError> for (StatusCode, Json<ErrorResponse>) {
    fn from(err: InferError) -> Self {
        let status_code = match err {
            InferError::GenerationError(_) => StatusCode::FAILED_DEPENDENCY,
            InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS,
            InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY,
            InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR,
        };

        (
            status_code,
            Json(ErrorResponse {
                error: err.to_string(),
            }),
        )
    }
}

impl From<InferError> for Event {
    fn from(err: InferError) -> Self {
        Event::default()
            .json_data(ErrorResponse {
                error: err.to_string(),
            })
            .unwrap()
    }
}