/// HTTP Server logic
use crate::infer::{InferError, InferStreamResponse};
use crate::{
    Details, ErrorResponse, GenerateParameters, GenerateRequest, GenerateResponse, Infer,
    StreamResponse, Validation,
};
use axum::extract::Extension;
use axum::http::{HeaderMap, StatusCode};
use axum::response::sse::{Event, KeepAlive, Sse};
use axum::response::IntoResponse;
use axum::routing::{get, post};
use axum::{Json, Router};
use futures::Stream;
use std::convert::Infallible;
use std::net::SocketAddr;
use text_generation_client::ShardedClient;
use tokenizers::Tokenizer;
use tokio::signal;
use tokio::time::Instant;
use tokio_stream::StreamExt;
use tracing::instrument;

/// Health check method
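///
/// Runs a one-token generation end-to-end, which exercises the whole
/// inference stack; returns `200 OK` on success or the mapped error status
/// otherwise.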
#[instrument(skip(infer))]
async fn health(infer: Extension<Infer>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
    // TODO: while this is the best health check we can do, it is a bit heavy and might be too
    //       slow for a health check.
    //       What we should do instead is check whether the gRPC channels are still healthy.

    // Send a small inference request
    infer
        .generate(GenerateRequest {
            inputs: "liveness".to_string(),
            parameters: GenerateParameters {
                temperature: 1.0,
                top_k: 0,
                top_p: 1.0,
                do_sample: false,
                max_new_tokens: 1,
                stop: vec![],
                details: false,
                seed: None,
            },
        })
        .await?;
    Ok(())
}
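// Example probe (host and port are hypothetical; the actual bind address
// comes from the `addr` argument of `run` below):
//
//   curl -s http://localhost:3000/health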

/// Generate method
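///
/// Runs validated inference for a single request and returns the generated
/// text (plus optional token details) as JSON, exposing per-phase timings as
/// `x-*` response headers.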
#[instrument(
    skip(infer),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed
    )
)]
async fn generate(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> Result<impl IntoResponse, (StatusCode, Json<ErrorResponse>)> {
    let span = tracing::Span::current();
    let start_time = Instant::now();

    // Inference
    let details = req.0.parameters.details;
    let response = infer.generate(req.0).await.map_err(|err| {
        tracing::error!("{}", err.to_string());
        err
    })?;

    // Token details
    let details = match details {
        true => Some(Details {
            finish_reason: response.generated_text.finish_reason,
            generated_tokens: response.generated_text.generated_tokens,
            prefill: Some(response.prefill),
            tokens: Some(response.tokens),
            seed: response.generated_text.seed,
        }),
        false => None,
    };

    // Timings
    let total_time = start_time.elapsed();
    let validation_time = response.queued - start_time;
    let queue_time = response.start - response.queued;
    let inference_time = Instant::now() - response.start;
    let time_per_token = inference_time / response.generated_text.generated_tokens;

    // Headers
    let mut headers = HeaderMap::new();
    headers.insert(
        "x-total-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-validation-time",
        validation_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-queue-time",
        queue_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-inference-time",
        inference_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-time-per-token",
        time_per_token.as_millis().to_string().parse().unwrap(),
    );

    // Tracing metadata
    span.record("total_time", format!("{:?}", total_time));
    span.record("validation_time", format!("{:?}", validation_time));
    span.record("queue_time", format!("{:?}", queue_time));
    span.record("inference_time", format!("{:?}", inference_time));
    span.record("time_per_token", format!("{:?}", time_per_token));
    span.record("seed", format!("{:?}", response.generated_text.seed));
    tracing::info!("Output: {}", response.generated_text.text);

    // Send response
    let response = vec![GenerateResponse {
        generated_text: response.generated_text.text,
        details,
    }];
    Ok((headers, Json(response)))
}
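// Example request (a sketch; host and port are hypothetical, and omitting
// parameters assumes serde defaults on `GenerateParameters`):
//
//   curl -s http://localhost:3000/generate \
//     -H 'Content-Type: application/json' \
//     -d '{"inputs": "Hello", "parameters": {"max_new_tokens": 20}}'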

/// Generate stream method
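///
/// Same as `generate`, but streams one Server-Sent Event per generated
/// token; the final event carries the full generated text and the optional
/// details, and errors are surfaced as in-stream events.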
#[instrument(
    skip(infer),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token
    )
)]
async fn generate_stream(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> Sse<impl Stream<Item = Result<Event, Infallible>>> {
    let span = tracing::Span::current();
    let start_time = Instant::now();

    let stream = async_stream::stream! {
        // Inference
        let mut end_reached = false;
        let mut error = false;
        let details = req.0.parameters.details;

        match infer.generate_stream(req.0).await {
            Ok(mut response_stream) => {
                // Server Side Event stream
                while let Some(response) = response_stream.next().await {
                    match response {
                        Ok(response) => {
                            match response {
                                // Prefill is ignored
                                InferStreamResponse::Prefill(_) => {}
                                // Yield event for every new token
                                InferStreamResponse::Token(token) => {
                                    // StreamResponse
                                    let stream_token = StreamResponse {
                                        token,
                                        generated_text: None,
                                        details: None,
                                    };

                                    yield Ok(Event::default().json_data(stream_token).unwrap())
                                }
                                // Yield event for last token and compute timings
                                InferStreamResponse::End {
                                    token,
                                    generated_text,
                                    start,
                                    queued,
                                } => {
                                    // Token details
                                    let details = match details {
                                        true => Some(Details {
                                            finish_reason: generated_text.finish_reason,
                                            generated_tokens: generated_text.generated_tokens,
                                            prefill: None,
                                            tokens: None,
                                            seed: generated_text.seed,
                                        }),
                                        false => None,
                                    };

                                    // Timings
                                    let total_time = start_time.elapsed();
                                    let validation_time = queued - start_time;
                                    let queue_time = start - queued;
                                    let inference_time = Instant::now() - start;
                                    let time_per_token = inference_time / generated_text.generated_tokens;

                                    // Tracing metadata
                                    span.record("total_time", format!("{:?}", total_time));
                                    span
                                        .record("validation_time", format!("{:?}", validation_time));
                                    span.record("queue_time", format!("{:?}", queue_time));
                                    span
                                        .record("inference_time", format!("{:?}", inference_time));
                                    span
                                        .record("time_per_token", format!("{:?}", time_per_token));
                                    tracing::info!(parent: &span, "Output: {}", generated_text.text);

                                    // StreamResponse
                                    end_reached = true;
                                    let stream_token = StreamResponse {
                                        token,
                                        generated_text: Some(generated_text.text),
                                        details
                                    };

                                    yield Ok(Event::default().json_data(stream_token).unwrap())
                                }
                            }
                        }
                        // Trace and yield error
                        Err(err) => {
                            error = true;
                            tracing::error!("{}", err.to_string());
                            yield Ok(Event::from(err))
                        }
                    }
                }
            },
            // Trace and yield error
            Err(err) => {
                error = true;
                tracing::error!("{}", err.to_string());
                yield Ok(Event::from(err))
            }
        }
        // Check if generation reached the end
        // Skip if we already sent an error
        if !end_reached && !error {
            let err = InferError::IncompleteGeneration;
            tracing::error!("{}", err.to_string());
            yield Ok(Event::from(err))
        }
    };

    Sse::new(stream).keep_alive(KeepAlive::default())
}
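// Example (same hypothetical host/port as above; curl's `-N` disables output
// buffering so events print as they arrive):
//
//   curl -sN http://localhost:3000/generate_stream \
//     -H 'Content-Type: application/json' \
//     -d '{"inputs": "Hello", "parameters": {"max_new_tokens": 20}}'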

/// Serving method
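///
/// Builds the validation and inference state, wires up the routes, and
/// serves until a shutdown signal is received.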
#[allow(clippy::too_many_arguments)]
pub async fn run(
    max_concurrent_requests: usize,
    max_input_length: usize,
    max_batch_size: usize,
    max_waiting_tokens: usize,
    client: ShardedClient,
    tokenizer: Tokenizer,
    validation_workers: usize,
    addr: SocketAddr,
) {
    // Create state
    let validation = Validation::new(validation_workers, tokenizer, max_input_length);
    let infer = Infer::new(
        client,
        validation,
        max_batch_size,
        max_waiting_tokens,
        max_concurrent_requests,
    );

    // Create router
    let app = Router::new()
        .route("/", post(generate))
        .route("/generate", post(generate))
        .route("/generate_stream", post(generate_stream))
        .route("/", get(health))
        .route("/health", get(health))
        .layer(Extension(infer));

    // Run server
    axum::Server::bind(&addr)
        .serve(app.into_make_service())
        // Wait until all requests are finished to shut down
        .with_graceful_shutdown(shutdown_signal())
        .await
        .unwrap();
}
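// Example wiring (a sketch; `connect_uds`, the socket path and the tokenizer
// file are assumptions about the rest of the crate, not verified here):
//
//   let client = ShardedClient::connect_uds("/tmp/text-generation-0".to_string()).await?;
//   let tokenizer = Tokenizer::from_file("tokenizer.json")?;
//   run(512, 1000, 32, 20, client, tokenizer, 2, "0.0.0.0:3000".parse()?).await;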

/// Shutdown signal handler
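///
/// Resolves once Ctrl+C is received, or SIGTERM on Unix, letting axum drain
/// in-flight requests before the server exits.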
async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }

    tracing::info!("signal received, starting graceful shutdown");
}

/// Convert to Axum supported formats
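///
/// Maps each `InferError` variant to an HTTP status code (424, 429, 422 or
/// 500) for the JSON routes; the `Event` conversion below surfaces the same
/// errors as in-stream events on the SSE route.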
impl From<InferError> for (StatusCode, Json<ErrorResponse>) {
    fn from(err: InferError) -> Self {
        let status_code = match err {
            InferError::GenerationError(_) => StatusCode::FAILED_DEPENDENCY,
            InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS,
            InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY,
            InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR,
        };

        (
            status_code,
            Json(ErrorResponse {
                error: err.to_string(),
            }),
        )
    }
}

impl From<InferError> for Event {
    fn from(err: InferError) -> Self {
        Event::default()
            .json_data(ErrorResponse {
                error: err.to_string(),
            })
            .unwrap()
    }
}