server.rs 7.16 KB
Newer Older
1
use crate::{
2
    Batcher, Details, ErrorResponse, GenerateParameters, GenerateRequest, GeneratedText, Validation,
3
};
Olivier Dehaene's avatar
Olivier Dehaene committed
4
use axum::extract::Extension;
5
6
use axum::http::{HeaderMap, StatusCode};
use axum::response::IntoResponse;
Olivier Dehaene's avatar
Olivier Dehaene committed
7
use axum::routing::{get, post};
Olivier Dehaene's avatar
Olivier Dehaene committed
8
9
use axum::{Json, Router};
use std::net::SocketAddr;
10
use std::sync::Arc;
11
use text_generation_client::ShardedClient;
Olivier Dehaene's avatar
Olivier Dehaene committed
12
use tokenizers::Tokenizer;
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
13
use tokio::signal;
14
use tokio::sync::Semaphore;
Olivier Dehaene's avatar
Olivier Dehaene committed
15
16
17
use tokio::time::Instant;
use tracing::instrument;

18
19
20
21
22
23
24
25
// Server shared state
// Cloned per request via the axum `Extension` layer installed in `run`.
#[derive(Clone)]
struct ServerState {
    // Validates incoming requests (constructed in `run` from the tokenizer and max input length)
    validation: Validation,
    // Forwards validated requests to the sharded model client for batched inference
    batcher: Batcher,
    // Caps the number of in-flight requests; handlers acquire permits non-blockingly
    limit_concurrent_requests: Arc<Semaphore>,
}

Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
26
/// Health check method
27
28
#[instrument(skip(state), fields(time, time_per_token))]
async fn health(state: Extension<ServerState>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
29
30
31
32
    // TODO: while this is the best health check we can do, it is a bit on the heavy side and might
    //       be a bit too slow for a health check.
    //       What we should do instead if check if the gRPC channels are still healthy.

33
34
35
36
37
38
39
40
41
42
    // Limit concurrent requests by acquiring a permit from the semaphore
    let _permit = state.limit_concurrent_requests.try_acquire().map_err(|_| {
        (
            StatusCode::TOO_MANY_REQUESTS,
            Json(ErrorResponse {
                error: "Model is overloaded".to_string(),
            }),
        )
    })?;

Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
43
    // Send a small inference request
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
    state
        .batcher
        .infer(
            1,
            GenerateRequest {
                inputs: "liveness".to_string(),
                parameters: GenerateParameters {
                    temperature: 1.0,
                    top_k: 0,
                    top_p: 1.0,
                    do_sample: false,
                    max_new_tokens: 1,
                    stop: vec![],
                    details: false,
                    seed: None,
                },
Olivier Dehaene's avatar
Olivier Dehaene committed
60
            },
61
        )
Olivier Dehaene's avatar
Olivier Dehaene committed
62
63
        .await?;
    Ok(())
Olivier Dehaene's avatar
Olivier Dehaene committed
64
65
}

Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
66
/// Generate method
///
/// POST handler: validates the request, runs inference through the batcher,
/// and returns the generated text as JSON with a timing breakdown in custom
/// response headers. Returns 429 when the concurrency limit is reached and
/// propagates validation/inference errors as `(StatusCode, ErrorResponse)`.
#[instrument(
    skip(state),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed
    )
)]
async fn generate(
    state: Extension<ServerState>,
    req: Json<GenerateRequest>,
) -> Result<impl IntoResponse, (StatusCode, Json<ErrorResponse>)> {
    // All reported timings are measured from this point
    let start_time = Instant::now();

    // Limit concurrent requests by acquiring a permit from the semaphore
    // (non-blocking: a saturated server answers 429 instead of queueing)
    let _permit = state.limit_concurrent_requests.try_acquire().map_err(|_| {
        tracing::error!("Model is overloaded");
        (
            StatusCode::TOO_MANY_REQUESTS,
            Json(ErrorResponse {
                error: "Model is overloaded".to_string(),
            }),
        )
    })?;

    // Validate request
    // `details` is copied out before `req.0` is moved into `validate`
    let details = req.0.parameters.details;
    let (input_length, validated_request) =
        state.validation.validate(req.0).await.map_err(|err| {
            tracing::error!("{}", err.to_string());
            err
        })?;

    // Inference
    let response = state
        .batcher
        .infer(input_length, validated_request)
        .await
        .map_err(|err| {
            tracing::error!("{}", err.to_string());
            err
        })?;

    // Token details
    // Only assembled when the client requested them: zip ids/texts/logprobs
    // into (id, text, logprob) triples
    let details = match details {
        true => {
            let tokens = response
                .token_ids
                .into_iter()
                .zip(response.tokens.into_iter())
                .zip(response.logprobs.into_iter())
                .map(|((id, text), logprob)| (id, text, logprob))
                .collect();
            Some(Details {
                seed: response.seed,
                finish_reason: response.finish_reason,
                generated_tokens: response.generated_tokens,
                tokens,
            })
        }
        false => None,
    };

    // Timings
    // NOTE(review): the division assumes `generated_tokens` >= 1 — confirm
    // the batcher never reports zero generated tokens
    let total_time = start_time.elapsed();
    let validation_time = response.queued - start_time;
    let queue_time = response.start - response.queued;
    let inference_time = response.end - response.start;
    let time_per_token = inference_time / response.generated_tokens;

    // Headers
    // Expose the timing breakdown to clients as x-* headers (milliseconds)
    let mut headers = HeaderMap::new();
    headers.insert(
        "x-total-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-validation-time",
        validation_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-queue-time",
        queue_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-inference-time",
        inference_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-time-per-token",
        time_per_token.as_millis().to_string().parse().unwrap(),
    );

    // Tracing metadata
    // Fill the span fields declared in the #[instrument] attribute above
    tracing::Span::current().record("total_time", format!("{:?}", total_time));
    tracing::Span::current().record("validation_time", format!("{:?}", validation_time));
    tracing::Span::current().record("queue_time", format!("{:?}", queue_time));
    tracing::Span::current().record("inference_time", format!("{:?}", inference_time));
    tracing::Span::current().record("time_per_token", format!("{:?}", time_per_token));
    tracing::Span::current().record("seed", format!("{:?}", response.seed));
    tracing::info!("Output: {}", response.output_text);

    // Send response
    // Wrapped in a Vec so the JSON body is an array of generated texts
    let response = vec![GeneratedText {
        generated_text: response.output_text,
        details,
    }];
    Ok((headers, Json(response)))
}

Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
179
180
181
182
183
184
/// Serving method
///
/// Builds the shared server state, wires the HTTP routes, and serves on
/// `addr` until a shutdown signal is received. Blocks for the lifetime of
/// the server; panics if the server fails to bind or serve.
#[allow(clippy::too_many_arguments)]
pub async fn run(
    max_concurrent_requests: usize,
    max_input_length: usize,
    max_batch_size: usize,
    max_waiting_tokens: usize,
    client: ShardedClient,
    tokenizer: Tokenizer,
    validation_workers: usize,
    addr: SocketAddr,
) {
    // Create state
    let batcher = Batcher::new(client, max_batch_size, max_waiting_tokens);
    let validation = Validation::new(validation_workers, tokenizer, max_input_length);
    let shared_state = ServerState {
        validation,
        batcher,
        // Permit count bounds the number of in-flight requests server-wide
        limit_concurrent_requests: Arc::new(Semaphore::new(max_concurrent_requests)),
    };

    // Create router
    // "/" serves both POST (generate) and GET (health); the same handlers are
    // also exposed at the explicit /generate and /health paths
    let app = Router::new()
        .route("/", post(generate))
        .route("/generate", post(generate))
        .route("/", get(health))
        .route("/health", get(health))
        .layer(Extension(shared_state.clone()));

    // Run server
    axum::Server::bind(&addr)
        .serve(app.into_make_service())
        // Wait until all requests are finished to shut down
        .with_graceful_shutdown(shutdown_signal())
        .await
        .unwrap();
}
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242

/// Shutdown signal handler
///
/// Resolves once Ctrl+C is received, or — on unix targets — once a SIGTERM
/// arrives, whichever happens first. On non-unix targets only Ctrl+C is
/// watched; the other branch never completes.
async fn shutdown_signal() {
    let interrupt = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let sigterm = async {
        let mut stream = signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler");
        stream.recv().await;
    };

    #[cfg(not(unix))]
    let sigterm = std::future::pending::<()>();

    tokio::select! {
        _ = interrupt => {},
        _ = sigterm => {},
    }

    tracing::info!("signal received, starting graceful shutdown");
}