use crate::{
    Batcher, ErrorResponse, GenerateParameters, GenerateRequest, GeneratedText, Validation,
};
use axum::extract::Extension;
use axum::http::{HeaderMap, StatusCode};
use axum::response::IntoResponse;
use axum::routing::{get, post};
use axum::{Json, Router};
use std::net::SocketAddr;
use std::sync::Arc;
use text_generation_client::ShardedClient;
use tokenizers::Tokenizer;
use tokio::signal;
use tokio::sync::Semaphore;
use tokio::time::Instant;
use tracing::instrument;

// Server shared state
#[derive(Clone)]
struct ServerState {
    validation: Validation,
    batcher: Batcher,
    limit_concurrent_requests: Arc<Semaphore>,
Olivier Dehaene's avatar
Olivier Dehaene committed
24
25
}

Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
26
/// Health check method
Olivier Dehaene's avatar
Olivier Dehaene committed
27
#[instrument(skip(state), fields(time, time_per_token))]
28
async fn health(state: Extension<ServerState>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
29
30
31
32
33
34
35
36
    // TODO: while this is the best health check we can do, it is a bit on the heavy side and might
    //       be a bit too slow for a health check.
    //       What we should do instead if check if the gRPC channels are still healthy.

    // Limit concurrent requests by acquiring a permit from the semaphore
    let _permit = state.limit_concurrent_requests.try_acquire().map_err(|_| {
        (
            StatusCode::TOO_MANY_REQUESTS,
37
38
39
            Json(ErrorResponse {
                error: "Model is overloaded".to_string(),
            }),
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
40
41
42
43
        )
    })?;

    // Send a small inference request
Olivier Dehaene's avatar
Olivier Dehaene committed
44
    state
Olivier Dehaene's avatar
Olivier Dehaene committed
45
        .batcher
Olivier Dehaene's avatar
Olivier Dehaene committed
46
47
48
49
50
51
52
53
54
55
56
57
58
        .infer(
            1,
            GenerateRequest {
                inputs: "liveness".to_string(),
                parameters: GenerateParameters {
                    temperature: 1.0,
                    top_k: 0,
                    top_p: 1.0,
                    do_sample: false,
                    max_new_tokens: 1,
                },
            },
        )
Olivier Dehaene's avatar
Olivier Dehaene committed
59
60
        .await?;
    Ok(())
Olivier Dehaene's avatar
Olivier Dehaene committed
61
62
}

Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
63
/// Generate method
64
65
66
67
68
69
70
71
72
73
#[instrument(
    skip(state),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token
    )
)]
Olivier Dehaene's avatar
Olivier Dehaene committed
74
async fn generate(
Olivier Dehaene's avatar
Olivier Dehaene committed
75
    state: Extension<ServerState>,
Olivier Dehaene's avatar
Olivier Dehaene committed
76
    req: Json<GenerateRequest>,
77
) -> Result<impl IntoResponse, (StatusCode, Json<ErrorResponse>)> {
78
    let start_time = Instant::now();
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
79
80
    // Limit concurrent requests by acquiring a permit from the semaphore
    let _permit = state.limit_concurrent_requests.try_acquire().map_err(|_| {
81
        tracing::error!("Model is overloaded");
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
82
83
        (
            StatusCode::TOO_MANY_REQUESTS,
84
85
86
            Json(ErrorResponse {
                error: "Model is overloaded".to_string(),
            }),
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
87
88
        )
    })?;
Olivier Dehaene's avatar
Olivier Dehaene committed
89

Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
90
    // Validate request
Olivier Dehaene's avatar
Olivier Dehaene committed
91
    let (input_length, validated_request) = state
Olivier Dehaene's avatar
Olivier Dehaene committed
92
        .validation
93
        .validate(req.0)
94
95
96
97
98
        .await
        .map_err(|err| {
            tracing::error!("{}", err.to_string());
            err
        })?;
Olivier Dehaene's avatar
Olivier Dehaene committed
99

Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
100
    // Inference
101
102
103
104
105
106
107
108
    let response = state
        .batcher
        .infer(input_length, validated_request)
        .await
        .map_err(|err| {
            tracing::error!("{}", err.to_string());
            err
        })?;
Olivier Dehaene's avatar
Olivier Dehaene committed
109

110
111
112
113
114
    // Timings
    let total_time = start_time.elapsed();
    let validation_time = response.queued - start_time;
    let queue_time = response.start - response.queued;
    let inference_time = response.end - response.start;
115
    let time_per_token = inference_time / response.tokens;
116
117
118
119
120
121
122
123
124
125
126
127
128
129

    // Headers
    let mut headers = HeaderMap::new();
    headers.insert(
        "x-total-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-validation-time",
        validation_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-queue-time",
        queue_time.as_millis().to_string().parse().unwrap(),
Olivier Dehaene's avatar
Olivier Dehaene committed
130
    );
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
    headers.insert(
        "x-inference-time",
        inference_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-time-per-token",
        time_per_token.as_millis().to_string().parse().unwrap(),
    );

    // Tracing metadata
    tracing::Span::current().record("total_time", format!("{:?}", total_time));
    tracing::Span::current().record("validation_time", format!("{:?}", validation_time));
    tracing::Span::current().record("queue_time", format!("{:?}", queue_time));
    tracing::Span::current().record("inference_time", format!("{:?}", inference_time));
    tracing::Span::current().record("time_per_token", format!("{:?}", time_per_token));
    tracing::info!("Output: {}", response.output);
Olivier Dehaene's avatar
Olivier Dehaene committed
147

Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
148
    // Send response
149
150
151
152
    let response = vec![GeneratedText {
        generated_text: response.output,
    }];
    Ok((headers, Json(response)))
Olivier Dehaene's avatar
Olivier Dehaene committed
153
154
}

Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
155
156
157
158
159
160
/// Serving method
#[allow(clippy::too_many_arguments)]
pub async fn run(
    max_concurrent_requests: usize,
    max_input_length: usize,
    max_batch_size: usize,
161
    max_waiting_tokens: usize,
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
162
163
164
165
166
167
    client: ShardedClient,
    tokenizer: Tokenizer,
    validation_workers: usize,
    addr: SocketAddr,
) {
    // Create state
168
    let batcher = Batcher::new(client, max_batch_size, max_waiting_tokens);
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
169
170
171
172
173
174
175
176
    let validation = Validation::new(validation_workers, tokenizer, max_input_length);
    let shared_state = ServerState {
        validation,
        batcher,
        limit_concurrent_requests: Arc::new(Semaphore::new(max_concurrent_requests)),
    };

    // Create router
Olivier Dehaene's avatar
Olivier Dehaene committed
177
178
179
    let app = Router::new()
        .route("/generate", post(generate))
        .layer(Extension(shared_state.clone()))
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
180
        .route("/health", get(health))
Olivier Dehaene's avatar
Olivier Dehaene committed
181
        .layer(Extension(shared_state.clone()));
Olivier Dehaene's avatar
Olivier Dehaene committed
182

Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
183
    // Run server
Olivier Dehaene's avatar
Olivier Dehaene committed
184
    axum::Server::bind(&addr)
Olivier Dehaene's avatar
Olivier Dehaene committed
185
        .serve(app.into_make_service())
Olivier Dehaene's avatar
v0.1.0  
Olivier Dehaene committed
186
187
        // Wait until all requests are finished to shut down
        .with_graceful_shutdown(shutdown_signal())
Olivier Dehaene's avatar
Olivier Dehaene committed
188
189
        .await
        .unwrap();
Olivier Dehaene's avatar
Olivier Dehaene committed
190
}
/// Shutdown signal handler
async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }

    tracing::info!("signal received, starting graceful shutdown");
}