/// HTTP Server logic
use crate::infer::{InferError, InferResponse, InferStreamResponse};
use crate::validation::ValidationError;
use crate::{
    BestOfSequence, CompatGenerateRequest, Details, ErrorResponse, FinishReason,
    GenerateParameters, GenerateRequest, GenerateResponse, Infer, PrefillToken, StreamDetails,
    StreamResponse, Token, Validation,
};
use axum::extract::Extension;
use axum::http::{HeaderMap, Method, StatusCode};
use axum::response::sse::{Event, KeepAlive, Sse};
use axum::response::{IntoResponse, Response};
use axum::routing::{get, post};
use axum::{http, Json, Router};
use axum_tracing_opentelemetry::opentelemetry_tracing_layer;
use futures::Stream;
use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle};
use std::convert::Infallible;
use std::net::SocketAddr;
use text_generation_client::ShardedClient;
use tokenizers::Tokenizer;
use tokio::signal;
use tokio::time::Instant;
use tokio_stream::StreamExt;
use tower_http::cors::{AllowOrigin, CorsLayer};
use tracing::{info_span, instrument, Instrument};
use utoipa::OpenApi;
use utoipa_swagger_ui::SwaggerUi;

/// Compatibility route with api-inference and AzureML
#[instrument(skip(infer))]
async fn compat_generate(
    default_return_full_text: Extension<bool>,
    infer: Extension<Infer>,
    req: Json<CompatGenerateRequest>,
) -> Result<Response, (StatusCode, Json<ErrorResponse>)> {
    let mut req = req.0;

    // default return_full_text given the pipeline_tag
    if req.parameters.return_full_text.is_none() {
        req.parameters.return_full_text = Some(default_return_full_text.0)
    }
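    // `req.into()` below uses the crate's `From<CompatGenerateRequest>`
    // conversion to build a regular `GenerateRequest`; the `stream` flag is
    // consumed here rather than forwarded.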

    // switch on stream
    if req.stream {
        Ok(generate_stream(infer, Json(req.into()))
            .await
            .into_response())
    } else {
        let (headers, generation) = generate(infer, Json(req.into())).await?;
        // wrap generation inside a Vec to match api-inference
        Ok((headers, Json(vec![generation.0])).into_response())
    }
}

/// Health check method
#[instrument(skip(infer))]
async fn health(infer: Extension<Infer>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
    // TODO: while this is the best health check we can do, it is a bit on the heavy side and might
    //       be a bit too slow for a health check.
    //       What we should do instead is check if the gRPC channels are still healthy.

    // Send a small inference request
    infer
        .generate(GenerateRequest {
            inputs: "liveness".to_string(),
            parameters: GenerateParameters {
                best_of: None,
                temperature: None,
                repetition_penalty: None,
                top_k: None,
                top_p: None,
                typical_p: None,
                do_sample: false,
                max_new_tokens: 1,
                return_full_text: None,
                stop: Vec::new(),
                truncate: None,
                watermark: false,
                details: false,
                seed: None,
            },
        })
        .await?;
    Ok(())
}

/// Generate tokens
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = GenerateResponse),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"})),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"})),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"})),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"})),
    )
)]
#[instrument(
    skip(infer),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed,
    )
)]
async fn generate(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> Result<(HeaderMap, Json<GenerateResponse>), (StatusCode, Json<ErrorResponse>)> {
    let span = tracing::Span::current();
    let start_time = Instant::now();

    let compute_characters = req.0.inputs.chars().count();
    let mut add_prompt = None;
    if req.0.parameters.return_full_text.unwrap_or(false) {
        add_prompt = Some(req.0.inputs.clone());
    }

    let details = req.0.parameters.details;

    // Inference
    let (response, best_of_responses) = match req.0.parameters.best_of {
        Some(best_of) if best_of > 1 => {
            let (response, best_of_responses) = infer.generate_best_of(req.0, best_of).await?;
            (response, Some(best_of_responses))
        }
        _ => (infer.generate(req.0).await?, None),
    };
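    // NOTE: `generate_best_of` runs the request `best_of` times and returns the
    // sequence the Infer logic ranks best, plus the remaining candidates so
    // they can be surfaced in `details` below.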

    // Token details
    let details = match details {
        true => {
            // convert best_of_responses
            let best_of_sequences = best_of_responses.map(|responses: Vec<InferResponse>| {
                responses
                    .into_iter()
                    .map(|response: InferResponse| {
                        // Add prompt if return_full_text
                        let mut output_text = response.generated_text.text;
                        if let Some(prompt) = &add_prompt {
                            output_text = prompt.clone() + &output_text;
                        }

                        BestOfSequence {
                            generated_text: output_text,
                            finish_reason: FinishReason::from(
                                response.generated_text.finish_reason,
                            ),
                            generated_tokens: response.generated_text.generated_tokens,
                            prefill: response.prefill,
                            tokens: response.tokens,
                            seed: response.generated_text.seed,
                        }
                    })
                    .collect()
            });

            Some(Details {
                finish_reason: FinishReason::from(response.generated_text.finish_reason),
                generated_tokens: response.generated_text.generated_tokens,
                prefill: response.prefill,
                tokens: response.tokens,
                seed: response.generated_text.seed,
                best_of_sequences,
            })
        }
        false => None,
    };

    // Timings
    let total_time = start_time.elapsed();
    let validation_time = response.queued - start_time;
    let queue_time = response.start - response.queued;
    let inference_time = Instant::now() - response.start;
    let time_per_token = inference_time / response.generated_text.generated_tokens;

    // Headers
    let mut headers = HeaderMap::new();
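    // Custom x-* headers expose per-request compute and timing information to
    // clients and any proxy sitting in front of the server.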
    headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
    headers.insert(
        "x-compute-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-compute-characters",
        compute_characters.to_string().parse().unwrap(),
    );
    headers.insert(
        "x-total-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-validation-time",
        validation_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-queue-time",
        queue_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-inference-time",
        inference_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-time-per-token",
        time_per_token.as_millis().to_string().parse().unwrap(),
    );

    // Tracing metadata
    span.record("total_time", format!("{total_time:?}"));
    span.record("validation_time", format!("{validation_time:?}"));
    span.record("queue_time", format!("{queue_time:?}"));
    span.record("inference_time", format!("{inference_time:?}"));
    span.record("time_per_token", format!("{time_per_token:?}"));
    span.record("seed", format!("{:?}", response.generated_text.seed));
    tracing::info!("Output: {}", response.generated_text.text);

    // Metrics
    metrics::increment_counter!("tgi_request_success");
    metrics::histogram!("tgi_request_duration", total_time);
    metrics::histogram!("tgi_request_validation_duration", validation_time);
    metrics::histogram!("tgi_request_queue_duration", queue_time);
    metrics::histogram!("tgi_request_inference_duration", inference_time);
    metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token);
    metrics::histogram!(
        "tgi_request_generated_tokens",
        response.generated_text.generated_tokens as f64
    );

    // Send response
    let mut output_text = response.generated_text.text;
    if let Some(prompt) = add_prompt {
        output_text = prompt + &output_text;
    }

    let response = GenerateResponse {
        generated_text: output_text,
        details,
    };
    Ok((headers, Json(response)))
}

/// Generate a stream of tokens using Server-Sent Events
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate_stream",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = StreamResponse,
            content_type = "text/event-stream"),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"}),
            content_type = "text/event-stream"),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"}),
            content_type = "text/event-stream"),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"}),
            content_type = "text/event-stream"),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"}),
            content_type = "text/event-stream"),
    )
)]
#[instrument(
    skip(infer),
    fields(
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed,
    )
)]
async fn generate_stream(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> (
    HeaderMap,
    Sse<impl Stream<Item = Result<Event, Infallible>>>,
) {
    let span = tracing::Span::current();
    let start_time = Instant::now();

    let compute_characters = req.0.inputs.chars().count();

    let mut headers = HeaderMap::new();
    headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
    headers.insert(
        "x-compute-characters",
        compute_characters.to_string().parse().unwrap(),
    );

    let stream = async_stream::stream! {
        // Inference
        let mut end_reached = false;
        let mut error = false;
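        // `end_reached` / `error` record how the stream terminates so that an
        // IncompleteGeneration error can be yielded if the stream ends without
        // a final token or an explicit error.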

        let mut add_prompt = None;
        if req.0.parameters.return_full_text.unwrap_or(false) {
            add_prompt = Some(req.0.inputs.clone());
        }
        let details = req.0.parameters.details;

        let best_of = req.0.parameters.best_of.unwrap_or(1);
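        // best_of > 1 is rejected below: candidate sequences cannot be ranked
        // and discarded once tokens have already been streamed to the client.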
        if best_of == 1 {
            match infer.generate_stream(req.0).instrument(info_span!(parent: &span, "async_stream")).await {
                Ok(mut response_stream) => {
                    // Server-Sent Event stream
                    while let Some(response) = response_stream.next().await {
                        match response {
                            Ok(response) => {
                                match response {
                                    // Prefill is ignored
                                    InferStreamResponse::Prefill(_) => {}
                                    // Yield event for every new token
                                    InferStreamResponse::Token(token) => {
                                        // StreamResponse
                                        let stream_token = StreamResponse {
                                            token,
                                            generated_text: None,
                                            details: None,
                                        };

                                        yield Ok(Event::default().json_data(stream_token).unwrap())
                                    }
                                    // Yield event for last token and compute timings
                                    InferStreamResponse::End {
                                        token,
                                        generated_text,
                                        start,
                                        queued,
                                    } => {
                                        // Token details
                                        let details = match details {
                                            true => Some(StreamDetails {
                                                finish_reason: FinishReason::from(generated_text.finish_reason),
                                                generated_tokens: generated_text.generated_tokens,
                                                seed: generated_text.seed,
                                            }),
                                            false => None,
                                        };

                                        // Timings
                                        let total_time = start_time.elapsed();
                                        let validation_time = queued - start_time;
                                        let queue_time = start - queued;
                                        let inference_time = Instant::now() - start;
                                        let time_per_token = inference_time / generated_text.generated_tokens;

                                        // Tracing metadata
                                        span.record("total_time", format!("{total_time:?}"));
                                        span.record("validation_time", format!("{validation_time:?}"));
                                        span.record("queue_time", format!("{queue_time:?}"));
                                        span.record("inference_time", format!("{inference_time:?}"));
                                        span.record("time_per_token", format!("{time_per_token:?}"));
                                        span.record("seed", format!("{:?}", generated_text.seed));
                                        tracing::info!(parent: &span, "Output: {}", generated_text.text);

                                        // Metrics
                                        metrics::increment_counter!("tgi_request_success");
                                        metrics::histogram!("tgi_request_duration", total_time);
                                        metrics::histogram!("tgi_request_validation_duration", validation_time);
                                        metrics::histogram!("tgi_request_queue_duration", queue_time);
                                        metrics::histogram!("tgi_request_inference_duration", inference_time);
                                        metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token);
                                        metrics::histogram!("tgi_request_generated_tokens", generated_text.generated_tokens as f64);

                                        // StreamResponse
                                        end_reached = true;

                                        let mut output_text = generated_text.text;
                                        if let Some(prompt) = add_prompt {
                                            output_text = prompt + &output_text;
                                        }

                                        let stream_token = StreamResponse {
                                            token,
                                            generated_text: Some(output_text),
                                            details
                                        };

                                        yield Ok(Event::default().json_data(stream_token).unwrap());
                                        break;
                                    }
                                }
                            }
                            // yield error
                            Err(err) => {
                                error = true;
                                yield Ok(Event::from(err));
                                break;
                            }
                        }
                    }
                },
                // yield error
                Err(err) => {
                    error = true;
                    yield Ok(Event::from(err));
                }
            }
            // Check if generation reached the end
            // Skip if we already sent an error
            if !end_reached && !error {
                let err = InferError::IncompleteGeneration;
                metrics::increment_counter!("tgi_request_failure", "err" => "incomplete");
                tracing::error!("{err}");
                yield Ok(Event::from(err));
            }
        } else {
            let err = InferError::from(ValidationError::BestOfStream);
            metrics::increment_counter!("tgi_request_failure", "err" => "validation");
            tracing::error!("{err}");
            yield Ok(Event::from(err));
        }
    };

    (headers, Sse::new(stream).keep_alive(KeepAlive::default()))
}

/// Prometheus metrics scrape endpoint
#[utoipa::path(
    get,
    tag = "Text Generation Inference",
    path = "/metrics",
    responses((status = 200, description = "Prometheus Metrics", body = String))
)]
async fn metrics(prom_handle: Extension<PrometheusHandle>) -> String {
    prom_handle.render()
}

/// Serving method
#[allow(clippy::too_many_arguments)]
pub async fn run(
    compat_return_full_text: bool,
    max_concurrent_requests: usize,
    max_best_of: usize,
    max_stop_sequences: usize,
    max_input_length: usize,
    max_total_tokens: usize,
    max_batch_size: usize,
    max_waiting_tokens: usize,
    client: ShardedClient,
    tokenizer: Tokenizer,
    validation_workers: usize,
    addr: SocketAddr,
    allow_origin: Option<AllowOrigin>,
) {
    // OpenAPI documentation
    #[derive(OpenApi)]
    #[openapi(
        paths(
            generate,
            generate_stream,
            metrics,
        ),
        components(
            schemas(
                GenerateRequest,
                GenerateParameters,
                PrefillToken,
                Token,
                GenerateResponse,
                BestOfSequence,
                Details,
                FinishReason,
                StreamResponse,
                StreamDetails,
                ErrorResponse,
            )
        ),
        tags(
            (name = "Text Generation Inference", description = "Hugging Face Text Generation Inference API")
        ),
        info(
            title = "Text Generation Inference",
            license(
                name = "Apache 2.0",
                url = "https://www.apache.org/licenses/LICENSE-2.0"
            )
        )
    )]
    struct ApiDoc;

    // Create state
    let validation = Validation::new(
        validation_workers,
        tokenizer,
        max_best_of,
        max_stop_sequences,
        max_input_length,
        max_total_tokens,
    );
    let infer = Infer::new(
        client,
        validation,
        max_batch_size,
        max_waiting_tokens,
        max_concurrent_requests,
    );

    // Prometheus handler
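    // install_recorder sets the global metrics recorder; the returned handle is
    // handed to the /metrics route below through an Extension layer.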
    let builder = PrometheusBuilder::new();
    let prom_handle = builder
        .install_recorder()
        .expect("failed to install metrics recorder");

    // CORS layer
    let allow_origin = allow_origin.unwrap_or(AllowOrigin::any());
    let cors_layer = CorsLayer::new()
        .allow_methods([Method::GET, Method::POST])
        .allow_headers([http::header::CONTENT_TYPE])
        .allow_origin(allow_origin);

    // Create router
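    // Note: axum applies `.layer` only to routes registered before the call,
    // so the Extension, tracing and CORS layers below wrap every route above them.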
    let app = Router::new()
        .merge(SwaggerUi::new("/docs").url("/api-doc/openapi.json", ApiDoc::openapi()))
        // Base routes
        .route("/", post(compat_generate))
        .route("/generate", post(generate))
        .route("/generate_stream", post(generate_stream))
        // AWS Sagemaker route
        .route("/invocations", post(compat_generate))
        // Base Health route
        .route("/health", get(health))
        // Inference API health route
        .route("/", get(health))
        // AWS Sagemaker health route
        .route("/ping", get(health))
        // Prometheus metrics route
        .route("/metrics", get(metrics))
        .layer(Extension(compat_return_full_text))
        .layer(Extension(infer))
        .layer(Extension(prom_handle))
        .layer(opentelemetry_tracing_layer())
        .layer(cors_layer);

    // Run server
    axum::Server::bind(&addr)
        .serve(app.into_make_service())
        // Wait until all requests are finished to shut down
        .with_graceful_shutdown(shutdown_signal())
        .await
        .unwrap();
}

/// Shutdown signal handler
async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();
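    // On non-unix targets there is no SIGTERM equivalent, so `terminate` is a
    // future that never resolves and shutdown relies on Ctrl+C alone.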

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }

    tracing::info!("signal received, starting graceful shutdown");
    opentelemetry::global::shutdown_tracer_provider();
}

impl From<i32> for FinishReason {
    fn from(finish_reason: i32) -> Self {
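        // from_i32 returns None for values outside the protobuf enum, so this
        // unwrap assumes the shard and router were built from the same proto.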
        let finish_reason = text_generation_client::FinishReason::from_i32(finish_reason).unwrap();
        match finish_reason {
            text_generation_client::FinishReason::Length => FinishReason::Length,
            text_generation_client::FinishReason::EosToken => FinishReason::EndOfSequenceToken,
            text_generation_client::FinishReason::StopSequence => FinishReason::StopSequence,
        }
    }
}

/// Convert to Axum supported formats
impl From<InferError> for (StatusCode, Json<ErrorResponse>) {
    fn from(err: InferError) -> Self {
        let status_code = match err {
            InferError::GenerationError(_) => StatusCode::FAILED_DEPENDENCY,
            InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS,
            InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY,
            InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR,
        };

        (
            status_code,
            Json(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            }),
        )
    }
}

impl From<InferError> for Event {
    fn from(err: InferError) -> Self {
        Event::default()
            .json_data(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            })
            .unwrap()
    }
}