server.rs 22.4 KB
Newer Older
1
use crate::{
2
    config::{ConnectionMode, HistoryBackend, RouterConfig},
3
    core::{WorkerManager, WorkerRegistry, WorkerType},
4
    data_connector::{MemoryResponseStorage, NoOpResponseStorage, SharedResponseStorage},
5
6
7
8
9
10
11
    logging::{self, LoggingConfig},
    metrics::{self, PrometheusConfig},
    middleware::{self, QueuedRequest, TokenBucket},
    policies::PolicyRegistry,
    protocols::{
        spec::{
            ChatCompletionRequest, CompletionRequest, EmbeddingRequest, GenerateRequest,
12
            RerankRequest, ResponsesGetParams, ResponsesRequest, V1RerankReqInput,
13
14
15
16
        },
        worker_spec::{WorkerApiResponse, WorkerConfigRequest, WorkerErrorResponse},
    },
    reasoning_parser::ParserFactory,
17
    routers::{router_manager::RouterManager, RouterTrait},
18
19
20
    service_discovery::{start_service_discovery, ServiceDiscoveryConfig},
    tokenizer::{factory as tokenizer_factory, traits::Tokenizer},
    tool_parser::ParserRegistry,
21
};
22
use axum::{
23
    extract::{Path, Query, Request, State},
24
25
    http::StatusCode,
    response::{IntoResponse, Response},
26
    routing::{delete, get, post},
27
    serve, Json, Router,
28
};
29
use reqwest::Client;
30
31
32
33
34
35
36
37
use serde::Deserialize;
use serde_json::json;
use std::{
    sync::atomic::{AtomicBool, Ordering},
    sync::Arc,
    time::Duration,
};
use tokio::{net::TcpListener, signal, spawn};
38
use tracing::{error, info, warn, Level};
39

40
/// Shared, immutable application context handed to routers and handlers.
#[derive(Clone)]
pub struct AppContext {
    /// HTTP client used to talk to workers.
    pub client: Client,
    /// Router configuration the server was started with.
    pub router_config: RouterConfig,
    /// Token bucket backing the concurrency/rate limiter middleware.
    pub rate_limiter: Arc<TokenBucket>,
    // The tokenizer and parsers below are only constructed in gRPC
    // connection mode (see `AppContext::new`); they are `None` in HTTP mode.
    pub tokenizer: Option<Arc<dyn Tokenizer>>,
    pub reasoning_parser_factory: Option<ParserFactory>,
    pub tool_parser_registry: Option<&'static ParserRegistry>,
    /// Registry of all known workers.
    pub worker_registry: Arc<WorkerRegistry>,
    /// Registry of load-balancing policies.
    pub policy_registry: Arc<PolicyRegistry>,
    /// Optional router manager; `AppContext::new` initializes this to `None`.
    pub router_manager: Option<Arc<RouterManager>>,
    /// Storage backend for `/v1/responses` history (memory or no-op).
    pub response_storage: SharedResponseStorage,
}

54
impl AppContext {
55
56
57
58
    pub fn new(
        router_config: RouterConfig,
        client: Client,
        max_concurrent_requests: usize,
59
        rate_limit_tokens_per_second: Option<usize>,
60
    ) -> Result<Self, String> {
61
62
        let rate_limit_tokens = rate_limit_tokens_per_second.unwrap_or(max_concurrent_requests);
        let rate_limiter = Arc::new(TokenBucket::new(max_concurrent_requests, rate_limit_tokens));
63
64

        let (tokenizer, reasoning_parser_factory, tool_parser_registry) =
65
            if router_config.connection_mode == ConnectionMode::Grpc {
66
67
68
69
70
71
72
73
74
75
76
                let tokenizer_path = router_config
                    .tokenizer_path
                    .clone()
                    .or_else(|| router_config.model_path.clone())
                    .ok_or_else(|| {
                        "gRPC mode requires either --tokenizer-path or --model-path to be specified"
                            .to_string()
                    })?;

                let tokenizer = Some(
                    tokenizer_factory::create_tokenizer(&tokenizer_path)
77
                        .map_err(|e| format!("Failed to create tokenizer: {e}"))?,
78
79
80
81
82
83
84
85
86
                );
                let reasoning_parser_factory = Some(ParserFactory::new());
                let tool_parser_registry = Some(ParserRegistry::new());

                (tokenizer, reasoning_parser_factory, tool_parser_registry)
            } else {
                (None, None, None)
            };

87
        let worker_registry = Arc::new(WorkerRegistry::new());
88
        let policy_registry = Arc::new(PolicyRegistry::new(router_config.policy.clone()));
89

90
        let router_manager = None;
91

92
93
94
95
96
        let response_storage: SharedResponseStorage = match router_config.history_backend {
            HistoryBackend::Memory => Arc::new(MemoryResponseStorage::new()),
            HistoryBackend::None => Arc::new(NoOpResponseStorage::new()),
        };

97
        Ok(Self {
98
            client,
99
            router_config,
100
            rate_limiter,
101
102
103
            tokenizer,
            reasoning_parser_factory,
            tool_parser_registry,
104
105
106
            worker_registry,
            policy_registry,
            router_manager,
107
            response_storage,
108
        })
109
110
111
    }
}

112
113
114
115
/// Per-server state passed to every axum handler via `State`.
#[derive(Clone)]
pub struct AppState {
    /// Active router implementation that all endpoints delegate to.
    pub router: Arc<dyn RouterTrait>,
    /// Shared application context (client, registries, config).
    pub context: Arc<AppContext>,
    /// Sender used to queue requests when the concurrency limit is hit;
    /// `None` when no queue processor is running.
    pub concurrency_queue_tx: Option<tokio::sync::mpsc::Sender<QueuedRequest>>,
    /// Router manager, when one was created during startup.
    pub router_manager: Option<Arc<RouterManager>>,
}

120
121
async fn sink_handler() -> Response {
    StatusCode::NOT_FOUND.into_response()
122
123
}

124
125
/// Liveness probe; delegates to the active router.
async fn liveness(State(state): State<Arc<AppState>>) -> Response {
    let router = &state.router;
    router.liveness()
}

128
129
/// Readiness probe; delegates to the active router.
async fn readiness(State(state): State<Arc<AppState>>) -> Response {
    let router = &state.router;
    router.readiness()
}

132
/// Health endpoint; the router decides how health is determined.
async fn health(State(state): State<Arc<AppState>>, req: Request) -> Response {
    let router = &state.router;
    router.health(req).await
}

136
/// Generation-path health check; delegates to the active router.
async fn health_generate(State(state): State<Arc<AppState>>, req: Request) -> Response {
    let router = &state.router;
    router.health_generate(req).await
}

140
/// GET /get_server_info — server metadata, as reported by the router.
async fn get_server_info(State(state): State<Arc<AppState>>, req: Request) -> Response {
    let router = &state.router;
    router.get_server_info(req).await
}

144
/// GET /v1/models — list available models, as reported by the router.
async fn v1_models(State(state): State<Arc<AppState>>, req: Request) -> Response {
    let router = &state.router;
    router.get_models(req).await
}

148
/// GET /get_model_info — model metadata, as reported by the router.
async fn get_model_info(State(state): State<Arc<AppState>>, req: Request) -> Response {
    let router = &state.router;
    router.get_model_info(req).await
}
151

152
async fn generate(
153
154
155
156
    State(state): State<Arc<AppState>>,
    headers: http::HeaderMap,
    Json(body): Json<GenerateRequest>,
) -> Response {
157
158
159
160
    state
        .router
        .route_generate(Some(&headers), &body, None)
        .await
161
162
163
}

async fn v1_chat_completions(
164
165
166
167
    State(state): State<Arc<AppState>>,
    headers: http::HeaderMap,
    Json(body): Json<ChatCompletionRequest>,
) -> Response {
168
    state.router.route_chat(Some(&headers), &body, None).await
169
170
171
}

async fn v1_completions(
172
173
174
175
    State(state): State<Arc<AppState>>,
    headers: http::HeaderMap,
    Json(body): Json<CompletionRequest>,
) -> Response {
176
177
178
179
    state
        .router
        .route_completion(Some(&headers), &body, None)
        .await
180
181
}

182
183
184
185
186
/// POST /rerank — native rerank endpoint.
async fn rerank(
    State(state): State<Arc<AppState>>,
    headers: http::HeaderMap,
    Json(body): Json<RerankRequest>,
) -> Response {
    let router = &state.router;
    router.route_rerank(Some(&headers), &body, None).await
}

async fn v1_rerank(
    State(state): State<Arc<AppState>>,
    headers: http::HeaderMap,
    Json(body): Json<V1RerankReqInput>,
) -> Response {
    state
        .router
197
        .route_rerank(Some(&headers), &body.into(), None)
198
199
200
        .await
}

201
202
203
204
205
/// POST /v1/responses — create a response (OpenAI Responses API shape).
async fn v1_responses(
    State(state): State<Arc<AppState>>,
    headers: http::HeaderMap,
    Json(body): Json<ResponsesRequest>,
) -> Response {
    let router = &state.router;
    router.route_responses(Some(&headers), &body, None).await
}

212
213
214
215
216
217
218
219
220
221
222
/// POST /v1/embeddings — OpenAI-compatible embeddings endpoint.
async fn v1_embeddings(
    State(state): State<Arc<AppState>>,
    headers: http::HeaderMap,
    Json(body): Json<EmbeddingRequest>,
) -> Response {
    let router = &state.router;
    router.route_embeddings(Some(&headers), &body, None).await
}

223
224
225
226
async fn v1_responses_get(
    State(state): State<Arc<AppState>>,
    Path(response_id): Path<String>,
    headers: http::HeaderMap,
227
    Query(params): Query<ResponsesGetParams>,
228
229
230
) -> Response {
    state
        .router
231
        .get_response(Some(&headers), &response_id, &params)
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
        .await
}

/// POST /v1/responses/{response_id}/cancel — cancel an in-flight response.
async fn v1_responses_cancel(
    State(state): State<Arc<AppState>>,
    Path(response_id): Path<String>,
    headers: http::HeaderMap,
) -> Response {
    let router = &state.router;
    router.cancel_response(Some(&headers), &response_id).await
}

/// DELETE /v1/responses/{response_id} — delete a stored response.
async fn v1_responses_delete(
    State(state): State<Arc<AppState>>,
    Path(response_id): Path<String>,
    headers: http::HeaderMap,
) -> Response {
    let router = &state.router;
    router.delete_response(Some(&headers), &response_id).await
}

/// GET /v1/responses/{response_id}/input — list the input items of a response.
async fn v1_responses_list_input_items(
    State(state): State<Arc<AppState>>,
    Path(response_id): Path<String>,
    headers: http::HeaderMap,
) -> Response {
    let router = &state.router;
    router
        .list_response_input_items(Some(&headers), &response_id)
        .await
}

268
/// Query parameters accepted by the legacy `/add_worker` and `/remove_worker`
/// endpoints.
#[derive(Deserialize)]
struct AddWorkerQuery {
    /// Worker base URL to add or remove.
    url: String,
    /// Optional API key used when registering the worker.
    api_key: Option<String>,
}

274
async fn add_worker(
275
    State(state): State<Arc<AppState>>,
276
    Query(AddWorkerQuery { url, api_key }): Query<AddWorkerQuery>,
277
) -> Response {
278
279
280
    let result = WorkerManager::add_worker(&url, &api_key, &state.context).await;

    match result {
281
282
        Ok(message) => (StatusCode::OK, message).into_response(),
        Err(error) => (StatusCode::BAD_REQUEST, error).into_response(),
283
    }
284
285
}

286
async fn list_workers(State(state): State<Arc<AppState>>) -> Response {
287
288
    let worker_list = WorkerManager::get_worker_urls(&state.context.worker_registry);
    Json(json!({ "urls": worker_list })).into_response()
289
290
}

291
async fn remove_worker(
292
    State(state): State<Arc<AppState>>,
293
    Query(AddWorkerQuery { url, .. }): Query<AddWorkerQuery>,
294
) -> Response {
295
296
297
298
299
300
    let result = WorkerManager::remove_worker(&url, &state.context);

    match result {
        Ok(message) => (StatusCode::OK, message).into_response(),
        Err(error) => (StatusCode::BAD_REQUEST, error).into_response(),
    }
301
302
}

303
/// POST /flush_cache — ask the router to flush worker caches.
async fn flush_cache(State(state): State<Arc<AppState>>, _req: Request) -> Response {
    let router = &state.router;
    router.flush_cache().await
}

307
/// GET /get_loads — report per-worker load, as seen by the router.
async fn get_loads(State(state): State<Arc<AppState>>, _req: Request) -> Response {
    let router = &state.router;
    router.get_worker_loads().await
}

311
312
313
314
async fn create_worker(
    State(state): State<Arc<AppState>>,
    Json(config): Json<WorkerConfigRequest>,
) -> Response {
315
316
317
318
319
320
321
322
323
324
    let result = WorkerManager::add_worker_from_config(&config, &state.context).await;

    match result {
        Ok(message) => {
            let response = WorkerApiResponse {
                success: true,
                message,
                worker: None,
            };
            (StatusCode::OK, Json(response)).into_response()
325
        }
326
327
328
329
330
331
        Err(error) => {
            let error_response = WorkerErrorResponse {
                error,
                code: "ADD_WORKER_FAILED".to_string(),
            };
            (StatusCode::BAD_REQUEST, Json(error_response)).into_response()
332
333
334
335
336
        }
    }
}

async fn list_workers_rest(State(state): State<Arc<AppState>>) -> Response {
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
    let workers = state.context.worker_registry.get_all();
    let response = serde_json::json!({
        "workers": workers.iter().map(|worker| {
            let mut worker_info = serde_json::json!({
                "url": worker.url(),
                "model_id": worker.model_id(),
                "worker_type": match worker.worker_type() {
                    WorkerType::Regular => "regular",
                    WorkerType::Prefill { .. } => "prefill",
                    WorkerType::Decode => "decode",
                },
                "is_healthy": worker.is_healthy(),
                "load": worker.load(),
                "connection_mode": format!("{:?}", worker.connection_mode()),
                "priority": worker.priority(),
                "cost": worker.cost(),
            });

            if let WorkerType::Prefill { bootstrap_port } = worker.worker_type() {
                worker_info["bootstrap_port"] = serde_json::json!(bootstrap_port);
357
            }
358
359
360
361
362
363
364
365
366
367
368

            worker_info
        }).collect::<Vec<_>>(),
        "total": workers.len(),
        "stats": {
            "prefill_count": state.context.worker_registry.get_prefill_workers().len(),
            "decode_count": state.context.worker_registry.get_decode_workers().len(),
            "regular_count": state.context.worker_registry.get_by_type(&WorkerType::Regular).len(),
        }
    });
    Json(response).into_response()
369
370
}

371
async fn get_worker(State(state): State<Arc<AppState>>, Path(url): Path<String>) -> Response {
372
373
374
375
376
377
378
379
    let workers = WorkerManager::get_worker_urls(&state.context.worker_registry);
    if workers.contains(&url) {
        Json(json!({
            "url": url,
            "model_id": "unknown",
            "is_healthy": true
        }))
        .into_response()
380
    } else {
381
382
383
384
385
        let error = WorkerErrorResponse {
            error: format!("Worker {url} not found"),
            code: "WORKER_NOT_FOUND".to_string(),
        };
        (StatusCode::NOT_FOUND, Json(error)).into_response()
386
387
388
    }
}

389
/// DELETE /workers/{url} — remove a worker from the registry.
async fn delete_worker(State(state): State<Arc<AppState>>, Path(url): Path<String>) -> Response {
    match WorkerManager::remove_worker(&url, &state.context) {
        Ok(message) => {
            let body = WorkerApiResponse {
                success: true,
                message,
                worker: None,
            };
            (StatusCode::OK, Json(body)).into_response()
        }
        Err(error) => {
            let body = WorkerErrorResponse {
                error,
                code: "REMOVE_WORKER_FAILED".to_string(),
            };
            (StatusCode::BAD_REQUEST, Json(body)).into_response()
        }
    }
}

411
412
413
/// Everything needed to start the HTTP server (consumed by [`startup`]).
pub struct ServerConfig {
    /// Interface to bind (e.g. "0.0.0.0").
    pub host: String,
    /// TCP port to listen on.
    pub port: u16,
    /// Router configuration forwarded into the [`AppContext`].
    pub router_config: RouterConfig,
    /// Maximum accepted request body size, in bytes.
    pub max_payload_size: usize,
    /// Optional directory for log files, passed to logging initialization.
    pub log_dir: Option<String>,
    /// Log level string; parsed case-insensitively, falls back to INFO.
    pub log_level: Option<String>,
    /// Service discovery settings; discovery starts only if present and enabled.
    pub service_discovery_config: Option<ServiceDiscoveryConfig>,
    /// Prometheus exporter settings; metrics start only if present.
    pub prometheus_config: Option<PrometheusConfig>,
    /// Total request timeout applied to the outbound HTTP client, in seconds.
    pub request_timeout_secs: u64,
    /// Header names used for request-id propagation; `startup` supplies
    /// defaults when `None`.
    pub request_id_headers: Option<Vec<String>>,
}

424
425
426
427
428
429
430
431
432
pub fn build_app(
    app_state: Arc<AppState>,
    max_payload_size: usize,
    request_id_headers: Vec<String>,
    cors_allowed_origins: Vec<String>,
) -> Router {
    let protected_routes = Router::new()
        .route("/generate", post(generate))
        .route("/v1/chat/completions", post(v1_chat_completions))
433
        .route("/v1/completions", post(v1_completions))
434
435
        .route("/rerank", post(rerank))
        .route("/v1/rerank", post(v1_rerank))
436
        .route("/v1/responses", post(v1_responses))
437
        .route("/v1/embeddings", post(v1_embeddings))
438
439
440
441
442
443
444
445
446
447
        .route("/v1/responses/{response_id}", get(v1_responses_get))
        .route(
            "/v1/responses/{response_id}/cancel",
            post(v1_responses_cancel),
        )
        .route("/v1/responses/{response_id}", delete(v1_responses_delete))
        .route(
            "/v1/responses/{response_id}/input",
            get(v1_responses_list_input_items),
        )
448
449
        .route_layer(axum::middleware::from_fn_with_state(
            app_state.clone(),
450
            middleware::concurrency_limit_middleware,
451
        ));
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468

    let public_routes = Router::new()
        .route("/liveness", get(liveness))
        .route("/readiness", get(readiness))
        .route("/health", get(health))
        .route("/health_generate", get(health_generate))
        .route("/v1/models", get(v1_models))
        .route("/get_model_info", get(get_model_info))
        .route("/get_server_info", get(get_server_info));

    let admin_routes = Router::new()
        .route("/add_worker", post(add_worker))
        .route("/remove_worker", post(remove_worker))
        .route("/list_workers", get(list_workers))
        .route("/flush_cache", post(flush_cache))
        .route("/get_loads", get(get_loads));

469
470
471
472
    let worker_routes = Router::new()
        .route("/workers", post(create_worker))
        .route("/workers", get(list_workers_rest))
        .route("/workers/{url}", get(get_worker))
473
        .route("/workers/{url}", delete(delete_worker));
474

475
476
477
478
    Router::new()
        .merge(protected_routes)
        .merge(public_routes)
        .merge(admin_routes)
479
        .merge(worker_routes)
480
        .layer(axum::extract::DefaultBodyLimit::max(max_payload_size))
481
482
483
        .layer(tower_http::limit::RequestBodyLimitLayer::new(
            max_payload_size,
        ))
484
485
        .layer(middleware::create_logging_layer())
        .layer(middleware::RequestIdLayer::new(request_id_headers))
486
487
488
489
490
491
        .layer(create_cors_layer(cors_allowed_origins))
        .fallback(sink_handler)
        .with_state(app_state)
}

/// Start the router server: initialize logging and metrics, build the shared
/// context, register workers, start background services (health checker,
/// request queue, optional service discovery), then serve until a shutdown
/// signal is received.
///
/// # Errors
/// Returns an error when the context cannot be built, worker initialization
/// fails, the router manager cannot be created, the listener cannot bind, or
/// the server fails while running.
pub async fn startup(config: ServerConfig) -> Result<(), Box<dyn std::error::Error>> {
    // Guard so logging is initialized at most once per process, even if
    // `startup` is called multiple times (e.g. from tests).
    static LOGGING_INITIALIZED: AtomicBool = AtomicBool::new(false);

    let _log_guard = if !LOGGING_INITIALIZED.swap(true, Ordering::SeqCst) {
        Some(logging::init_logging(LoggingConfig {
            // Parse the configured level case-insensitively; anything
            // unparseable falls back to INFO with a warning.
            level: config
                .log_level
                .as_deref()
                .and_then(|s| match s.to_uppercase().parse::<Level>() {
                    Ok(l) => Some(l),
                    Err(_) => {
                        warn!("Invalid log level string: '{s}'. Defaulting to INFO.");
                        None
                    }
                })
                .unwrap_or(Level::INFO),
            json_format: false,
            log_dir: config.log_dir.clone(),
            colorize: true,
            log_file_name: "sgl-router".to_string(),
            log_targets: None,
        }))
    } else {
        None
    };

    // Start the Prometheus exporter when metrics are configured.
    if let Some(prometheus_config) = &config.prometheus_config {
        metrics::start_prometheus(prometheus_config.clone());
    }

    info!(
        "Starting router on {}:{} | mode: {:?} | policy: {:?} | max_payload: {}MB",
        config.host,
        config.port,
        config.router_config.mode,
        config.router_config.policy,
        config.max_payload_size / (1024 * 1024)
    );

    // Shared HTTP client for talking to workers, with pooling and keepalive.
    let client = Client::builder()
        .pool_idle_timeout(Some(Duration::from_secs(50)))
        .pool_max_idle_per_host(500)
        .timeout(Duration::from_secs(config.request_timeout_secs))
        .connect_timeout(Duration::from_secs(10))
        .tcp_nodelay(true)
        .tcp_keepalive(Some(Duration::from_secs(30)))
        .build()
        .expect("Failed to create HTTP client");

    let app_context = AppContext::new(
        config.router_config.clone(),
        client.clone(),
        config.router_config.max_concurrent_requests,
        config.router_config.rate_limit_tokens_per_second,
    )?;

    let app_context = Arc::new(app_context);

    // Register the configured workers before constructing the router manager,
    // which consults the registry.
    info!(
        "Initializing workers for routing mode: {:?}",
        config.router_config.mode
    );
    WorkerManager::initialize_workers(
        &config.router_config,
        &app_context.worker_registry,
        Some(&app_context.policy_registry),
    )
    .await
    .map_err(|e| format!("Failed to initialize workers: {}", e))?;

    let worker_stats = app_context.worker_registry.stats();
    info!(
        "Workers initialized: {} total, {} healthy",
        worker_stats.total_workers, worker_stats.healthy_workers
    );

    let router_manager = RouterManager::from_config(&config, &app_context).await?;
    let router: Arc<dyn RouterTrait> = router_manager.clone();

    // The health checker runs for the lifetime of the registry; the returned
    // handle is intentionally not awaited.
    let _health_checker = app_context
        .worker_registry
        .start_health_checker(config.router_config.health_check.check_interval_secs);
    info!(
        "Started health checker for workers with {}s interval",
        config.router_config.health_check.check_interval_secs
    );

    // Concurrency limiter plus an optional queue processor for requests that
    // exceed the limit.
    let (limiter, processor) = middleware::ConcurrencyLimiter::new(
        app_context.rate_limiter.clone(),
        config.router_config.queue_size,
        Duration::from_secs(config.router_config.queue_timeout_secs),
    );

    if let Some(processor) = processor {
        spawn(processor.run());
        info!(
            "Started request queue with size: {}, timeout: {}s",
            config.router_config.queue_size, config.router_config.queue_timeout_secs
        );
    }

    let app_state = Arc::new(AppState {
        router,
        context: app_context.clone(),
        concurrency_queue_tx: limiter.queue_tx.clone(),
        router_manager: Some(router_manager),
    });

    // Service discovery is best-effort: failure to start is logged and the
    // server continues without it.
    if let Some(service_discovery_config) = config.service_discovery_config {
        if service_discovery_config.enabled {
            let app_context_arc = Arc::clone(&app_state.context);
            match start_service_discovery(service_discovery_config, app_context_arc).await {
                Ok(handle) => {
                    info!("Service discovery started");
                    spawn(async move {
                        if let Err(e) = handle.await {
                            error!("Service discovery task failed: {:?}", e);
                        }
                    });
                }
                Err(e) => {
                    error!("Failed to start service discovery: {e}");
                    warn!("Continuing without service discovery");
                }
            }
        }
    }

    info!(
        "Router ready | workers: {:?}",
        WorkerManager::get_worker_urls(&app_state.context.worker_registry)
    );

    // Default request-id headers when none are configured.
    let request_id_headers = config.request_id_headers.clone().unwrap_or_else(|| {
        vec![
            "x-request-id".to_string(),
            "x-correlation-id".to_string(),
            "x-trace-id".to_string(),
            "request-id".to_string(),
        ]
    });

    let app = build_app(
        app_state,
        config.max_payload_size,
        request_id_headers,
        config.router_config.cors_allowed_origins.clone(),
    );

    // Bind and serve until a shutdown signal arrives.
    let addr = format!("{}:{}", config.host, config.port);
    let listener = TcpListener::bind(&addr).await?;
    info!("Starting server on {}", addr);
    serve(listener, app)
        .with_graceful_shutdown(shutdown_signal())
        .await
        .map_err(|e| Box::new(e) as Box<dyn std::error::Error>)?;

    Ok(())
}

/// Resolve when the process receives Ctrl+C or (on Unix) SIGTERM, which
/// triggers graceful shutdown of the server.
async fn shutdown_signal() {
    let interrupt = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let sigterm = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    // On non-Unix platforms there is no SIGTERM; use a future that never
    // resolves so only Ctrl+C can fire.
    #[cfg(not(unix))]
    let sigterm = std::future::pending::<()>();

    tokio::select! {
        _ = interrupt => {
            info!("Received Ctrl+C, starting graceful shutdown");
        },
        _ = sigterm => {
            info!("Received terminate signal, starting graceful shutdown");
        },
    }
}

fn create_cors_layer(allowed_origins: Vec<String>) -> tower_http::cors::CorsLayer {
    use tower_http::cors::Any;

    let cors = if allowed_origins.is_empty() {
        tower_http::cors::CorsLayer::new()
            .allow_origin(Any)
            .allow_methods(Any)
            .allow_headers(Any)
            .expose_headers(Any)
    } else {
        let origins: Vec<http::HeaderValue> = allowed_origins
            .into_iter()
            .filter_map(|origin| origin.parse().ok())
            .collect();

        tower_http::cors::CorsLayer::new()
            .allow_origin(origins)
            .allow_methods([http::Method::GET, http::Method::POST, http::Method::OPTIONS])
            .allow_headers([http::header::CONTENT_TYPE, http::header::AUTHORIZATION])
            .expose_headers([http::header::HeaderName::from_static("x-request-id")])
    };

    cors.max_age(Duration::from_secs(3600))
701
}