use std::{
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc, OnceLock,
    },
    time::Duration,
};

use axum::{
    extract::{Path, Query, Request, State},
    http::StatusCode,
    response::{IntoResponse, Response},
    routing::{delete, get, post},
    serve, Json, Router,
};
use reqwest::Client;
use serde::Deserialize;
use serde_json::{json, Value};
use tokio::{net::TcpListener, signal, spawn};
use tracing::{error, info, warn, Level};

use crate::{
    config::{ConnectionMode, HistoryBackend, RouterConfig, RoutingMode},
    core::{
        worker_to_info,
        workflow::{
            create_worker_registration_workflow, create_worker_removal_workflow, LoggingSubscriber,
            WorkflowEngine,
        },
        Job, JobQueue, JobQueueConfig, LoadMonitor, WorkerManager, WorkerRegistry, WorkerType,
    },
    data_connector::{
        MemoryConversationItemStorage, MemoryConversationStorage, MemoryResponseStorage,
        NoOpConversationStorage, NoOpResponseStorage, OracleConversationItemStorage,
        OracleConversationStorage, OracleResponseStorage, SharedConversationItemStorage,
        SharedConversationStorage, SharedResponseStorage,
    },
    logging::{self, LoggingConfig},
    metrics::{self, PrometheusConfig},
    middleware::{self, AuthConfig, QueuedRequest, TokenBucket},
    policies::PolicyRegistry,
    protocols::{
        chat::ChatCompletionRequest,
        classify::ClassifyRequest,
        completion::CompletionRequest,
        embedding::EmbeddingRequest,
        generate::GenerateRequest,
        rerank::{RerankRequest, V1RerankReqInput},
        responses::{ResponsesGetParams, ResponsesRequest},
        validated::ValidatedJson,
        worker_spec::{WorkerConfigRequest, WorkerErrorResponse, WorkerInfo},
    },
    reasoning_parser::ParserFactory as ReasoningParserFactory,
    routers::{router_manager::RouterManager, RouterTrait},
    service_discovery::{start_service_discovery, ServiceDiscoveryConfig},
    tokenizer::{
        cache::{CacheConfig, CachedTokenizer},
        factory as tokenizer_factory,
        traits::Tokenizer,
    },
    tool_parser::ParserFactory as ToolParserFactory,
};

/// Shared application context: HTTP client, router configuration, worker and
/// policy registries, parser factories, storage backends, and the lazily
/// initialized job queue and workflow engine.
#[derive(Clone)]
pub struct AppContext {
    pub client: Client,
    pub router_config: RouterConfig,
    pub rate_limiter: Option<Arc<TokenBucket>>,
    pub tokenizer: Option<Arc<dyn Tokenizer>>,
    pub reasoning_parser_factory: Option<ReasoningParserFactory>,
    pub tool_parser_factory: Option<ToolParserFactory>,
    pub worker_registry: Arc<WorkerRegistry>,
    pub policy_registry: Arc<PolicyRegistry>,
    pub router_manager: Option<Arc<RouterManager>>,
    pub response_storage: SharedResponseStorage,
    pub conversation_storage: SharedConversationStorage,
    pub conversation_item_storage: SharedConversationItemStorage,
    pub load_monitor: Option<Arc<LoadMonitor>>,
    pub configured_reasoning_parser: Option<String>,
    pub configured_tool_parser: Option<String>,
    pub worker_job_queue: Arc<OnceLock<Arc<JobQueue>>>,
    pub workflow_engine: Arc<OnceLock<Arc<WorkflowEngine>>>,
}

impl AppContext {
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        router_config: RouterConfig,
        client: Client,
        rate_limiter: Option<Arc<TokenBucket>>,
        tokenizer: Option<Arc<dyn Tokenizer>>,
        reasoning_parser_factory: Option<ReasoningParserFactory>,
        tool_parser_factory: Option<ToolParserFactory>,
        worker_registry: Arc<WorkerRegistry>,
        policy_registry: Arc<PolicyRegistry>,
        response_storage: SharedResponseStorage,
        conversation_storage: SharedConversationStorage,
        conversation_item_storage: SharedConversationItemStorage,
        load_monitor: Option<Arc<LoadMonitor>>,
        worker_job_queue: Arc<OnceLock<Arc<JobQueue>>>,
        workflow_engine: Arc<OnceLock<Arc<WorkflowEngine>>>,
    ) -> Self {
        let configured_reasoning_parser = router_config.reasoning_parser.clone();
        let configured_tool_parser = router_config.tool_call_parser.clone();

        Self {
            client,
            router_config,
            rate_limiter,
            tokenizer,
            reasoning_parser_factory,
            tool_parser_factory,
            worker_registry,
            policy_registry,
            router_manager: None,
            response_storage,
            conversation_storage,
            conversation_item_storage,
            load_monitor,
            configured_reasoning_parser,
            configured_tool_parser,
            worker_job_queue,
            workflow_engine,
        }
    }
}

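/// Axum application state handed to every request handler: the active router,
/// the shared `AppContext`, the optional concurrency-limiter queue, and an
/// optional `RouterManager` handle.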
#[derive(Clone)]
pub struct AppState {
    pub router: Arc<dyn RouterTrait>,
    pub context: Arc<AppContext>,
    pub concurrency_queue_tx: Option<tokio::sync::mpsc::Sender<QueuedRequest>>,
    pub router_manager: Option<Arc<RouterManager>>,
}

async fn sink_handler() -> Response {
    StatusCode::NOT_FOUND.into_response()
}

async fn liveness() -> Response {
    (StatusCode::OK, "OK").into_response()
}

async fn readiness(State(state): State<Arc<AppState>>) -> Response {
    let workers = state.context.worker_registry.get_all();
    let healthy_workers: Vec<_> = workers.iter().filter(|w| w.is_healthy()).collect();

    let is_ready = if state.context.router_config.enable_igw {
        !healthy_workers.is_empty()
    } else {
        match &state.context.router_config.mode {
            RoutingMode::PrefillDecode { .. } => {
                let has_prefill = healthy_workers
                    .iter()
                    .any(|w| matches!(w.worker_type(), WorkerType::Prefill { .. }));
                let has_decode = healthy_workers
                    .iter()
                    .any(|w| matches!(w.worker_type(), WorkerType::Decode));
                has_prefill && has_decode
            }
            RoutingMode::Regular { .. } => !healthy_workers.is_empty(),
            RoutingMode::OpenAI { .. } => !healthy_workers.is_empty(),
        }
    };

    if is_ready {
        (
            StatusCode::OK,
            Json(json!({
                "status": "ready",
                "healthy_workers": healthy_workers.len(),
                "total_workers": workers.len()
            })),
        )
            .into_response()
    } else {
        (
            StatusCode::SERVICE_UNAVAILABLE,
            Json(json!({
                "status": "not ready",
                "reason": "insufficient healthy workers"
            })),
        )
            .into_response()
    }
}
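
// Illustrative readiness probe responses, matching the JSON bodies built above
// (the worker counts are made up for the example):
//   200 OK                  -> {"status":"ready","healthy_workers":2,"total_workers":2}
//   503 SERVICE UNAVAILABLE -> {"status":"not ready","reason":"insufficient healthy workers"}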

async fn health(_state: State<Arc<AppState>>) -> Response {
    liveness().await
}

async fn health_generate(State(state): State<Arc<AppState>>, req: Request) -> Response {
    state.router.health_generate(req).await
}

async fn get_server_info(State(state): State<Arc<AppState>>, req: Request) -> Response {
    state.router.get_server_info(req).await
}

async fn v1_models(State(state): State<Arc<AppState>>, req: Request) -> Response {
    state.router.get_models(req).await
}

async fn get_model_info(State(state): State<Arc<AppState>>, req: Request) -> Response {
    state.router.get_model_info(req).await
}

async fn generate(
    State(state): State<Arc<AppState>>,
    headers: http::HeaderMap,
    Json(body): Json<GenerateRequest>,
) -> Response {
    state
        .router
        .route_generate(Some(&headers), &body, None)
        .await
}

async fn v1_chat_completions(
    State(state): State<Arc<AppState>>,
    headers: http::HeaderMap,
    ValidatedJson(body): ValidatedJson<ChatCompletionRequest>,
) -> Response {
    state.router.route_chat(Some(&headers), &body, None).await
}

async fn v1_completions(
    State(state): State<Arc<AppState>>,
    headers: http::HeaderMap,
    Json(body): Json<CompletionRequest>,
) -> Response {
    state
        .router
        .route_completion(Some(&headers), &body, None)
        .await
}

async fn rerank(
    State(state): State<Arc<AppState>>,
    headers: http::HeaderMap,
    ValidatedJson(body): ValidatedJson<RerankRequest>,
) -> Response {
    state.router.route_rerank(Some(&headers), &body, None).await
}

async fn v1_rerank(
    State(state): State<Arc<AppState>>,
    headers: http::HeaderMap,
    Json(body): Json<V1RerankReqInput>,
) -> Response {
    state
        .router
        .route_rerank(Some(&headers), &body.into(), None)
        .await
}

async fn v1_responses(
    State(state): State<Arc<AppState>>,
    headers: http::HeaderMap,
    Json(body): Json<ResponsesRequest>,
) -> Response {
    state
        .router
        .route_responses(Some(&headers), &body, None)
        .await
}

async fn v1_embeddings(
    State(state): State<Arc<AppState>>,
    headers: http::HeaderMap,
    Json(body): Json<EmbeddingRequest>,
) -> Response {
    state
        .router
        .route_embeddings(Some(&headers), &body, None)
        .await
}

async fn v1_classify(
    State(state): State<Arc<AppState>>,
    headers: http::HeaderMap,
    Json(body): Json<ClassifyRequest>,
) -> Response {
    state
        .router
        .route_classify(Some(&headers), &body, None)
        .await
}

async fn v1_responses_get(
    State(state): State<Arc<AppState>>,
    Path(response_id): Path<String>,
    headers: http::HeaderMap,
    Query(params): Query<ResponsesGetParams>,
) -> Response {
    state
        .router
        .get_response(Some(&headers), &response_id, &params)
        .await
}

async fn v1_responses_cancel(
    State(state): State<Arc<AppState>>,
    Path(response_id): Path<String>,
    headers: http::HeaderMap,
) -> Response {
    state
        .router
        .cancel_response(Some(&headers), &response_id)
        .await
}

async fn v1_responses_delete(
    State(state): State<Arc<AppState>>,
    Path(response_id): Path<String>,
    headers: http::HeaderMap,
) -> Response {
    state
        .router
        .delete_response(Some(&headers), &response_id)
        .await
}

async fn v1_responses_list_input_items(
    State(state): State<Arc<AppState>>,
    Path(response_id): Path<String>,
    headers: http::HeaderMap,
) -> Response {
    state
        .router
        .list_response_input_items(Some(&headers), &response_id)
        .await
}

async fn v1_conversations_create(
    State(state): State<Arc<AppState>>,
    headers: http::HeaderMap,
    Json(body): Json<Value>,
) -> Response {
    state
        .router
        .create_conversation(Some(&headers), &body)
        .await
}

async fn v1_conversations_get(
    State(state): State<Arc<AppState>>,
    Path(conversation_id): Path<String>,
    headers: http::HeaderMap,
) -> Response {
    state
        .router
        .get_conversation(Some(&headers), &conversation_id)
        .await
}

async fn v1_conversations_update(
    State(state): State<Arc<AppState>>,
    Path(conversation_id): Path<String>,
    headers: http::HeaderMap,
    Json(body): Json<Value>,
) -> Response {
    state
        .router
        .update_conversation(Some(&headers), &conversation_id, &body)
        .await
}

async fn v1_conversations_delete(
    State(state): State<Arc<AppState>>,
    Path(conversation_id): Path<String>,
    headers: http::HeaderMap,
) -> Response {
    state
        .router
        .delete_conversation(Some(&headers), &conversation_id)
        .await
}

#[derive(Deserialize, Default)]
struct ListItemsQuery {
    limit: Option<usize>,
    order: Option<String>,
    after: Option<String>,
}

async fn v1_conversations_list_items(
    State(state): State<Arc<AppState>>,
    Path(conversation_id): Path<String>,
    Query(ListItemsQuery {
        limit,
        order,
        after,
    }): Query<ListItemsQuery>,
    headers: http::HeaderMap,
) -> Response {
    state
        .router
        .list_conversation_items(Some(&headers), &conversation_id, limit, order, after)
        .await
}
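
// Illustrative request for the handler above (IDs and values are made up);
// each query parameter maps onto the corresponding `ListItemsQuery` field:
//   GET /v1/conversations/conv_123/items?limit=20&order=desc&after=item_456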

#[derive(Deserialize, Default)]
struct GetItemQuery {
    /// Additional fields to include in response (not yet implemented)
    include: Option<Vec<String>>,
}

async fn v1_conversations_create_items(
    State(state): State<Arc<AppState>>,
    Path(conversation_id): Path<String>,
    headers: http::HeaderMap,
    Json(body): Json<Value>,
) -> Response {
    state
        .router
        .create_conversation_items(Some(&headers), &conversation_id, &body)
        .await
}

async fn v1_conversations_get_item(
    State(state): State<Arc<AppState>>,
    Path((conversation_id, item_id)): Path<(String, String)>,
    Query(query): Query<GetItemQuery>,
    headers: http::HeaderMap,
) -> Response {
    state
        .router
        .get_conversation_item(Some(&headers), &conversation_id, &item_id, query.include)
        .await
}

async fn v1_conversations_delete_item(
    State(state): State<Arc<AppState>>,
    Path((conversation_id, item_id)): Path<(String, String)>,
    headers: http::HeaderMap,
) -> Response {
    state
        .router
        .delete_conversation_item(Some(&headers), &conversation_id, &item_id)
        .await
}

async fn flush_cache(State(state): State<Arc<AppState>>, _req: Request) -> Response {
    match WorkerManager::flush_cache_all(&state.context.worker_registry, &state.context.client)
        .await
    {
        Ok(result) => {
            if result.failed.is_empty() {
                (
                    StatusCode::OK,
                    Json(json!({
                        "status": "success",
                        "message": result.message,
                        "workers_flushed": result.successful.len(),
                        "total_http_workers": result.http_workers,
                        "total_workers": result.total_workers
                    })),
                )
                    .into_response()
            } else {
                (
                    StatusCode::PARTIAL_CONTENT,
                    Json(json!({
                        "status": "partial_success",
                        "message": result.message,
                        "successful": result.successful,
                        "failed": result.failed.into_iter().map(|(url, err)| json!({
                            "worker": url,
                            "error": err
                        })).collect::<Vec<_>>(),
                        "total_http_workers": result.http_workers,
                        "total_workers": result.total_workers
                    })),
                )
                    .into_response()
            }
        }
        Err(e) => {
            error!("Failed to flush cache: {}", e);
            (
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(json!({
                    "status": "error",
                    "message": format!("Failed to flush cache: {}", e)
                })),
            )
                .into_response()
        }
    }
}

async fn get_loads(State(state): State<Arc<AppState>>, _req: Request) -> Response {
    let result =
        WorkerManager::get_all_worker_loads(&state.context.worker_registry, &state.context.client)
            .await;

    let loads: Vec<Value> = result
        .loads
        .iter()
        .map(|info| {
            json!({
                "worker": &info.worker,
                "load": info.load
            })
        })
        .collect();

    (StatusCode::OK, Json(json!({ "workers": loads }))).into_response()
}
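
// Illustrative /get_loads response shape (worker URL and load are made up):
//   {"workers":[{"worker":"http://worker-1:8000","load":3}]}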

async fn create_worker(
    State(state): State<Arc<AppState>>,
    Json(config): Json<WorkerConfigRequest>,
) -> Response {
    // Warn if router has API key but worker is being added without one
    if state.context.router_config.api_key.is_some() && config.api_key.is_none() {
        warn!(
            "Adding worker {} without API key while router has API key configured. \
            Worker will be accessible without authentication. \
            If the worker requires the same API key as the router, please specify it explicitly.",
            config.url
        );
    }

    // Populate dp_aware from router's configuration
    let config = WorkerConfigRequest {
        dp_aware: state.context.router_config.dp_aware,
        ..config
    };

    // Submit job for async processing
    let worker_url = config.url.clone();
    let job = Job::AddWorker {
        config: Box::new(config),
    };

    let job_queue = state
        .context
        .worker_job_queue
        .get()
        .expect("JobQueue not initialized");
    match job_queue.submit(job).await {
        Ok(_) => {
            let response = json!({
                "status": "accepted",
                "worker_id": worker_url,
                "message": "Worker addition queued for background processing"
            });
            (StatusCode::ACCEPTED, Json(response)).into_response()
        }
        Err(error) => {
            let error_response = WorkerErrorResponse {
                error,
                code: "INTERNAL_SERVER_ERROR".to_string(),
            };
            (StatusCode::INTERNAL_SERVER_ERROR, Json(error_response)).into_response()
        }
    }
}

async fn list_workers_rest(State(state): State<Arc<AppState>>) -> Response {
    let workers = state.context.worker_registry.get_all();
    let worker_infos: Vec<WorkerInfo> = workers.iter().map(worker_to_info).collect();

    let response = json!({
        "workers": worker_infos,
        "total": workers.len(),
        "stats": {
            "prefill_count": state.context.worker_registry.get_prefill_workers().len(),
            "decode_count": state.context.worker_registry.get_decode_workers().len(),
            "regular_count": state.context.worker_registry.get_by_type(&WorkerType::Regular).len(),
        }
    });
    Json(response).into_response()
}

async fn get_worker(State(state): State<Arc<AppState>>, Path(url): Path<String>) -> Response {
    let job_queue = state
        .context
        .worker_job_queue
        .get()
        .expect("JobQueue not initialized");

    if let Some(worker) = state.context.worker_registry.get_by_url(&url) {
        // Worker exists in registry, get its full info and attach job status if any
        let mut worker_info = worker_to_info(&worker);
        if let Some(status) = job_queue.get_status(&url) {
            worker_info.job_status = Some(status);
        }
        return Json(worker_info).into_response();
    }

    // Worker not in registry, check job queue for its status
    if let Some(status) = job_queue.get_status(&url) {
        // Create a partial WorkerInfo to report the job status
        let worker_info = WorkerInfo {
            id: url.clone(),
            url: url.clone(),
            model_id: "unknown".to_string(),
            priority: 0,
            cost: 1.0,
            worker_type: "unknown".to_string(),
            is_healthy: false,
            load: 0,
            connection_mode: "unknown".to_string(),
            tokenizer_path: None,
            reasoning_parser: None,
            tool_parser: None,
            chat_template: None,
            bootstrap_port: None,
            metadata: std::collections::HashMap::new(),
            job_status: Some(status),
        };
        return Json(worker_info).into_response();
    }

    // Worker not found in registry or job queue
    let error = WorkerErrorResponse {
        error: format!("Worker {url} not found"),
        code: "WORKER_NOT_FOUND".to_string(),
    };
    (StatusCode::NOT_FOUND, Json(error)).into_response()
}

async fn delete_worker(State(state): State<Arc<AppState>>, Path(url): Path<String>) -> Response {
    let worker_id = url.clone();
    let job = Job::RemoveWorker { url };

    let job_queue = state
        .context
        .worker_job_queue
        .get()
        .expect("JobQueue not initialized");
    match job_queue.submit(job).await {
        Ok(_) => {
            let response = json!({
                "status": "accepted",
                "worker_id": worker_id,
                "message": "Worker removal queued for background processing"
            });
            (StatusCode::ACCEPTED, Json(response)).into_response()
        }
        Err(error) => {
            let error_response = WorkerErrorResponse {
                error,
                code: "INTERNAL_SERVER_ERROR".to_string(),
            };
            (StatusCode::INTERNAL_SERVER_ERROR, Json(error_response)).into_response()
        }
    }
}
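
// Illustrative worker lifecycle against the routes registered in `build_app`
// (the worker URL is made up, and percent-encoding of the `{url}` path
// segment is assumed):
//   POST   /workers  {"url":"http://worker-1:8000"}  -> 202 {"status":"accepted",...}
//   GET    /workers/http%3A%2F%2Fworker-1%3A8000     -> WorkerInfo JSON (plus job_status while queued)
//   DELETE /workers/http%3A%2F%2Fworker-1%3A8000     -> 202 {"status":"accepted",...}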

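/// Bootstrap options for the HTTP server: bind address, payload limit,
/// logging, optional service discovery and Prometheus exporter, the upstream
/// request timeout, and the header names scanned for request IDs.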
pub struct ServerConfig {
    pub host: String,
    pub port: u16,
    pub router_config: RouterConfig,
    pub max_payload_size: usize,
    pub log_dir: Option<String>,
    pub log_level: Option<String>,
    pub service_discovery_config: Option<ServiceDiscoveryConfig>,
    pub prometheus_config: Option<PrometheusConfig>,
    pub request_timeout_secs: u64,
    pub request_id_headers: Option<Vec<String>>,
}

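/// Assembles the axum `Router`: inference endpoints behind auth and the
/// concurrency limiter, public health/model endpoints, and auth-protected
/// admin and worker-management endpoints, all wrapped in body-size limits,
/// logging, request-ID propagation, and CORS layers.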
pub fn build_app(
    app_state: Arc<AppState>,
    auth_config: AuthConfig,
    max_payload_size: usize,
    request_id_headers: Vec<String>,
    cors_allowed_origins: Vec<String>,
) -> Router {
    let protected_routes = Router::new()
        .route("/generate", post(generate))
        .route("/v1/chat/completions", post(v1_chat_completions))
        .route("/v1/completions", post(v1_completions))
        .route("/rerank", post(rerank))
        .route("/v1/rerank", post(v1_rerank))
        .route("/v1/responses", post(v1_responses))
        .route("/v1/embeddings", post(v1_embeddings))
        .route("/v1/classify", post(v1_classify))
        .route("/v1/responses/{response_id}", get(v1_responses_get))
        .route(
            "/v1/responses/{response_id}/cancel",
            post(v1_responses_cancel),
        )
        .route("/v1/responses/{response_id}", delete(v1_responses_delete))
        .route(
            "/v1/responses/{response_id}/input",
            get(v1_responses_list_input_items),
        )
        .route("/v1/conversations", post(v1_conversations_create))
        .route(
            "/v1/conversations/{conversation_id}",
            get(v1_conversations_get)
                .post(v1_conversations_update)
                .delete(v1_conversations_delete),
        )
        .route(
            "/v1/conversations/{conversation_id}/items",
            get(v1_conversations_list_items).post(v1_conversations_create_items),
        )
        .route(
            "/v1/conversations/{conversation_id}/items/{item_id}",
            get(v1_conversations_get_item).delete(v1_conversations_delete_item),
        )
        .route_layer(axum::middleware::from_fn_with_state(
            app_state.clone(),
            middleware::concurrency_limit_middleware,
        ))
        .route_layer(axum::middleware::from_fn_with_state(
            auth_config.clone(),
            middleware::auth_middleware,
        ));

    let public_routes = Router::new()
        .route("/liveness", get(liveness))
        .route("/readiness", get(readiness))
        .route("/health", get(health))
        .route("/health_generate", get(health_generate))
        .route("/v1/models", get(v1_models))
        .route("/get_model_info", get(get_model_info))
        .route("/get_server_info", get(get_server_info));

    let admin_routes = Router::new()
        .route("/flush_cache", post(flush_cache))
        .route("/get_loads", get(get_loads))
        .route_layer(axum::middleware::from_fn_with_state(
            auth_config.clone(),
            middleware::auth_middleware,
        ));

    let worker_routes = Router::new()
        .route("/workers", post(create_worker))
        .route("/workers", get(list_workers_rest))
        .route("/workers/{url}", get(get_worker))
        .route("/workers/{url}", delete(delete_worker))
        .route_layer(axum::middleware::from_fn_with_state(
            auth_config.clone(),
            middleware::auth_middleware,
        ));

    Router::new()
        .merge(protected_routes)
        .merge(public_routes)
        .merge(admin_routes)
        .merge(worker_routes)
        .layer(axum::extract::DefaultBodyLimit::max(max_payload_size))
        .layer(tower_http::limit::RequestBodyLimitLayer::new(
            max_payload_size,
        ))
        .layer(middleware::create_logging_layer())
        .layer(middleware::RequestIdLayer::new(request_id_headers))
        .layer(create_cors_layer(cors_allowed_origins))
        .fallback(sink_handler)
        .with_state(app_state)
}

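/// Boots the router end to end: initializes logging and optional Prometheus
/// metrics, builds the shared HTTP client and rate limiter, sets up the
/// tokenizer and parser factories (gRPC mode only), storage backends, job
/// queue, and workflow engine, queues worker initialization, starts health
/// checking, load monitoring, and optional service discovery, then binds the
/// listener and serves until a shutdown signal arrives.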
pub async fn startup(config: ServerConfig) -> Result<(), Box<dyn std::error::Error>> {
    static LOGGING_INITIALIZED: AtomicBool = AtomicBool::new(false);

    let _log_guard = if !LOGGING_INITIALIZED.swap(true, Ordering::SeqCst) {
        Some(logging::init_logging(LoggingConfig {
            level: config
                .log_level
                .as_deref()
                .and_then(|s| match s.to_uppercase().parse::<Level>() {
                    Ok(l) => Some(l),
                    Err(_) => {
                        warn!("Invalid log level string: '{s}'. Defaulting to INFO.");
                        None
                    }
                })
                .unwrap_or(Level::INFO),
            json_format: false,
            log_dir: config.log_dir.clone(),
            colorize: true,
            log_file_name: "sgl-router".to_string(),
            log_targets: None,
        }))
    } else {
        None
    };

    if let Some(prometheus_config) = &config.prometheus_config {
        metrics::start_prometheus(prometheus_config.clone());
    }

    info!(
        "Starting router on {}:{} | mode: {:?} | policy: {:?} | max_payload: {}MB",
        config.host,
        config.port,
        config.router_config.mode,
        config.router_config.policy,
        config.max_payload_size / (1024 * 1024)
    );

    let client = Client::builder()
        .pool_idle_timeout(Some(Duration::from_secs(50)))
        .pool_max_idle_per_host(500)
        .timeout(Duration::from_secs(config.request_timeout_secs))
        .connect_timeout(Duration::from_secs(10))
        .tcp_nodelay(true)
        .tcp_keepalive(Some(Duration::from_secs(30)))
        .build()
        .expect("Failed to create HTTP client");

    // Initialize rate limiter: max_concurrent_requests <= 0 disables it; otherwise
    // it seeds the TokenBucket capacity, and the refill rate falls back to that
    // capacity when rate_limit_tokens_per_second is unset or non-positive.
    let rate_limiter = match config.router_config.max_concurrent_requests {
        n if n <= 0 => None,
        n => {
            let rate_limit_tokens = config
                .router_config
                .rate_limit_tokens_per_second
                .filter(|&t| t > 0)
                .unwrap_or(n);
            Some(Arc::new(TokenBucket::new(
                n as usize,
                rate_limit_tokens as usize,
            )))
        }
    };

    // Initialize tokenizer and parser factories for gRPC mode
    let (tokenizer, reasoning_parser_factory, tool_parser_factory) = if config
        .router_config
        .connection_mode
        == ConnectionMode::Grpc
    {
        let tokenizer_path = config
            .router_config
            .tokenizer_path
            .clone()
            .or_else(|| config.router_config.model_path.clone())
            .ok_or_else(|| {
                "gRPC mode requires either --tokenizer-path or --model-path to be specified"
                    .to_string()
            })?;

        let base_tokenizer =
                tokenizer_factory::create_tokenizer_with_chat_template_blocking(
                    &tokenizer_path,
                    config.router_config.chat_template.as_deref(),
                )
                .map_err(|e| {
                    format!(
                        "Failed to create tokenizer from '{}': {}. \
                        Ensure the path is valid and points to a tokenizer file (tokenizer.json) \
                        or a HuggingFace model ID. For directories, ensure they contain tokenizer files.",
                        tokenizer_path, e
                    )
                })?;

        // Conditionally wrap with caching layer if at least one cache is enabled
        let tokenizer = if config.router_config.tokenizer_cache.enable_l0
            || config.router_config.tokenizer_cache.enable_l1
        {
            let cache_config = CacheConfig {
                enable_l0: config.router_config.tokenizer_cache.enable_l0,
                l0_max_entries: config.router_config.tokenizer_cache.l0_max_entries,
                enable_l1: config.router_config.tokenizer_cache.enable_l1,
                l1_max_memory: config.router_config.tokenizer_cache.l1_max_memory,
            };
            Some(Arc::new(CachedTokenizer::new(base_tokenizer, cache_config)) as Arc<dyn Tokenizer>)
        } else {
            // Use base tokenizer directly without caching
            Some(base_tokenizer)
        };
        let reasoning_parser_factory = Some(ReasoningParserFactory::new());
        let tool_parser_factory = Some(ToolParserFactory::new());

        (tokenizer, reasoning_parser_factory, tool_parser_factory)
    } else {
        (None, None, None)
    };

    // Initialize worker registry and policy registry
    let worker_registry = Arc::new(WorkerRegistry::new());
    let policy_registry = Arc::new(PolicyRegistry::new(config.router_config.policy.clone()));

    // Initialize storage backends
    let (response_storage, conversation_storage): (
        SharedResponseStorage,
        SharedConversationStorage,
    ) = match config.router_config.history_backend {
        HistoryBackend::Memory => {
            info!("Initializing data connector: Memory");
            (
                Arc::new(MemoryResponseStorage::new()),
                Arc::new(MemoryConversationStorage::new()),
            )
        }
        HistoryBackend::None => {
            info!("Initializing data connector: None (no persistence)");
            (
                Arc::new(NoOpResponseStorage::new()),
                Arc::new(NoOpConversationStorage::new()),
            )
        }
        HistoryBackend::Oracle => {
            let oracle_cfg = config.router_config.oracle.clone().ok_or_else(|| {
                "oracle configuration is required when history_backend=oracle".to_string()
            })?;
            info!(
                "Initializing data connector: Oracle ATP (pool: {}-{})",
                oracle_cfg.pool_min, oracle_cfg.pool_max
            );

            let response_storage = OracleResponseStorage::new(oracle_cfg.clone())
                .map_err(|err| format!("failed to initialize Oracle response storage: {err}"))?;

            let conversation_storage =
                OracleConversationStorage::new(oracle_cfg.clone()).map_err(|err| {
                    format!("failed to initialize Oracle conversation storage: {err}")
                })?;
            info!("Data connector initialized successfully: Oracle ATP");

            (Arc::new(response_storage), Arc::new(conversation_storage))
        }
    };

    // Initialize conversation items storage
    let conversation_item_storage: SharedConversationItemStorage =
        match config.router_config.history_backend {
            HistoryBackend::Oracle => {
                let oracle_cfg = config.router_config.oracle.clone().ok_or_else(|| {
                    "oracle configuration is required when history_backend=oracle".to_string()
                })?;
                Arc::new(OracleConversationItemStorage::new(oracle_cfg).map_err(|e| {
                    format!("failed to initialize Oracle conversation item storage: {e}")
                })?)
            }
            _ => Arc::new(MemoryConversationItemStorage::new()),
        };

    // Initialize load monitor
    let load_monitor = Some(Arc::new(LoadMonitor::new(
        worker_registry.clone(),
        policy_registry.clone(),
        client.clone(),
        config.router_config.worker_startup_check_interval_secs,
    )));

    // Create empty OnceLock for worker job queue and workflow engine (will be initialized below)
    let worker_job_queue = Arc::new(OnceLock::new());
    let workflow_engine = Arc::new(OnceLock::new());

    // Create AppContext with all initialized components
    let app_context = AppContext::new(
        config.router_config.clone(),
        client.clone(),
        rate_limiter,
        tokenizer,
        reasoning_parser_factory,
        tool_parser_factory,
        worker_registry,
        policy_registry,
        response_storage,
        conversation_storage,
        conversation_item_storage,
        load_monitor,
        worker_job_queue,
        workflow_engine,
    );

    let app_context = Arc::new(app_context);

    let weak_context = Arc::downgrade(&app_context);
    let worker_job_queue = JobQueue::new(JobQueueConfig::default(), weak_context);
    app_context
        .worker_job_queue
        .set(worker_job_queue)
        .expect("JobQueue should only be initialized once");

    // Initialize workflow engine and register workflows
    let engine = Arc::new(WorkflowEngine::new());

    engine
        .event_bus()
        .subscribe(Arc::new(LoggingSubscriber))
        .await;

    engine.register_workflow(create_worker_registration_workflow());
    engine.register_workflow(create_worker_removal_workflow());
    app_context
        .workflow_engine
        .set(engine)
        .expect("WorkflowEngine should only be initialized once");
    info!("Workflow engine initialized with worker registration and removal workflows");

    info!(
        "Initializing workers for routing mode: {:?}",
        config.router_config.mode
    );

    // Submit worker initialization job to queue
    let job_queue = app_context
        .worker_job_queue
        .get()
        .expect("JobQueue should be initialized");
    let job = Job::InitializeWorkersFromConfig {
        router_config: Box::new(config.router_config.clone()),
    };
    job_queue
        .submit(job)
        .await
        .map_err(|e| format!("Failed to submit worker initialization job: {}", e))?;

    let worker_stats = app_context.worker_registry.stats();
    info!(
        "Workers initialized: {} total, {} healthy",
        worker_stats.total_workers, worker_stats.healthy_workers
    );

    let router_manager = RouterManager::from_config(&config, &app_context).await?;
    let router: Arc<dyn RouterTrait> = router_manager.clone();

    let _health_checker = app_context
        .worker_registry
        .start_health_checker(config.router_config.health_check.check_interval_secs);
    info!(
        "Started health checker for workers with {}s interval",
        config.router_config.health_check.check_interval_secs
    );

    if let Some(ref load_monitor) = app_context.load_monitor {
        load_monitor.start().await;
        info!("Started LoadMonitor for PowerOfTwo policies");
    }

    let (limiter, processor) = middleware::ConcurrencyLimiter::new(
        app_context.rate_limiter.clone(),
        config.router_config.queue_size,
        Duration::from_secs(config.router_config.queue_timeout_secs),
    );

    if app_context.rate_limiter.is_none() {
        info!("Rate limiting is disabled (max_concurrent_requests <= 0)");
    }

    match processor {
        Some(proc) => {
            spawn(proc.run());
            info!(
                "Started request queue (size: {}, timeout: {}s)",
                config.router_config.queue_size, config.router_config.queue_timeout_secs
            );
        }
        None => {
            info!(
                "Rate limiting enabled (max_concurrent_requests = {}, queue disabled)",
                config.router_config.max_concurrent_requests
            );
        }
    }

    let app_state = Arc::new(AppState {
        router,
        context: app_context.clone(),
        concurrency_queue_tx: limiter.queue_tx.clone(),
        router_manager: Some(router_manager),
    });

    if let Some(service_discovery_config) = config.service_discovery_config {
        if service_discovery_config.enabled {
            let app_context_arc = Arc::clone(&app_state.context);
            match start_service_discovery(service_discovery_config, app_context_arc).await {
                Ok(handle) => {
                    info!("Service discovery started");
                    spawn(async move {
                        if let Err(e) = handle.await {
                            error!("Service discovery task failed: {:?}", e);
                        }
                    });
                }
                Err(e) => {
                    error!("Failed to start service discovery: {e}");
                    warn!("Continuing without service discovery");
                }
            }
        }
    }

    info!(
        "Router ready | workers: {:?}",
        WorkerManager::get_worker_urls(&app_state.context.worker_registry)
    );

    let request_id_headers = config.request_id_headers.clone().unwrap_or_else(|| {
        vec![
            "x-request-id".to_string(),
            "x-correlation-id".to_string(),
            "x-trace-id".to_string(),
            "request-id".to_string(),
        ]
    });

    let auth_config = AuthConfig {
        api_key: config.router_config.api_key.clone(),
    };

    let app = build_app(
        app_state,
        auth_config,
        config.max_payload_size,
        request_id_headers,
        config.router_config.cors_allowed_origins.clone(),
    );

    // TcpListener::bind accepts &str and handles IPv4/IPv6 via ToSocketAddrs
    let bind_addr = format!("{}:{}", config.host, config.port);
    info!("Starting server on {}", bind_addr);
    let listener = TcpListener::bind(&bind_addr)
        .await
        .map_err(|e| format!("Failed to bind to {}: {}", bind_addr, e))?;
    serve(listener, app)
        .with_graceful_shutdown(shutdown_signal())
        .await
        .map_err(|e| Box::new(e) as Box<dyn std::error::Error>)?;

    Ok(())
}

async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {
            info!("Received Ctrl+C, starting graceful shutdown");
        },
        _ = terminate => {
            info!("Received terminate signal, starting graceful shutdown");
        },
    }
}

fn create_cors_layer(allowed_origins: Vec<String>) -> tower_http::cors::CorsLayer {
    use tower_http::cors::Any;

    let cors = if allowed_origins.is_empty() {
        tower_http::cors::CorsLayer::new()
            .allow_origin(Any)
            .allow_methods(Any)
            .allow_headers(Any)
            .expose_headers(Any)
    } else {
        let origins: Vec<http::HeaderValue> = allowed_origins
            .into_iter()
            .filter_map(|origin| origin.parse().ok())
            .collect();

        tower_http::cors::CorsLayer::new()
            .allow_origin(origins)
            .allow_methods([http::Method::GET, http::Method::POST, http::Method::OPTIONS])
            .allow_headers([http::header::CONTENT_TYPE, http::header::AUTHORIZATION])
            .expose_headers([http::header::HeaderName::from_static("x-request-id")])
    };

    cors.max_age(Duration::from_secs(3600))
}