// PD (Prefill-Decode) Router Implementation
// This module handles routing for disaggregated prefill-decode systems
use super::pd_types::{api_path, PDRouterError};
use crate::config::types::{
    CircuitBreakerConfig as ConfigCircuitBreakerConfig,
    HealthCheckConfig as ConfigHealthCheckConfig, RetryConfig,
};
use crate::core::{
    is_retryable_status, BasicWorker, CircuitBreakerConfig, HealthChecker, HealthConfig,
    RetryExecutor, Worker, WorkerFactory, WorkerLoadGuard, WorkerType,
};
use crate::metrics::RouterMetrics;
use crate::policies::LoadBalancingPolicy;
use crate::protocols::spec::{
    ChatCompletionRequest, ChatMessage, CompletionRequest, GenerateRequest, StringOrArray,
    UserMessageContent,
};
use crate::routers::header_utils;
use crate::routers::{RouterTrait, WorkerManagement};
use async_trait::async_trait;
use axum::{
    body::Body,
    extract::Request,
    http::{header::CONTENT_TYPE, HeaderMap, HeaderValue, StatusCode},
    response::{IntoResponse, Response},
    Json,
};
use futures_util::StreamExt;
use reqwest::Client;
use serde::Serialize;
use serde_json::{json, Value};
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
use tokio::sync::mpsc;
use tokio_stream::wrappers::UnboundedReceiverStream;
use tracing::{debug, error, info, warn};

#[derive(Debug)]
pub struct PDRouter {
    pub prefill_workers: Arc<RwLock<Vec<Box<dyn Worker>>>>,
    pub decode_workers: Arc<RwLock<Vec<Box<dyn Worker>>>>,
    pub prefill_policy: Arc<dyn LoadBalancingPolicy>,
    pub decode_policy: Arc<dyn LoadBalancingPolicy>,
    pub timeout_secs: u64,
    pub interval_secs: u64,
    pub worker_loads: Arc<tokio::sync::watch::Receiver<HashMap<String, isize>>>,
    pub load_monitor_handle: Option<Arc<tokio::task::JoinHandle<()>>>,
    pub client: Client,
    // Dedicated client for prefill fire-and-forget (non-logprob) requests
    pub prefill_client: Client,
    pub retry_config: RetryConfig,
    pub circuit_breaker_config: CircuitBreakerConfig,
    _prefill_health_checker: Option<HealthChecker>,
    _decode_health_checker: Option<HealthChecker>,
    // Channel for sending prefill responses to background workers for draining
    prefill_drain_tx: mpsc::Sender<reqwest::Response>,
}
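
// A hedged usage sketch (illustrative, not from this file): a PDRouter is
// normally built through `PDRouter::new` defined below. URLs, ports, and
// numeric values here are placeholders.
//
//     let router = PDRouter::new(
//         vec![("http://prefill-0:30000".to_string(), Some(8998))], // prefill URLs + bootstrap ports
//         vec!["http://decode-0:30001".to_string()],                // decode URLs
//         prefill_policy, // Arc<dyn LoadBalancingPolicy>
//         decode_policy,
//         reqwest::Client::new(),
//         300, // timeout_secs for startup health checks
//         10,  // interval_secs between health probes
//         retry_config,
//         circuit_breaker_config,
//         health_check_config,
//     )
//     .await?;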

// Request context for PD router operations
#[derive(Clone)]
struct PDRequestContext {
    route: &'static str,
    batch_size: Option<usize>,
    is_stream: bool,
    return_logprob: bool,
    request_text: Option<String>,
}

impl PDRouter {
    // Dynamic worker management methods for service discovery

    // Private helper method to perform health check on a new server
    async fn wait_for_server_health(&self, url: &str) -> Result<(), PDRouterError> {
        crate::routers::http::router::Router::wait_for_healthy_workers(
            &[url.to_string()],
            self.timeout_secs,
            self.interval_secs,
        )
        .await
        .map_err(|_| PDRouterError::HealthCheckFailed {
            url: url.to_string(),
        })
    }

    // Generic helper for processing all workers with an endpoint
    async fn process_workers(
        &self,
        workers: &RwLock<Vec<Box<dyn Worker>>>,
        worker_type: &str,
        endpoint: &str,
    ) -> (Vec<String>, Vec<String>) {
        let mut results = Vec::new();
        let mut errors = Vec::new();

        // Get worker URLs first to avoid holding lock across await
        let urls = match workers.read() {
            Ok(workers) => workers
                .iter()
                .map(|w| w.url().to_string())
                .collect::<Vec<_>>(),
            Err(_) => {
                errors.push(format!("Failed to access {} workers", worker_type));
                Vec::new()
            }
        };

        // Process each worker
        for worker_url in urls {
            let url = format!("{}/{}", worker_url, endpoint);
            match self.client.post(&url).send().await {
                Ok(res) if res.status().is_success() => {
                    results.push(format!("{} {}: OK", worker_type, worker_url));
                }
                Ok(res) => {
                    errors.push(format!(
                        "{} {} returned status: {}",
                        worker_type,
                        worker_url,
                        res.status()
                    ));
                }
                Err(e) => {
                    errors.push(format!("{} {} error: {}", worker_type, worker_url, e));
                }
            }
        }

        (results, errors)
    }

    // Helper to get worker URLs from a worker collection
    fn get_worker_urls(
        workers: &RwLock<Vec<Box<dyn Worker>>>,
        worker_type: &str,
    ) -> Result<Vec<String>, String> {
        workers
            .read()
            .map(|workers| {
                workers
                    .iter()
                    .map(|w| w.url().to_string())
                    .collect::<Vec<_>>()
            })
            .map_err(|_| format!("Failed to access {} workers", worker_type))
    }

    // Generic helper for proxying requests to the first worker
    async fn proxy_to_first_worker(
        &self,
        workers: &RwLock<Vec<Box<dyn Worker>>>,
        endpoint: &str,
        worker_type: &str,
        headers: Option<Vec<(String, String)>>,
    ) -> Response {
        // Get first worker URL to avoid holding lock across await
        let first_worker_url = match workers.read() {
            Ok(workers) => workers.first().map(|w| w.url().to_string()),
            Err(_) => {
                return (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    format!("Failed to access {} workers", worker_type),
                )
                    .into_response();
            }
        };

        if let Some(worker_url) = first_worker_url {
            let url = format!("{}/{}", worker_url, endpoint);
            let mut request_builder = self.client.get(&url);

            // Add headers if provided
            if let Some(headers) = headers {
                for (name, value) in headers {
                    request_builder = request_builder.header(name, value);
                }
            }

            match request_builder.send().await {
                Ok(res) if res.status().is_success() => {
                    let response_headers = header_utils::preserve_response_headers(res.headers());

                    match res.bytes().await {
                        Ok(body) => {
                            let mut response = Response::new(axum::body::Body::from(body));
                            *response.status_mut() = StatusCode::OK;
                            *response.headers_mut() = response_headers;
                            response
                        }
                        Err(e) => {
                            error!("Failed to read response body: {}", e);
                            (
                                StatusCode::INTERNAL_SERVER_ERROR,
                                format!("Failed to read response body: {}", e),
                            )
                                .into_response()
                        }
                    }
                }
                Ok(res) => {
                    let status = StatusCode::from_u16(res.status().as_u16())
                        .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                    (
                        status,
                        format!("{} server returned status: {}", worker_type, res.status()),
                    )
                        .into_response()
                }
                Err(e) => {
                    error!("Failed to proxy request to {} server: {}", worker_type, e);
                    (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        format!("Failed to proxy request: {}", e),
                    )
                        .into_response()
                }
            }
        } else {
            (
                StatusCode::SERVICE_UNAVAILABLE,
                format!("No {} servers available", worker_type),
            )
                .into_response()
        }
    }

    pub async fn add_prefill_server(
        &self,
        url: String,
        bootstrap_port: Option<u16>,
    ) -> Result<String, PDRouterError> {
        // Wait for the new server to be healthy
        self.wait_for_server_health(&url).await?;

        // Create Worker for the new prefill server with circuit breaker configuration
        let worker = WorkerFactory::create_prefill_with_config(
            url.clone(),
            bootstrap_port,
            self.circuit_breaker_config.clone(),
        );

        // Add to prefill workers list
        let mut workers = self
            .prefill_workers
            .write()
            .map_err(|_| PDRouterError::LockError {
                operation: "prefill_workers write".to_string(),
            })?;

        // Check if already exists
        if workers.iter().any(|w| w.url() == url) {
            return Err(PDRouterError::WorkerAlreadyExists { url: url.clone() });
        }

        workers.push(worker);

        // Update cache-aware policy if applicable
        drop(workers); // Release write lock
        if let Some(cache_policy) = self
            .prefill_policy
            .as_any()
            .downcast_ref::<crate::policies::CacheAwarePolicy>()
        {
            cache_policy.add_worker(&url);
        }

        info!("Added prefill server: {}", url);
        Ok(format!("Successfully added prefill server: {}", url))
    }

    pub async fn add_decode_server(&self, url: String) -> Result<String, PDRouterError> {
        // Wait for the new server to be healthy
        self.wait_for_server_health(&url).await?;

        // Create Worker for the new decode server with circuit breaker configuration
        let worker = WorkerFactory::create_decode_with_config(
            url.clone(),
            self.circuit_breaker_config.clone(),
        );

        // Add to decode workers list
        let mut workers = self
            .decode_workers
            .write()
            .map_err(|_| PDRouterError::LockError {
                operation: "decode_workers write".to_string(),
            })?;

        // Check if already exists
        if workers.iter().any(|w| w.url() == url) {
            return Err(PDRouterError::WorkerAlreadyExists { url: url.clone() });
        }

        workers.push(worker);

        // Update cache-aware policy if applicable
        drop(workers); // Release write lock
        if let Some(cache_policy) = self
            .decode_policy
            .as_any()
            .downcast_ref::<crate::policies::CacheAwarePolicy>()
        {
            cache_policy.add_worker(&url);
        }

        info!("Added decode server: {}", url);
        Ok(format!("Successfully added decode server: {}", url))
    }

    pub async fn remove_prefill_server(&self, url: &str) -> Result<String, PDRouterError> {
        let mut workers = self
            .prefill_workers
            .write()
            .map_err(|_| PDRouterError::LockError {
                operation: "prefill_workers write".to_string(),
            })?;

        // Find and remove the server
        let initial_len = workers.len();
        workers.retain(|w| w.url() != url);

        if workers.len() == initial_len {
            return Err(PDRouterError::WorkerNotFound {
                url: url.to_string(),
            });
        }

        // Remove from cache-aware policy if applicable
        if let Some(cache_policy) = self
            .prefill_policy
            .as_any()
            .downcast_ref::<crate::policies::CacheAwarePolicy>()
        {
            cache_policy.remove_worker(url);
        }

        info!("Removed prefill server: {}", url);
        Ok(format!("Successfully removed prefill server: {}", url))
    }

    pub async fn remove_decode_server(&self, url: &str) -> Result<String, PDRouterError> {
        let mut workers = self
            .decode_workers
            .write()
            .map_err(|_| PDRouterError::LockError {
                operation: "decode_workers write".to_string(),
            })?;

        // Find and remove the server
        let initial_len = workers.len();
        workers.retain(|w| w.url() != url);

        if workers.len() == initial_len {
            return Err(PDRouterError::WorkerNotFound {
                url: url.to_string(),
            });
        }

        // Remove from cache-aware policy if applicable
        if let Some(cache_policy) = self
            .decode_policy
            .as_any()
            .downcast_ref::<crate::policies::CacheAwarePolicy>()
        {
            cache_policy.remove_worker(url);
        }

        info!("Removed decode server: {}", url);
        Ok(format!("Successfully removed decode server: {}", url))
    }

    #[allow(clippy::too_many_arguments)]
    pub async fn new(
        prefill_urls: Vec<(String, Option<u16>)>,
        decode_urls: Vec<String>,
        prefill_policy: Arc<dyn LoadBalancingPolicy>,
        decode_policy: Arc<dyn LoadBalancingPolicy>,
        client: Client,
        timeout_secs: u64,
        interval_secs: u64,
        retry_config: RetryConfig,
        circuit_breaker_config: ConfigCircuitBreakerConfig,
        health_check_config: ConfigHealthCheckConfig,
    ) -> Result<Self, String> {
        // Convert config CircuitBreakerConfig to core CircuitBreakerConfig
        let core_cb_config = CircuitBreakerConfig {
            failure_threshold: circuit_breaker_config.failure_threshold,
            success_threshold: circuit_breaker_config.success_threshold,
            timeout_duration: Duration::from_secs(circuit_breaker_config.timeout_duration_secs),
            window_duration: Duration::from_secs(circuit_breaker_config.window_duration_secs),
        };

        // Convert URLs to Worker trait objects with health check config
        let prefill_workers: Vec<Box<dyn Worker>> = prefill_urls
            .into_iter()
            .map(|(url, port)| {
                let worker = BasicWorker::new(
                    url,
                    WorkerType::Prefill {
                        bootstrap_port: port,
                    },
                )
                .with_circuit_breaker_config(core_cb_config.clone())
                .with_health_config(HealthConfig {
                    timeout_secs: health_check_config.timeout_secs,
                    check_interval_secs: health_check_config.check_interval_secs,
                    endpoint: health_check_config.endpoint.clone(),
                    failure_threshold: health_check_config.failure_threshold,
                    success_threshold: health_check_config.success_threshold,
                });
                Box::new(worker) as Box<dyn Worker>
            })
            .collect();

        let decode_workers: Vec<Box<dyn Worker>> = decode_urls
            .into_iter()
            .map(|url| {
                let worker = BasicWorker::new(url, WorkerType::Decode)
                    .with_circuit_breaker_config(core_cb_config.clone())
                    .with_health_config(HealthConfig {
                        timeout_secs: health_check_config.timeout_secs,
                        check_interval_secs: health_check_config.check_interval_secs,
                        endpoint: health_check_config.endpoint.clone(),
                        failure_threshold: health_check_config.failure_threshold,
                        success_threshold: health_check_config.success_threshold,
                    });
                Box::new(worker) as Box<dyn Worker>
            })
            .collect();

        // Wait for PD workers to be healthy (skip if empty - for service discovery mode)
        let all_urls: Vec<String> = prefill_workers
            .iter()
            .chain(decode_workers.iter())
            .map(|worker| worker.url().to_string())
            .collect();
        if !all_urls.is_empty() {
            crate::routers::http::router::Router::wait_for_healthy_workers(
                &all_urls,
                timeout_secs,
                interval_secs,
            )
            .await?;
        }

        // Initialize cache-aware policies with workers
        if let Some(cache_policy) = prefill_policy
            .as_any()
            .downcast_ref::<crate::policies::CacheAwarePolicy>()
        {
            cache_policy.init_workers(&prefill_workers);
        }

        if let Some(cache_policy) = decode_policy
            .as_any()
            .downcast_ref::<crate::policies::CacheAwarePolicy>()
        {
            cache_policy.init_workers(&decode_workers);
        }

        // Set up background load monitoring for power-of-two selection
        let (tx, rx) = tokio::sync::watch::channel(HashMap::new());
        let worker_loads = Arc::new(rx);

        let load_monitor_handle =
            if prefill_policy.name() == "power_of_two" || decode_policy.name() == "power_of_two" {
                let monitor_urls = all_urls.clone();
                let monitor_interval = interval_secs;
                let monitor_client = client.clone();
                let prefill_policy_clone = Arc::clone(&prefill_policy);
                let decode_policy_clone = Arc::clone(&decode_policy);

                Some(Arc::new(tokio::spawn(async move {
                    Self::monitor_worker_loads_with_client(
                        monitor_urls,
                        tx,
                        monitor_interval,
                        monitor_client,
                        prefill_policy_clone,
                        decode_policy_clone,
                    )
                    .await;
                })))
            } else {
                None
            };

        let prefill_workers = Arc::new(RwLock::new(prefill_workers));
        let decode_workers = Arc::new(RwLock::new(decode_workers));

        // Start health checkers for both worker pools
        let prefill_health_checker = crate::core::start_health_checker(
            Arc::clone(&prefill_workers),
            health_check_config.check_interval_secs,
        );
        let decode_health_checker = crate::core::start_health_checker(
            Arc::clone(&decode_workers),
            health_check_config.check_interval_secs,
        );

        // Build a dedicated prefill client for fire-and-forget semantics
        let prefill_client = reqwest::Client::builder()
            .pool_max_idle_per_host(0)
            .http1_only()
            .connect_timeout(Duration::from_millis(300))
            .timeout(Duration::from_secs(2))
            .build()
            .map_err(|e| format!("Failed to build prefill client: {}", e))?;
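
        // Design note: `pool_max_idle_per_host(0)` disables connection reuse, so
        // each fire-and-forget prefill request gets its own connection that can
        // be torn down once the body is drained, and `http1_only()` avoids
        // HTTP/2 multiplexing so closing a connection affects only one request.
        // The short connect/request timeouts keep a slow prefill server from
        // stalling dispatch.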

        // Create bounded channel for prefill response draining
        // Larger buffer for high concurrency scenarios
        let (prefill_drain_tx, mut prefill_drain_rx) = mpsc::channel::<reqwest::Response>(2000);

        // Spawn a coordinator with limited concurrent drain tasks
        // This prevents unbounded task spawning under extreme load
        tokio::spawn(async move {
            info!("Prefill drain coordinator started");

            // Use a semaphore to limit concurrent drain operations
            let max_concurrent_drains = 100;
            let semaphore = Arc::new(tokio::sync::Semaphore::new(max_concurrent_drains));

            while let Some(response) = prefill_drain_rx.recv().await {
                let permit = semaphore.clone().acquire_owned().await;

                match permit {
                    Ok(permit) => {
                        // Spawn a task to drain this response
                        tokio::spawn(async move {
                            let url = response.url().to_string();
                            let status = response.status();

                            if !status.is_success() {
                                error!("Prefill drain: error status={} url={}", status, url);
                                RouterMetrics::record_pd_prefill_error(&url);
                            }

                            // Drain the response body efficiently
                            // Use streaming to avoid loading entire body into memory
                            let start = std::time::Instant::now();
                            let mut stream = response.bytes_stream();
                            let mut bytes_drained = 0;

                            while let Some(chunk_result) = stream.next().await {
                                match chunk_result {
                                    Ok(chunk) => bytes_drained += chunk.len(),
                                    Err(e) => {
                                        debug!(
                                            "Prefill drain: error streaming url={} error={}",
                                            url, e
                                        );
                                        break;
                                    }
                                }
                            }

                            let elapsed = start.elapsed();
                            if elapsed > Duration::from_millis(100) {
                                // Only log slow drains
                                debug!(
                                    "Prefill drain: slow drain {} bytes from {} in {:?}",
                                    bytes_drained, url, elapsed
                                );
                            }

                            // Permit is automatically released when dropped
                            drop(permit);
                        });
                    }
                    Err(_) => {
                        // Semaphore closed, shutting down
                        break;
                    }
                }
            }
            info!("Prefill drain coordinator shutting down");
        });
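
        // Design note: the bounded channel (2000 entries) plus the semaphore
        // (100 permits) cap both queued and in-flight drain work. When the
        // channel fills up under extreme load, the dispatch path falls back to
        // draining inline with a 1s timeout (see execute_dual_dispatch_internal)
        // rather than leaving the response body unread.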

        Ok(PDRouter {
            prefill_workers,
            decode_workers,
            prefill_policy,
            decode_policy,
            timeout_secs,
            interval_secs,
            worker_loads,
            load_monitor_handle,
            client,
            prefill_client,
            prefill_drain_tx,
            retry_config,
            circuit_breaker_config: core_cb_config,
            _prefill_health_checker: Some(prefill_health_checker),
            _decode_health_checker: Some(decode_health_checker),
        })
    }

    // Helper to handle server selection errors
    fn handle_server_selection_error(error: String) -> Response {
        error!("Failed to select PD pair error={}", error);
        RouterMetrics::record_pd_error("server_selection");
        (
            StatusCode::SERVICE_UNAVAILABLE,
            format!("No available servers: {}", error),
        )
            .into_response()
    }

    // Helper to handle serialization errors
    fn handle_serialization_error(error: impl std::fmt::Display) -> Response {
        error!("Failed to serialize request error={}", error);
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            "Failed to serialize request",
        )
            .into_response()
    }

    // Helper to determine batch size from a GenerateRequest
    fn get_generate_batch_size(req: &GenerateRequest) -> Option<usize> {
        // Check prompt array
        if let Some(StringOrArray::Array(arr)) = &req.prompt {
            if !arr.is_empty() {
                return Some(arr.len());
            }
        }
        // Heuristic check on the text field: a bracketed value may hold a JSON
        // array, but we don't parse it here, so fall back to non-batch handling.
        if let Some(text) = &req.text {
            if text.contains('[') && text.contains(']') {
                return None;
            }
        }
        None
    }

    // Helper to determine batch size from a ChatCompletionRequest
    fn get_chat_batch_size(req: &ChatCompletionRequest) -> Option<usize> {
        // Check 'n' parameter for multiple responses
        if let Some(n) = req.n {
            if n > 1 {
                return Some(n as usize);
            }
        }
        None
    }

    // Helper to determine batch size from a CompletionRequest
    fn get_completion_batch_size(req: &CompletionRequest) -> Option<usize> {
        // Check prompt array
        if let StringOrArray::Array(arr) = &req.prompt {
            if !arr.is_empty() {
                return Some(arr.len());
            }
        }
        None
    }
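
    // Illustrative examples for the three batch-size helpers above
    // (field values are placeholders):
    //   GenerateRequest { prompt: Some(Array(["a", "b", "c"])), .. }  -> Some(3)
    //   ChatCompletionRequest { n: Some(4), .. }                      -> Some(4)
    //   CompletionRequest { prompt: String("hello"), .. }             -> None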

    // Helper to inject bootstrap fields into an existing JSON request value
    fn inject_bootstrap_into_value(
        mut original: Value,
        prefill_worker: &dyn Worker,
        batch_size: Option<usize>,
    ) -> Result<Value, String> {
        let bootstrap_port = match prefill_worker.worker_type() {
            crate::core::WorkerType::Prefill { bootstrap_port } => bootstrap_port,
            _ => None,
        };
        let hostname = super::pd_types::get_hostname(prefill_worker.url());

        let obj = original
            .as_object_mut()
            .ok_or_else(|| "Request must be a JSON object".to_string())?;

        if let Some(n) = batch_size {
            let mut hosts = Vec::with_capacity(n);
            let mut ports = Vec::with_capacity(n);
            let mut rooms = Vec::with_capacity(n);
            for _ in 0..n {
                hosts.push(hostname.clone());
                ports.push(bootstrap_port);
                rooms.push(super::pd_types::generate_room_id());
            }
            obj.insert(
                "bootstrap_host".to_string(),
                Value::Array(hosts.into_iter().map(serde_json::Value::from).collect()),
            );
            obj.insert(
                "bootstrap_port".to_string(),
                Value::Array(
                    ports
                        .into_iter()
                        .map(|p| match p {
                            Some(v) => serde_json::Value::from(v),
                            None => Value::Null,
                        })
                        .collect(),
                ),
            );
            obj.insert(
                "bootstrap_room".to_string(),
                Value::Array(rooms.into_iter().map(serde_json::Value::from).collect()),
            );
        } else {
            obj.insert(
                "bootstrap_host".to_string(),
                serde_json::Value::from(hostname),
            );
            obj.insert(
                "bootstrap_port".to_string(),
                match bootstrap_port {
                    Some(v) => serde_json::Value::from(v),
                    None => Value::Null,
                },
            );
            obj.insert(
                "bootstrap_room".to_string(),
                serde_json::Value::from(super::pd_types::generate_room_id()),
            );
        }
        Ok(original)
    }
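
    // Illustrative shape of the injected bootstrap fields (host, port, and room
    // values are placeholders). Single request:
    //   {"text": "...", "bootstrap_host": "prefill-0",
    //    "bootstrap_port": 8998, "bootstrap_room": 123456789}
    // Batch of n = 2 (host/port repeated, one fresh room id per element):
    //   {"text": ["...", "..."],
    //    "bootstrap_host": ["prefill-0", "prefill-0"],
    //    "bootstrap_port": [8998, 8998],
    //    "bootstrap_room": [123456789, 987654321]}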

    // Execute the dual dispatch to prefill and decode servers with retries and bootstrap injection
    async fn execute_dual_dispatch<T: Serialize + Clone>(
        &self,
        headers: Option<&HeaderMap>,
        original_request: &T,
        context: PDRequestContext,
    ) -> Response {
        let start_time = Instant::now();

        let route = context.route;
        RetryExecutor::execute_response_with_retry(
            &self.retry_config,
            // Operation per attempt
            {
                let original_request = original_request.clone();
                move |attempt: u32| {
                    let original_request = original_request.clone();
                    let context = context.clone();
                    async move {
                        // Select workers fresh for each attempt
                        let (prefill, decode) =
                            match self.select_pd_pair(context.request_text.as_deref()).await {
                                Ok(pair) => pair,
                                Err(e) => {
                                    // handle_server_selection_error records the metric
                                    return Self::handle_server_selection_error(e);
                                }
                            };

                        debug!(
                            "PD retry attempt {} using prefill={} decode={}",
                            attempt,
                            prefill.url(),
                            decode.url()
                        );

                        // Serialize the original request
                        let mut json_request = match serde_json::to_value(&original_request) {
                            Ok(v) => v,
                            Err(e) => return Self::handle_serialization_error(e),
                        };

                        // Inject bootstrap based on current prefill worker
                        json_request = match Self::inject_bootstrap_into_value(
                            json_request,
                            prefill.as_ref(),
                            context.batch_size,
                        ) {
                            Ok(v) => v,
                            Err(e) => return Self::handle_serialization_error(e),
                        };

                        // Execute the actual dual dispatch
                        let response = self
                            .execute_dual_dispatch_internal(
                                headers,
                                json_request,
                                context,
                                prefill.as_ref(),
                                decode.as_ref(),
                                start_time,
                            )
                            .await;

                        // Record outcomes for circuit breakers
                        let status = response.status();
                        let not_error = status.is_success() || status.is_client_error();
                        prefill.record_outcome(not_error);
                        decode.record_outcome(not_error);

                        response
                    }
                }
            },
            // Should retry predicate
            |res, _attempt| is_retryable_status(res.status()),
            // On backoff hook
            |delay, attempt| {
                RouterMetrics::record_retry(route);
                RouterMetrics::record_retry_backoff_duration(delay, attempt);
            },
            // On exhausted hook
            || RouterMetrics::record_retries_exhausted(route),
        )
        .await
    }
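
    // Note on retry semantics: every attempt re-runs worker selection and
    // bootstrap injection, so a retry may land on a different prefill/decode
    // pair and always carries fresh bootstrap_room ids. Circuit-breaker
    // outcomes are recorded per attempt, with 4xx treated as "not an error"
    // so client mistakes don't trip the breaker.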

    async fn handle_decode_error_response(
        &self,
        res: reqwest::Response,
        context: &PDRequestContext,
        prefill: &dyn Worker,
        decode: &dyn Worker,
    ) -> Response {
        let status = res.status();

        if context.is_stream {
            // Handle streaming error response
            let response_headers = header_utils::preserve_response_headers(res.headers());
            let error_payload = match res.bytes().await {
                Ok(error_body) => {
                    if let Ok(error_json) = serde_json::from_slice::<Value>(&error_body) {
                        json!({ "message": error_json, "status": status.as_u16() })
                    } else {
                        json!({ "message": String::from_utf8_lossy(&error_body).to_string(), "status": status.as_u16() })
                    }
                }
                Err(e) => {
                    json!({ "message": format!("Decode server error: {}", e), "status": status.as_u16() })
                }
            };

            let sse_data = format!(
                "data: {}",
                serde_json::to_string(&json!({ "error": error_payload })).unwrap_or_default()
            );
            let error_stream = tokio_stream::once(Ok(axum::body::Bytes::from(sse_data)));

            let decode_url = decode.url().to_string();
            self.create_streaming_response(
                error_stream,
                status,
                None,
                context.return_logprob,
                Some(decode_url),
                Some(response_headers),
                prefill,
                decode,
            )
        } else {
            // Handle non-streaming error response
            match res.bytes().await {
                Ok(error_body) => (status, error_body).into_response(),
                Err(e) => (status, format!("Decode server error: {}", e)).into_response(),
            }
        }
    }
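
    // Illustrative SSE error frame emitted by the streaming branch above
    // (message and status values are placeholders):
    //   data: {"error":{"message":"Decode server error: ...","status":502}}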

    // Internal method that performs the actual dual dispatch (without retry logic)
    async fn execute_dual_dispatch_internal(
        &self,
        headers: Option<&HeaderMap>,
        json_request: Value,
        context: PDRequestContext,
        prefill: &dyn Worker,
        decode: &dyn Worker,
        start_time: Instant,
    ) -> Response {
        // For non-streaming: use guard for automatic load management
        // For streaming: load will be managed in create_streaming_response
        let _guard = if !context.is_stream {
            Some(WorkerLoadGuard::new_multi(vec![prefill, decode]))
        } else {
            None
        };

        // Build decode request with shared client
        let decode_request = self.build_post_with_headers(
            &self.client,
            decode.url(),
            context.route,
            &json_request,
            headers,
            false,
        );

        // Send both requests concurrently
        debug!(
            "Sending concurrent requests to prefill={} decode={}",
            prefill.url(),
            decode.url()
        );

        if context.return_logprob {
            // Build prefill request with shared client when we need response body
            let prefill_request = self.build_post_with_headers(
                &self.client,
                prefill.url(),
                context.route,
                &json_request,
                headers,
                false,
            );
            // When we need logprobs, wait for both responses
            let (prefill_result, decode_result) =
                tokio::join!(prefill_request.send(), decode_request.send());
            debug!("Received responses from both servers");

            // Update metrics
            let duration = start_time.elapsed();
            RouterMetrics::record_pd_request_duration(context.route, duration);
            RouterMetrics::record_pd_request(context.route);
            RouterMetrics::record_pd_prefill_request(prefill.url());
            RouterMetrics::record_pd_decode_request(decode.url());

            // Process decode response with prefill for logprobs
            debug!("Processing decode response with logprobs");
            match decode_result {
                Ok(res) => {
                    let status = StatusCode::from_u16(res.status().as_u16())
                        .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                    debug!("Decode response status: {}", status);

                    if !status.is_success() {
                        RouterMetrics::record_pd_decode_error(decode.url());
                        error!(
                            "Decode server returned error status decode_url={} status={}",
                            decode.url(),
                            status
                        );

                        return self
                            .handle_decode_error_response(res, &context, prefill, decode)
                            .await;
                    }

                    // Process prefill response for logprobs
                    let prefill_body = match self
                        .process_prefill_response(
                            prefill_result,
                            prefill.url(),
                            context.return_logprob,
                        )
                        .await
                    {
                        Ok((_, body)) => body,
                        Err(error_response) => return error_response,
                    };

                    if context.is_stream {
                        // Streaming response with logprobs
                        let prefill_logprobs = prefill_body
                            .as_ref()
                            .and_then(|body| serde_json::from_slice::<Value>(body).ok())
                            .and_then(|json| {
                                json.pointer("/meta_info/input_token_logprobs").cloned()
                            });

                        let response_headers =
                            header_utils::preserve_response_headers(res.headers());

                        self.create_streaming_response(
                            res.bytes_stream(),
                            status,
                            prefill_logprobs,
                            context.return_logprob,
                            None,
                            Some(response_headers),
                            prefill,
                            decode,
                        )
                    } else {
                        // Non-streaming response with logprobs
                        self.process_non_streaming_response(
                            res,
                            status,
                            context.return_logprob,
                            prefill_body,
                        )
                        .await
                    }
                }
                Err(e) => {
                    error!(
                        decode_url = %decode.url(),
                        error = %e,
                        "Decode request failed"
                    );
                    RouterMetrics::record_pd_decode_error(decode.url());
                    (
                        StatusCode::BAD_GATEWAY,
                        format!("Decode server error: {}", e),
                    )
                        .into_response()
                }
            }
        } else {
            // When we don't need logprobs, only wait for decode response
            // Send both requests concurrently but don't wait for prefill
            // Use dedicated prefill client with Connection: close
            let prefill_future = self
                .build_post_with_headers(
                    &self.prefill_client,
                    prefill.url(),
                    context.route,
                    &json_request,
                    headers,
                    true,
                )
                .send();
            let decode_future = decode_request.send();

            // Send prefill response to background worker for draining
            // This ensures HTTP compliance without blocking
            let drain_tx = self.prefill_drain_tx.clone();
            let prefill_url = prefill.url().to_string();
            tokio::spawn(async move {
                if let Ok(response) = prefill_future.await {
                    // Try to send to drain worker
                    // If channel is full (under extreme load), drain inline as fallback
                    match drain_tx.try_send(response) {
                        Ok(_) => {
                            // Successfully queued for draining
                            debug!("Prefill response queued for draining");
                        }
                        Err(mpsc::error::TrySendError::Full(response)) => {
                            // Channel full - drain inline as fallback
                            warn!("Prefill drain channel full (capacity exceeded), draining inline for {}", prefill_url);
                            RouterMetrics::record_pd_prefill_error(&prefill_url);

                            // Drain inline with timeout to prevent blocking too long
                            let drain_future = async {
                                let mut stream = response.bytes_stream();
                                while stream.next().await.is_some() {
                                    // Just drain
                                }
                            };

                            match tokio::time::timeout(Duration::from_secs(1), drain_future).await {
                                Ok(_) => debug!("Inline drain completed for {}", prefill_url),
                                Err(_) => error!("Inline drain timeout for {}", prefill_url),
                            }
                        }
                        Err(mpsc::error::TrySendError::Closed(_)) => {
                            error!("Prefill drain channel closed!");
                        }
                    }
                }
            });

            // Wait only for decode response
            let decode_result = decode_future.await;
            debug!("Received decode response");

            // Update metrics
            let duration = start_time.elapsed();
            RouterMetrics::record_pd_request_duration(context.route, duration);
            RouterMetrics::record_pd_request(context.route);
            RouterMetrics::record_pd_prefill_request(prefill.url());
            RouterMetrics::record_pd_decode_request(decode.url());

            // Process decode response immediately
            debug!("Processing decode response (no logprobs)");
            match decode_result {
                Ok(res) => {
                    let status = StatusCode::from_u16(res.status().as_u16())
                        .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                    debug!("Decode response status: {}", status);

                    if !status.is_success() {
                        RouterMetrics::record_pd_decode_error(decode.url());
                        error!(
                            "Decode server returned error status decode_url={} status={}",
                            decode.url(),
                            status
                        );

                        self.handle_decode_error_response(res, &context, prefill, decode)
                            .await
                    } else if context.is_stream {
                        // Streaming response without logprobs - direct passthrough
                        let decode_url = decode.url().to_string();
                        let response_headers =
                            header_utils::preserve_response_headers(res.headers());

                        self.create_streaming_response(
                            res.bytes_stream(),
                            status,
                            None,
                            false,
                            Some(decode_url),
                            Some(response_headers),
                            prefill,
                            decode,
                        )
                    } else {
                        // Non-streaming response without logprobs - direct passthrough
                        let response_headers =
                            header_utils::preserve_response_headers(res.headers());

                        match res.bytes().await {
                            Ok(decode_body) => {
                                let mut response =
                                    Response::new(axum::body::Body::from(decode_body));
                                *response.status_mut() = status;
                                *response.headers_mut() = response_headers;
                                response
                            }
                            Err(e) => {
                                error!("Failed to read decode response: {}", e);
                                (StatusCode::INTERNAL_SERVER_ERROR, "Failed to read response")
                                    .into_response()
                            }
                        }
                    }
                }
                Err(e) => {
                    error!(
                        decode_url = %decode.url(),
                        error = %e,
                        "Decode request failed"
                    );
                    RouterMetrics::record_pd_decode_error(decode.url());
                    (
                        StatusCode::BAD_GATEWAY,
                        format!("Decode server error: {}", e),
                    )
                        .into_response()
                }
            }
        }
    }

    // Check if either prefill or decode policy needs request text
    fn policies_need_request_text(&self) -> bool {
        self.prefill_policy.needs_request_text() || self.decode_policy.needs_request_text()
    }

    // Select a pair of prefill and decode servers considering circuit breaker state
    async fn select_pd_pair(
        &self,
        request_text: Option<&str>,
    ) -> Result<(Box<dyn Worker>, Box<dyn Worker>), String> {
        // Get read locks for both worker lists
        let prefill_workers = self
            .prefill_workers
            .read()
            .map_err(|e| format!("Failed to acquire prefill workers lock: {}", e))?;
        let decode_workers = self
            .decode_workers
            .read()
            .map_err(|e| format!("Failed to acquire decode workers lock: {}", e))?;

        // Select workers using helper function
        let prefill = Self::pick_worker_by_policy(
            &prefill_workers,
            &*self.prefill_policy,
            request_text,
            "prefill",
        )?;

        let decode = Self::pick_worker_by_policy(
            &decode_workers,
            &*self.decode_policy,
            request_text,
            "decode",
        )?;

        Ok((prefill, decode))
    }

    // Helper function to select a worker using the policy
    fn pick_worker_by_policy(
        workers: &[Box<dyn Worker>],
        policy: &dyn LoadBalancingPolicy,
        request_text: Option<&str>,
        worker_type: &str,
    ) -> Result<Box<dyn Worker>, String> {
        // Check if we have any workers
        if workers.is_empty() {
            return Err(format!(
                "No {} workers available. Please check if {} servers are configured and healthy.",
                worker_type, worker_type
            ));
        }

        // Filter available workers (healthy + circuit breaker not open)
        let available_workers: Vec<Box<dyn Worker>> = workers
            .iter()
            .filter(|w| w.is_available())
            .map(|w| w.clone_worker())
            .collect();

        if available_workers.is_empty() {
            return Err(format!(
                "No available {} workers (all circuits open or unhealthy)",
                worker_type
            ));
        }

        // Let policy select from available workers only
        match policy.select_worker(&available_workers, request_text) {
            Some(idx) => Ok(available_workers[idx].clone_worker()),
            None => Err(format!("Policy could not select a {} worker", worker_type)),
        }
    }
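
    // Note: selection is two-phase. Workers are first filtered on
    // `is_available()` (healthy and circuit breaker not open), then the policy
    // picks an index into the *filtered* list, so a policy can never return an
    // unavailable worker. The clone handed back is assumed to share its load
    // and circuit-breaker state with the instance kept in the worker lists.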

    // Background task to monitor worker loads with shared client
    async fn monitor_worker_loads_with_client(
        worker_urls: Vec<String>,
        tx: tokio::sync::watch::Sender<HashMap<String, isize>>,
        interval_secs: u64,
        client: Client,
        prefill_policy: Arc<dyn LoadBalancingPolicy>,
        decode_policy: Arc<dyn LoadBalancingPolicy>,
    ) {
        loop {
            let mut loads = HashMap::new();

            let futures: Vec<_> = worker_urls
                .iter()
                .map(|url| {
                    let client = client.clone();
                    let url = url.clone();
                    async move {
                        let load = get_worker_load(&client, &url).await.unwrap_or(0);
                        (url, load)
                    }
                })
                .collect();

            let results = futures_util::future::join_all(futures).await;

            for (url, load) in results {
                loads.insert(url, load);
            }

            debug!("Worker loads updated: {:?}", loads);

            // Update both policies with current loads
            prefill_policy.update_loads(&loads);
            decode_policy.update_loads(&loads);

            // Check if receiver is still active
            if tx.send(loads).is_err() {
                info!("Load monitor receiver dropped, shutting down monitor task");
                break;
            }

            tokio::time::sleep(Duration::from_secs(interval_secs)).await;
        }
    }

    // Helper to create a streaming response
    #[allow(clippy::too_many_arguments)]
    fn create_streaming_response(
        &self,
        stream: impl futures_util::Stream<Item = Result<bytes::Bytes, reqwest::Error>> + Send + 'static,
        status: StatusCode,
        prefill_logprobs: Option<Value>,
        return_logprob: bool,
        decode_url: Option<String>,
        headers: Option<HeaderMap>,
        prefill: &dyn Worker,
        decode: &dyn Worker,
    ) -> Response {
        // For streaming, increment load now - will be decremented when streaming completes
        prefill.increment_load();
        decode.increment_load();

        // Store URLs to find workers later for decrementing
        let prefill_url = prefill.url().to_string();
        let decode_url_str = decode.url().to_string();

        let (tx, rx) = tokio::sync::mpsc::unbounded_channel();

        // Clone the worker collections for the spawned task
        let prefill_workers = self.prefill_workers.clone();
        let decode_workers = self.decode_workers.clone();

        tokio::spawn(async move {
            // Use a flag to track whether stream completed successfully
            let mut stream_completed = false;

            futures_util::pin_mut!(stream);
            while let Some(chunk_result) = stream.next().await {
                match chunk_result {
                    Ok(chunk) => {
                        // Check for stream end marker to decrement load early
                        let is_done = chunk
                            .as_ref()
                            .windows(12)
                            .any(|window| window == b"data: [DONE]");

                        let result = if return_logprob && prefill_logprobs.is_some() {
                            // Try to merge logprobs
                            Self::merge_streaming_logprobs(prefill_logprobs.clone(), &chunk)
                                .unwrap_or(chunk)
                        } else {
                            chunk
                        };

                        if tx.send(Ok(result)).is_err() {
                            break;
                        }

                        // If we see the done marker, decrement load immediately
                        if is_done {
                            stream_completed = true;
                            break;
                        }
                    }
                    Err(e) => {
                        if let Some(ref url) = decode_url {
                            error!("Stream error from decode server {}: {}", url, e);
                            RouterMetrics::record_pd_stream_error(url);
                        }
                        let _ = tx.send(Err(format!("Stream error: {}", e)));
                        break;
                    }
                }
            }

            // Always decrement load after streaming (either completes or errors)
            // Find and decrement prefill worker
            if let Ok(prefill_workers_guard) = prefill_workers.read() {
                for worker in prefill_workers_guard.iter() {
                    if worker.url() == prefill_url.as_str() {
                        worker.decrement_load();
                        debug!(
                            "Decremented load for prefill worker: {} (stream_completed: {})",
                            prefill_url, stream_completed
                        );
                        break;
                    }
                }
            }

            // Find and decrement decode worker
            if let Ok(decode_workers_guard) = decode_workers.read() {
                for worker in decode_workers_guard.iter() {
                    if worker.url() == decode_url_str.as_str() {
                        worker.decrement_load();
                        debug!(
                            "Decremented load for decode worker: {} (stream_completed: {})",
                            decode_url_str, stream_completed
                        );
                        break;
                    }
                }
            }
        });

        let stream = UnboundedReceiverStream::new(rx);
        let body = Body::from_stream(stream);

        let mut response = Response::new(body);
        *response.status_mut() = status;

        // Use provided headers or create new ones, then ensure content-type is set for streaming
        let mut headers = headers.unwrap_or_default();
        headers.insert(CONTENT_TYPE, HeaderValue::from_static("text/event-stream"));
        *response.headers_mut() = headers;
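        // Streaming responses must advertise "text/event-stream"; any
        // content-type copied from the upstream response is overwritten here.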

        response
    }

    // Helper to process non-streaming decode response with logprob merging
    async fn process_non_streaming_response(
        &self,
        res: reqwest::Response,
        status: StatusCode,
        return_logprob: bool,
        prefill_body: Option<bytes::Bytes>,
    ) -> Response {
        let response = res.bytes().await;
        let decode_body = match response {
            Ok(decode_body) => decode_body,
            Err(e) => {
                error!("Failed to read decode response: {}", e);
                return (StatusCode::INTERNAL_SERVER_ERROR, "Failed to read response")
                    .into_response();
            }
        };

        if !return_logprob {
            return (status, decode_body).into_response();
        }

        let Some(prefill_body) = prefill_body else {
            return (status, decode_body).into_response();
        };

        // Merge logprobs from prefill and decode
        let (Ok(prefill_json), Ok(mut decode_json)) = (
            serde_json::from_slice::<Value>(&prefill_body),
            serde_json::from_slice::<Value>(&decode_body),
        ) else {
            warn!("Failed to parse responses for logprob merging");
            return (status, decode_body).into_response();
        };

        Self::merge_logprobs_in_json(&prefill_json, &mut decode_json);

        // Return merged response
        match serde_json::to_vec(&decode_json) {
            Ok(body) => (status, body).into_response(),
            Err(e) => {
                error!("Failed to serialize merged response: {}", e);
                (status, decode_body).into_response()
            }
        }
    }

    // Helper to process prefill response and extract body if needed for logprobs
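    // For non-logprob requests the prefill body carries nothing the client
    // sees; it is read only so the connection can be reused. Logprob requests
    // keep the body for merging into the decode response.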
    async fn process_prefill_response(
        &self,
        prefill_result: Result<reqwest::Response, reqwest::Error>,
        prefill_url: &str,
        return_logprob: bool,
    ) -> Result<(StatusCode, Option<bytes::Bytes>), Response> {
        // Check prefill result first - it's critical for disaggregated mode
        let prefill_response = match prefill_result {
            Ok(response) => response,
            Err(e) => {
                RouterMetrics::record_pd_prefill_error(prefill_url);
                error!(
                    "Prefill server failed (CRITICAL) prefill_url={} error={}. Decode will timeout without prefill KV cache.",
                    prefill_url,
                    e
                );

                // Return error immediately - don't wait for decode to timeout
                return Err((
                    StatusCode::BAD_GATEWAY,
                    format!(
                        "Prefill server error: {}. This will cause decode timeout.",
                        e
                    ),
                )
                    .into_response());
            }
        };

        let prefill_status = StatusCode::from_u16(prefill_response.status().as_u16())
            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);

        // Check if prefill succeeded
        if !prefill_status.is_success() {
            RouterMetrics::record_pd_prefill_error(prefill_url);

            // Get error body from prefill
            let error_msg = prefill_response
                .text()
                .await
                .unwrap_or_else(|_| "Unknown prefill error".to_string());

            error!(
                "Prefill server returned error status prefill_url={} status={} body={}",
                prefill_url, prefill_status, error_msg
            );

            return Err((
                prefill_status,
                format!("Prefill server error ({}): {}", prefill_status, error_msg),
            )
                .into_response());
        }

        // Read prefill body if needed for logprob merging
        let prefill_body = if return_logprob {
            match prefill_response.bytes().await {
                Ok(body) => Some(body),
                Err(e) => {
                    warn!("Failed to read prefill response body for logprobs: {}", e);
                    None
                }
            }
        } else {
            // For non-logprob requests, just consume the response without storing
            debug!("Consuming prefill response body (non-logprob request)");
            match prefill_response.bytes().await {
                Ok(_) => debug!("Prefill response consumed successfully"),
                Err(e) => warn!("Error consuming prefill response: {}", e),
            }
            None
        };

        Ok((prefill_status, prefill_body))
    }

    fn build_post_with_headers(
        &self,
        client: &Client,
        url: &str,
        route: &str,
        json_request: &Value,
        headers: Option<&HeaderMap>,
        connection_close: bool,
    ) -> reqwest::RequestBuilder {
        let mut request = client.post(api_path(url, route)).json(json_request);
        if connection_close {
            request = request.header("Connection", "close");
        }
        if let Some(headers) = headers {
            for (name, value) in headers.iter() {
                let name_lc = name.as_str().to_ascii_lowercase();
                // Whitelist important end-to-end headers, skip hop-by-hop
                let forward = matches!(
                    name_lc.as_str(),
                    "authorization" | "x-request-id" | "x-correlation-id"
                ) || name_lc.starts_with("x-request-id-");
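                // e.g. "authorization" and "x-request-id" pass through, while
                // hop-by-hop headers such as "connection" or "transfer-encoding"
                // never match the whitelist and are dropped.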
                if forward {
                    if let Ok(val) = value.to_str() {
                        request = request.header(name, val);
                    }
                }
            }
        }
        request
    }

    // Helper to merge logprobs from prefill and decode responses
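    // Illustrative shapes (values hypothetical): given
    //   prefill: {"meta_info": {"input_token_logprobs": [[-0.1, 101]]}}
    //   decode:  {"meta_info": {"input_token_logprobs": [[-0.2, 102]]}}
    // the decode JSON is mutated so input_token_logprobs becomes
    //   [[-0.1, 101], [-0.2, 102]], and the function returns true.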
    fn merge_logprobs_in_json(prefill_json: &Value, decode_json: &mut Value) -> bool {
        if let (Some(prefill_meta), Some(decode_meta)) = (
            prefill_json.get("meta_info"),
            decode_json.get_mut("meta_info"),
        ) {
            if let (Some(prefill_logprobs), Some(decode_logprobs)) = (
                prefill_meta.get("input_token_logprobs"),
                decode_meta.get_mut("input_token_logprobs"),
            ) {
                if let (Some(prefill_arr), Some(decode_arr)) =
                    (prefill_logprobs.as_array(), decode_logprobs.as_array_mut())
                {
                    let mut merged = prefill_arr.clone();
                    merged.extend(decode_arr.clone());
                    decode_meta["input_token_logprobs"] = Value::Array(merged);
                    return true;
                }
            }
        }
        false
    }

    // Simple helper to merge logprobs in streaming responses
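    // Illustrative frame (hypothetical payload): a decode chunk such as
    //   data: {"text":"hi","meta_info":{"input_token_logprobs":[[-0.2,102]]}}
    // is parsed, the prefill logprobs are prepended, and the frame is
    // re-serialized. Non-"data: " chunks and [DONE] markers return Err(()),
    // so the caller falls back to forwarding the original bytes.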
    fn merge_streaming_logprobs(
        prefill_logprobs: Option<Value>,
        decode_chunk: &[u8],
    ) -> Result<bytes::Bytes, ()> {
        // Skip non-data chunks
        let chunk_str = std::str::from_utf8(decode_chunk).map_err(|_| ())?;
        if !chunk_str.starts_with("data: ") || chunk_str.contains("[DONE]") {
            return Err(());
        }

        // Parse JSON from chunk
        let json_str = chunk_str.trim_start_matches("data: ").trim();
        let mut decode_json: Value = serde_json::from_str(json_str).map_err(|_| ())?;

        // Merge prefill logprobs if available
        if let Some(ref p_logprobs) = prefill_logprobs {
            if let Some(meta) = decode_json.get_mut("meta_info") {
                if let Some(d_logprobs) = meta.get_mut("input_token_logprobs") {
                    if let (Some(p_arr), Some(d_arr)) =
                        (p_logprobs.as_array(), d_logprobs.as_array())
                    {
                        let mut merged = p_arr.clone();
                        merged.extend(d_arr.clone());
                        *d_logprobs = Value::Array(merged);
                    }
                }
            }
        }

        // Re-serialize
        let merged_str = format!(
            "data: {}\n\n",
            serde_json::to_string(&decode_json).unwrap_or_default()
        );
        Ok(bytes::Bytes::from(merged_str))
    }
}

// Helper functions

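// Queries a worker's /get_load endpoint. The expected body is a JSON object
// like {"load": 3} (value shown is hypothetical); any network, status, or
// parse failure is logged at debug level and collapses to None.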
async fn get_worker_load(client: &Client, worker_url: &str) -> Option<isize> {
    match client.get(format!("{}/get_load", worker_url)).send().await {
        Ok(res) if res.status().is_success() => match res.bytes().await {
            Ok(bytes) => match serde_json::from_slice::<Value>(&bytes) {
                Ok(data) => data
                    .get("load")
                    .and_then(|v| v.as_i64())
                    .map(|v| v as isize),
                Err(e) => {
                    debug!("Failed to parse load response from {}: {}", worker_url, e);
                    None
                }
            },
            Err(e) => {
                debug!("Failed to read load response from {}: {}", worker_url, e);
                None
            }
        },
        Ok(res) => {
            debug!(
                "Worker {} returned non-success status: {}",
                worker_url,
                res.status()
            );
            None
        }
        Err(e) => {
            debug!("Failed to get load from {}: {}", worker_url, e);
            None
        }
    }
}

#[async_trait]
impl WorkerManagement for PDRouter {
    async fn add_worker(&self, _worker_url: &str) -> Result<String, String> {
        // For PD router, we don't support adding workers via this generic method
        Err(
            "PD router requires specific add_prefill_server or add_decode_server methods"
                .to_string(),
        )
    }

    fn remove_worker(&self, worker_url: &str) {
        // For PD router, we would need to know if it's a prefill or decode server
        // For now, try both
        if let Ok(mut workers) = self.prefill_workers.write() {
            if let Some(index) = workers.iter().position(|w| w.url() == worker_url) {
                workers.remove(index);
                info!("Removed prefill worker: {}", worker_url);
                return;
            }
        }

        if let Ok(mut workers) = self.decode_workers.write() {
            if let Some(index) = workers.iter().position(|w| w.url() == worker_url) {
                workers.remove(index);
                info!("Removed decode worker: {}", worker_url);
            }
        }
    }

    fn get_worker_urls(&self) -> Vec<String> {
        let mut urls = Vec::new();

        // Add prefill worker URLs
        if let Ok(workers) = self.prefill_workers.read() {
            for worker in workers.iter() {
                urls.push(worker.url().to_string());
            }
        }

        // Add decode worker URLs
        if let Ok(workers) = self.decode_workers.read() {
            for worker in workers.iter() {
                urls.push(worker.url().to_string());
            }
        }

        urls
    }
}

#[async_trait]
impl RouterTrait for PDRouter {
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    async fn health(&self, _req: Request<Body>) -> Response {
        // This is a server readiness check - checking if we have healthy workers
        // Workers handle their own health checks in the background
        let mut all_healthy = true;
        let mut unhealthy_servers = Vec::new();

        // Check prefill servers
        for worker in self.prefill_workers.read().unwrap().iter() {
            if !worker.is_healthy() {
                all_healthy = false;
                unhealthy_servers.push(format!("Prefill: {}", worker.url()));
            }
        }

        // Check decode servers
        for worker in self.decode_workers.read().unwrap().iter() {
            if !worker.is_healthy() {
                all_healthy = false;
                unhealthy_servers.push(format!("Decode: {}", worker.url()));
            }
        }

        if all_healthy {
            (StatusCode::OK, "All servers healthy").into_response()
        } else {
            (
                StatusCode::SERVICE_UNAVAILABLE,
                format!("Unhealthy servers: {:?}", unhealthy_servers),
            )
                .into_response()
        }
    }

    async fn health_generate(&self, _req: Request<Body>) -> Response {
        // Test model generation capability by selecting a random pair and testing them
        // Note: This endpoint actually causes the model to generate tokens, so we only test one pair

        // Select a random worker pair using the policy
        let (prefill, decode) = match self.select_pd_pair(None).await {
            Ok(pair) => pair,
            Err(e) => {
                return (
                    StatusCode::SERVICE_UNAVAILABLE,
                    format!("No healthy worker pair available: {}", e),
                )
                    .into_response();
            }
        };

        // Test prefill server's health_generate
        let prefill_url = format!("{}/health_generate", prefill.url());
        let (prefill_result, decode_result) = tokio::join!(
            self.client.get(&prefill_url).send(),
            self.client
                .get(format!("{}/health_generate", decode.url()))
                .send()
        );

        // Check results
        let mut errors = Vec::new();

        match prefill_result {
            Ok(res) if res.status().is_success() => {
                debug!(
                    "Health generate passed for prefill server: {}",
                    prefill.url()
                );
            }
            Ok(res) => {
                errors.push(format!(
                    "Prefill {} returned status {}",
                    prefill.url(),
                    res.status()
                ));
            }
            Err(e) => {
                errors.push(format!("Prefill {} error: {}", prefill.url(), e));
            }
        }

        match decode_result {
            Ok(res) if res.status().is_success() => {
                debug!("Health generate passed for decode server: {}", decode.url());
            }
            Ok(res) => {
                errors.push(format!(
                    "Decode {} returned status {}",
                    decode.url(),
                    res.status()
                ));
            }
            Err(e) => {
                errors.push(format!("Decode {} error: {}", decode.url(), e));
            }
        }

        if errors.is_empty() {
            (
                StatusCode::OK,
                format!(
                    "Health generate passed on selected pair: prefill={}, decode={}",
                    prefill.url(),
                    decode.url()
                ),
            )
                .into_response()
        } else {
            (
                StatusCode::SERVICE_UNAVAILABLE,
                format!("Health generate failed: {:?}", errors),
            )
                .into_response()
        }
    }

    async fn get_server_info(&self, _req: Request<Body>) -> Response {
        // Get info from the first decode server to match sglang's server info format
        // Note: We use decode workers for server info to match expected format
        self.proxy_to_first_worker(&self.decode_workers, "get_server_info", "decode", None)
            .await
    }

    async fn get_models(&self, req: Request<Body>) -> Response {
        // Extract headers first to avoid Send issues
        let headers = header_utils::copy_request_headers(&req);

        // Proxy to first prefill worker
        self.proxy_to_first_worker(&self.prefill_workers, "v1/models", "prefill", Some(headers))
            .await
    }

    async fn get_model_info(&self, req: Request<Body>) -> Response {
        // Extract headers first to avoid Send issues
        let headers = header_utils::copy_request_headers(&req);

        // Proxy to first prefill worker
        self.proxy_to_first_worker(
            &self.prefill_workers,
            "get_model_info",
            "prefill",
            Some(headers),
        )
        .await
    }

    async fn route_generate(
        &self,
        headers: Option<&HeaderMap>,
        body: &GenerateRequest,
    ) -> Response {
        // Extract parameters
        let is_stream = body.stream;
        let return_logprob = body.return_logprob;

        // Extract text for cache-aware routing
        let request_text = if self.policies_need_request_text() {
            body.text
                .as_deref()
                .or_else(|| {
                    body.prompt.as_ref().and_then(|p| match p {
                        StringOrArray::String(s) => Some(s.as_str()),
                        StringOrArray::Array(v) => v.first().map(|s| s.as_str()),
                    })
                })
                .map(|s| s.to_string())
        } else {
            None
        };

        // Calculate batch size
        let batch_size = Self::get_generate_batch_size(body);

        // Create context
        let context = PDRequestContext {
            route: "/generate",
            batch_size,
            is_stream,
            return_logprob,
            request_text,
        };

        // Execute with retry and bootstrap injection
        self.execute_dual_dispatch(headers, body, context).await
    }

    async fn route_chat(
        &self,
        headers: Option<&HeaderMap>,
        body: &ChatCompletionRequest,
    ) -> Response {
        // Extract parameters
        let is_stream = body.stream;
        let return_logprob = body.logprobs;

        // Extract text for cache-aware routing
        let request_text = if self.policies_need_request_text() {
            body.messages.first().and_then(|msg| match msg {
                ChatMessage::User { content, .. } => match content {
                    UserMessageContent::Text(text) => Some(text.clone()),
                    UserMessageContent::Parts(_) => None,
                },
                ChatMessage::System { content, .. } => Some(content.clone()),
                _ => None,
            })
        } else {
            None
        };

        // Calculate batch size
        let batch_size = Self::get_chat_batch_size(body);

        // Create context
        let context = PDRequestContext {
            route: "/v1/chat/completions",
            batch_size,
            is_stream,
            return_logprob,
            request_text,
        };

        // Execute with retry and bootstrap injection
        self.execute_dual_dispatch(headers, body, context).await
    }

    async fn route_completion(
        &self,
        headers: Option<&HeaderMap>,
        body: &CompletionRequest,
    ) -> Response {
        // Extract parameters
        let is_stream = body.stream;
        let return_logprob = body.logprobs.is_some();

        // Extract text for cache-aware routing
        let request_text = if self.policies_need_request_text() {
            match &body.prompt {
                StringOrArray::String(s) => Some(s.clone()),
                StringOrArray::Array(v) => v.first().map(|s| s.to_string()),
            }
        } else {
            None
        };

        // Calculate batch size
        let batch_size = Self::get_completion_batch_size(body);

        // Create context
        let context = PDRequestContext {
            route: "/v1/completions",
            batch_size,
            is_stream,
            return_logprob,
            request_text,
        };

        // Execute with retry and bootstrap injection
        self.execute_dual_dispatch(headers, body, context).await
    }

    async fn route_embeddings(&self, _headers: Option<&HeaderMap>, _body: Body) -> Response {
        todo!()
    }

    async fn route_rerank(&self, _headers: Option<&HeaderMap>, _body: Body) -> Response {
        todo!()
    }

    async fn flush_cache(&self) -> Response {
        // Process both prefill and decode workers
        let (prefill_results, prefill_errors) = self
            .process_workers(&self.prefill_workers, "Prefill", "flush_cache")
            .await;
        let (decode_results, decode_errors) = self
            .process_workers(&self.decode_workers, "Decode", "flush_cache")
            .await;

        // Combine results and errors
        let mut results = prefill_results;
        results.extend(decode_results);
        let mut errors = prefill_errors;
        errors.extend(decode_errors);

        if errors.is_empty() {
            (
                StatusCode::OK,
                format!("Cache flushed successfully: {:?}", results),
            )
                .into_response()
        } else {
            (
                StatusCode::PARTIAL_CONTENT,
                format!(
                    "Partial success. Results: {:?}, Errors: {:?}",
                    results, errors
                ),
            )
                .into_response()
        }
    }

    async fn get_worker_loads(&self) -> Response {
        let mut loads = HashMap::new();
        let mut errors = Vec::new();
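        // Final payload shape (illustrative):
        //   {"loads": {"prefill_http://p1": 2, "decode_http://d1": 5}, "errors": []}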

        // Process prefill workers
        match Self::get_worker_urls(&self.prefill_workers, "prefill") {
            Ok(urls) => {
                for worker_url in urls {
                    match get_worker_load(&self.client, &worker_url).await {
                        Some(load) => {
                            loads.insert(format!("prefill_{}", worker_url), load);
                        }
                        None => {
                            errors.push(format!("Failed to get load from prefill {}", worker_url));
                        }
                    }
                }
            }
            Err(e) => errors.push(e),
        }

        // Process decode workers
        match Self::get_worker_urls(&self.decode_workers, "decode") {
            Ok(urls) => {
                for worker_url in urls {
                    match get_worker_load(&self.client, &worker_url).await {
                        Some(load) => {
                            loads.insert(format!("decode_{}", worker_url), load);
                        }
                        None => {
                            errors.push(format!("Failed to get load from decode {}", worker_url));
                        }
                    }
                }
            }
            Err(e) => errors.push(e),
        }

        let response_data = serde_json::json!({
            "loads": loads,
            "errors": errors
        });

        (StatusCode::OK, Json(response_data)).into_response()
    }

    fn router_type(&self) -> &'static str {
        "pd"
    }

    fn readiness(&self) -> Response {
        // PD router is ready if it has at least one healthy prefill AND one healthy decode worker
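        // Example ready payload (illustrative counts):
        //   {"status": "ready", "prefill": {"healthy": 1, "total": 1},
        //    "decode": {"healthy": 2, "total": 2}}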
        let healthy_prefill_count = self
            .prefill_workers
            .read()
            .unwrap()
            .iter()
            .filter(|w| w.is_healthy())
            .count();

        let healthy_decode_count = self
            .decode_workers
            .read()
            .unwrap()
            .iter()
            .filter(|w| w.is_healthy())
            .count();

        let total_prefill = self.prefill_workers.read().unwrap().len();
        let total_decode = self.decode_workers.read().unwrap().len();

        if healthy_prefill_count > 0 && healthy_decode_count > 0 {
            Json(json!({
                "status": "ready",
                "prefill": {
                    "healthy": healthy_prefill_count,
                    "total": total_prefill
                },
                "decode": {
                    "healthy": healthy_decode_count,
                    "total": total_decode
                }
            }))
            .into_response()
        } else {
            let mut reasons = Vec::new();
            if healthy_prefill_count == 0 {
                reasons.push("no healthy prefill workers");
            }
            if healthy_decode_count == 0 {
                reasons.push("no healthy decode workers");
            }

            (
                StatusCode::SERVICE_UNAVAILABLE,
                Json(serde_json::json!({
                    "status": "not_ready",
                    "reason": reasons.join(", "),
                    "prefill": {
                        "healthy": healthy_prefill_count,
                        "total": total_prefill
                    },
                    "decode": {
                        "healthy": healthy_decode_count,
                        "total": total_decode
                    }
                })),
            )
                .into_response()
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::core::{BasicWorker, WorkerType};
    use crate::policies::RandomPolicy;

    fn create_test_pd_router() -> PDRouter {
        let prefill_policy = Arc::new(RandomPolicy::new());
        let decode_policy = Arc::new(RandomPolicy::new());

        PDRouter {
            prefill_workers: Arc::new(RwLock::new(vec![])),
            decode_workers: Arc::new(RwLock::new(vec![])),
            prefill_policy,
            decode_policy,
            timeout_secs: 5,
            interval_secs: 1,
            worker_loads: Arc::new(tokio::sync::watch::channel(HashMap::new()).1),
            load_monitor_handle: None,
            client: Client::new(),
            prefill_client: Client::new(),
            prefill_drain_tx: mpsc::channel(100).0,
            retry_config: RetryConfig::default(),
            circuit_breaker_config: CircuitBreakerConfig::default(),
            _prefill_health_checker: None,
            _decode_health_checker: None,
        }
    }

    fn create_test_worker(url: String, worker_type: WorkerType, healthy: bool) -> Box<dyn Worker> {
        let worker = BasicWorker::new(url, worker_type);
        worker.set_healthy(healthy);
        Box::new(worker)
    }

    // ============= Worker Management Tests =============

    #[tokio::test]
    async fn test_add_prefill_server_already_exists() {
        let router = create_test_pd_router();

        // Add a worker first
        let worker = create_test_worker(
            "http://localhost:8000".to_string(),
            WorkerType::Prefill {
                bootstrap_port: Some(8080),
            },
            true,
        );
        router.prefill_workers.write().unwrap().push(worker);

        // Try to add the same URL again - this would fail during the health check in a real scenario
        // For this unit test, we only exercise the duplicate-check logic
        let workers = router.prefill_workers.read().unwrap();
        let exists = workers.iter().any(|w| w.url() == "http://localhost:8000");
        assert!(exists);
    }

    #[tokio::test]
    async fn test_remove_prefill_server_success() {
        let router = create_test_pd_router();

        // Add servers first
        let worker1 = create_test_worker(
            "http://worker1".to_string(),
            WorkerType::Prefill {
                bootstrap_port: None,
            },
            true,
        );
        let worker2 = create_test_worker(
            "http://worker2".to_string(),
            WorkerType::Prefill {
                bootstrap_port: Some(8080),
            },
            true,
        );

        router.prefill_workers.write().unwrap().push(worker1);
        router.prefill_workers.write().unwrap().push(worker2);

        // Remove one
        let result = router.remove_prefill_server("http://worker1").await;

        assert!(result.is_ok());
        assert!(result.unwrap().contains("Successfully removed"));

        let workers = router.prefill_workers.read().unwrap();
        assert_eq!(workers.len(), 1);
        assert_eq!(workers[0].url(), "http://worker2");
    }

    #[tokio::test]
    async fn test_remove_prefill_server_not_found() {
        let router = create_test_pd_router();

        let result = router.remove_prefill_server("http://nonexistent").await;

        assert!(result.is_err());
        match result.unwrap_err() {
            PDRouterError::WorkerNotFound { url } => {
                assert_eq!(url, "http://nonexistent");
            }
            _ => panic!("Expected WorkerNotFound error"),
        }
    }

    #[tokio::test]
    async fn test_remove_decode_server_success() {
        let router = create_test_pd_router();

        // Add server first
        let worker = create_test_worker("http://decode1".to_string(), WorkerType::Decode, true);
        router.decode_workers.write().unwrap().push(worker);

        let result = router.remove_decode_server("http://decode1").await;

        assert!(result.is_ok());
        assert!(result.unwrap().contains("Successfully removed"));

        let workers = router.decode_workers.read().unwrap();
        assert_eq!(workers.len(), 0);
    }

    // ============= Lock Error Handling Tests =============

    #[test]
    fn test_lock_operations() {
        let router = create_test_pd_router();

        // Test read/write locks work correctly
        {
            let read_guard = router.prefill_workers.read().unwrap();
            assert_eq!(read_guard.len(), 0);
        }

        {
            let mut write_guard = router.prefill_workers.write().unwrap();
            write_guard.push(create_test_worker(
                "http://test".to_string(),
                WorkerType::Prefill {
                    bootstrap_port: None,
                },
                true,
            ));
        }

        {
            let read_guard = router.prefill_workers.read().unwrap();
            assert_eq!(read_guard.len(), 1);
        }
    }

    // ============= Bootstrap Injection Tests =============
    // Note: These tests are commented out as we've moved to the optimized bootstrap injection
    // approach that doesn't use the Bootstrap trait on GenerateReqInput anymore.

    // TODO: Add new tests for the optimized bootstrap injection approach using
    // RequestWithBootstrap and BatchRequestWithBootstrap wrappers

    // ============= Worker Selection Tests =============

    #[tokio::test]
    async fn test_select_healthy_prefill_worker() {
        let router = create_test_pd_router();

        // Add mix of healthy and unhealthy workers
        let healthy_worker = create_test_worker(
            "http://healthy".to_string(),
            WorkerType::Prefill {
                bootstrap_port: None,
            },
            true,
        );
        let unhealthy_worker = create_test_worker(
            "http://unhealthy".to_string(),
            WorkerType::Prefill {
                bootstrap_port: None,
            },
            false,
        );
        let decode_worker =
            create_test_worker("http://decode".to_string(), WorkerType::Decode, true);

        router
            .prefill_workers
            .write()
            .unwrap()
            .push(unhealthy_worker);
        router.prefill_workers.write().unwrap().push(healthy_worker);
        router.decode_workers.write().unwrap().push(decode_worker);

        let result = router.select_pd_pair(None).await;

        assert!(result.is_ok());
        let (prefill, _decode) = result.unwrap();

        // Should select the healthy worker
        assert_eq!(prefill.url(), "http://healthy");
        assert!(prefill.is_healthy());
    }

    #[tokio::test]
    async fn test_empty_worker_lists() {
        let router = create_test_pd_router();

        let result = router.select_pd_pair(None).await;

        assert!(result.is_err());
        assert!(result.unwrap_err().contains("No prefill workers available"));
    }

    // ============= Health Endpoints Tests =============

    #[tokio::test]
    async fn test_health_endpoints() {
        let router = create_test_pd_router();

        // Add healthy workers
        let prefill_worker = create_test_worker(
            "http://localhost:8000".to_string(),
            WorkerType::Prefill {
                bootstrap_port: None,
            },
            true,
        );
        let decode_worker = create_test_worker(
            "http://localhost:8001".to_string(),
            WorkerType::Decode,
            true,
        );

        router.prefill_workers.write().unwrap().push(prefill_worker);
        router.decode_workers.write().unwrap().push(decode_worker);

        // Test health endpoint
        let http_req = axum::http::Request::builder()
            .body(axum::body::Body::empty())
            .unwrap();
        let response = router.health(http_req).await;

        assert_eq!(response.status(), 200);

        // Test readiness endpoint
        let response = router.readiness();
        assert_eq!(response.status(), 200);
    }

    // ============= Load Monitoring Tests =============

    #[tokio::test]
    async fn test_load_monitor_updates() {
        let power_of_two_policy = Arc::new(crate::policies::PowerOfTwoPolicy::new());
        let mut router = create_test_pd_router();
        router.prefill_policy = power_of_two_policy.clone();
        router.decode_policy = power_of_two_policy;

        // Create load channel
        let (tx, rx) = tokio::sync::watch::channel(HashMap::new());
        router.worker_loads = Arc::new(rx);

        // Simulate load updates
        let mut loads = HashMap::new();
        loads.insert("http://worker1".to_string(), 10);
        loads.insert("http://worker2".to_string(), 5);

        let _ = tx.send(loads.clone());

        // Router should receive updates
        let received = router.worker_loads.borrow().clone();
        assert_eq!(received.get("http://worker1"), Some(&10));
        assert_eq!(received.get("http://worker2"), Some(&5));
    }

    // ============= Worker Load Tests =============

    #[test]
    fn test_worker_load_metrics() {
        let prefill_worker = create_test_worker(
            "http://prefill".to_string(),
            WorkerType::Prefill {
                bootstrap_port: None,
            },
            true,
        );
        let decode_worker =
            create_test_worker("http://decode".to_string(), WorkerType::Decode, true);

        // Create load guard for both workers
        let _guard =
            WorkerLoadGuard::new_multi(vec![prefill_worker.as_ref(), decode_worker.as_ref()]);

        // Load should be incremented
        assert_eq!(prefill_worker.load(), 1);
        assert_eq!(decode_worker.load(), 1);

        // Drop guard - load should decrement
        drop(_guard);

        assert_eq!(prefill_worker.load(), 0);
        assert_eq!(decode_worker.load(), 0);
    }

    #[tokio::test]
    async fn test_streaming_load_tracking() {
        use futures_util::StreamExt;
        use tokio::time::{sleep, Duration};

        let router = create_test_pd_router();

        // Add workers
        let prefill_worker = create_test_worker(
            "http://prefill".to_string(),
            WorkerType::Prefill {
                bootstrap_port: None,
            },
            true,
        );
        let decode_worker =
            create_test_worker("http://decode".to_string(), WorkerType::Decode, true);

        router.prefill_workers.write().unwrap().push(prefill_worker);
        router.decode_workers.write().unwrap().push(decode_worker);

        // Get references to the workers - clone to avoid holding the lock across an await
        let (prefill_ref, decode_ref) = {
            let workers = router.prefill_workers.read().unwrap();
            let prefill = workers[0].clone_worker();
            drop(workers);
            let workers = router.decode_workers.read().unwrap();
            let decode = workers[0].clone_worker();
            (prefill, decode)
        };

        // Initially load should be 0
        assert_eq!(prefill_ref.load(), 0);
        assert_eq!(decode_ref.load(), 0);

        // Create a mock streaming response
        let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
        let stream = tokio_stream::wrappers::UnboundedReceiverStream::new(rx);

        // Call create_streaming_response which should increment load
        let _response = router.create_streaming_response(
            stream.map(Ok),
            StatusCode::OK,
            None,
            false,
            None,
            None,
            prefill_ref.as_ref(),
            decode_ref.as_ref(),
        );

        // Load should be incremented immediately
        assert_eq!(prefill_ref.load(), 1);
        assert_eq!(decode_ref.load(), 1);

        // Send some data through the stream
        tx.send(bytes::Bytes::from("test data")).unwrap();

        // Give time for the spawned task to process
        sleep(Duration::from_millis(10)).await;

        // Load should still be 1 (streaming in progress)
        assert_eq!(prefill_ref.load(), 1);
        assert_eq!(decode_ref.load(), 1);

        // Close the stream
        drop(tx);

        // Give time for cleanup
        sleep(Duration::from_millis(100)).await;

        // Load should be decremented after streaming completes
        assert_eq!(prefill_ref.load(), 0);
        assert_eq!(decode_ref.load(), 0);
    }

    // ============= Concurrent Operations Tests =============

    #[tokio::test]
    async fn test_concurrent_worker_operations() {
        let router = Arc::new(create_test_pd_router());

        let mut handles = vec![];

        // Spawn tasks to add workers
        for i in 0..5 {
            let router_clone = Arc::clone(&router);
            let url = format!("http://worker{}", i);
            let handle = tokio::spawn(async move {
                let worker = create_test_worker(
                    url,
                    WorkerType::Prefill {
                        bootstrap_port: None,
                    },
                    true,
                );
                router_clone.prefill_workers.write().unwrap().push(worker);
            });
            handles.push(handle);
        }

        // Wait for all tasks
        for handle in handles {
            let _ = handle.await;
        }

        // Check final state
        let workers = router.prefill_workers.read().unwrap();
        assert_eq!(workers.len(), 5);
    }
}