Unverified commit 97c38239 authored by Simo Lin, committed by GitHub

[router] refactor router and worker management 3/n (#10727)

parent 60dbbd08
......@@ -141,14 +141,21 @@ def test_dp_aware_worker_expansion_and_api_key(
assert len(urls) == 2
assert set(urls) == {f"{worker_url}@0", f"{worker_url}@1"}
# TODO: Router currently doesn't enforce API key authentication on incoming requests.
# It only adds the API key to outgoing requests to workers.
# Need to implement auth middleware to properly protect router endpoints.
# For now, both requests succeed (200) regardless of client authentication.
# Verify API key pass-through behavior
# 1) Without Authorization -> Currently 200 (should be 401 after auth middleware added)
r = requests.post(
f"{router_url}/v1/completions",
json={"model": e2e_model, "prompt": "hi", "max_tokens": 1},
timeout=60,
)
assert (
r.status_code == 200
) # TODO: Change to 401 after auth middleware implementation
# 2) With correct Authorization -> 200
r = requests.post(
......
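// A minimal sketch of the router-side auth middleware the TODO above calls for,
// assuming the router's HTTP layer is axum (which the retry tests in this commit
// already use); the function name and hard-coded key are illustrative, not part
// of this change.
use axum::{body::Body, http::{Request, StatusCode}, middleware::Next, response::Response};
// Hypothetical guard: reject any request whose Authorization header does not
// match the router's configured API key.
async fn require_api_key(req: Request<Body>, next: Next) -> Result<Response, StatusCode> {
// Assumption: in practice the expected key would come from router config.
let expected = "Bearer sk-router-key";
let authorized = req
.headers()
.get("authorization")
.and_then(|v| v.to_str().ok())
.map(|v| v == expected)
.unwrap_or(false);
if authorized {
Ok(next.run(req).await)
} else {
Err(StatusCode::UNAUTHORIZED)
}
}
// Wiring (also illustrative): Router::new().layer(middleware::from_fn(require_api_key))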
......@@ -83,14 +83,13 @@ impl CircuitBreaker {
/// Check if a request can be executed
pub fn can_execute(&self) -> bool {
// First check if we need to transition from Open to HalfOpen
self.check_and_update_state();
let state = *self.state.read().unwrap();
match state {
CircuitState::Closed => true,
CircuitState::Open => false,
CircuitState::HalfOpen => true,
}
}
......@@ -114,22 +113,17 @@ impl CircuitBreaker {
self.total_successes.fetch_add(1, Ordering::Relaxed);
self.consecutive_failures.store(0, Ordering::Release);
let successes = self.consecutive_successes.fetch_add(1, Ordering::AcqRel) + 1;
// Outcome-level metrics are recorded at the worker level where the worker label is known
let current_state = *self.state.read().unwrap();
match current_state {
CircuitState::HalfOpen => {
// Check if we've reached the success threshold to close the circuit
if successes >= self.config.success_threshold {
self.transition_to(CircuitState::Closed);
}
}
CircuitState::Closed => {}
CircuitState::Open => {
// Shouldn't happen, but if it does, stay open
tracing::warn!("Success recorded while circuit is open");
}
}
......@@ -140,9 +134,7 @@ impl CircuitBreaker {
self.total_failures.fetch_add(1, Ordering::Relaxed);
self.consecutive_successes.store(0, Ordering::Release);
let failures = self.consecutive_failures.fetch_add(1, Ordering::AcqRel) + 1;
// Outcome-level metrics are recorded at the worker level where the worker label is known
// Update last failure time
{
let mut last_failure = self.last_failure_time.write().unwrap();
*last_failure = Some(Instant::now());
......@@ -152,18 +144,14 @@ impl CircuitBreaker {
match current_state {
CircuitState::Closed => {
// Check if we've reached the failure threshold to open the circuit
if failures >= self.config.failure_threshold {
self.transition_to(CircuitState::Open);
}
}
CircuitState::HalfOpen => {
// Single failure in half-open state reopens the circuit
self.transition_to(CircuitState::Open);
}
CircuitState::Open => {}
}
}
......@@ -172,7 +160,6 @@ impl CircuitBreaker {
let current_state = *self.state.read().unwrap();
if current_state == CircuitState::Open {
// Check if timeout has expired
let last_change = *self.last_state_change.read().unwrap();
if last_change.elapsed() >= self.config.timeout_duration {
self.transition_to(CircuitState::HalfOpen);
......@@ -188,11 +175,9 @@ impl CircuitBreaker {
if old_state != new_state {
*state = new_state;
// Update last state change time
let mut last_change = self.last_state_change.write().unwrap();
*last_change = Instant::now();
// Reset counters based on transition
match new_state {
CircuitState::Closed => {
self.consecutive_failures.store(0, Ordering::Release);
......@@ -218,7 +203,6 @@ impl CircuitBreaker {
CircuitState::HalfOpen => "half_open",
};
info!("Circuit breaker state transition: {} -> {}", from, to);
// Transition metrics are recorded at the worker level where the worker label is known
}
}
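// Illustrative lifecycle of the state machine above, assuming the default
// config referenced by the tests (failure_threshold = 5):
//
// let cb = CircuitBreaker::new();
// assert!(cb.can_execute()); // Closed
// for _ in 0..5 { cb.record_outcome(false); } // reach failure_threshold
// assert!(!cb.can_execute()); // Open
// // once timeout_duration elapses, can_execute() flips the state to HalfOpen;
// // success_threshold consecutive successes then transition back to Closed,
// // while any failure in HalfOpen reopens the circuit.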
......@@ -533,7 +517,6 @@ mod tests {
let cb = Arc::new(CircuitBreaker::new());
let mut handles = vec![];
// Spawn threads that record failures
for _ in 0..10 {
let cb_clone = Arc::clone(&cb);
let handle = thread::spawn(move || {
......@@ -544,12 +527,10 @@ mod tests {
handles.push(handle);
}
// Wait for all threads
for handle in handles {
handle.join().unwrap();
}
// Should have recorded 1000 failures
assert_eq!(cb.total_failures(), 1000);
}
}
......@@ -122,7 +122,6 @@ mod tests {
let error = WorkerError::WorkerNotFound {
url: "http://test".to_string(),
};
// Verify it implements Error trait
let _: &dyn Error = &error;
assert!(error.source().is_none());
}
......@@ -135,11 +134,9 @@ mod tests {
#[test]
fn test_worker_result_type_alias() {
// Test Ok variant
let result: WorkerResult<i32> = Ok(42);
assert!(matches!(result, Ok(42)));
// Test Err variant
let error = WorkerError::WorkerNotFound {
url: "test".to_string(),
};
......@@ -149,7 +146,6 @@ mod tests {
#[test]
fn test_empty_url_handling() {
// Test empty URLs in error variants
let error1 = WorkerError::HealthCheckFailed {
url: "".to_string(),
reason: "No connection".to_string(),
......@@ -173,7 +169,6 @@ mod tests {
#[test]
fn test_special_characters_in_messages() {
// Test with special characters
let error = WorkerError::InvalidConfiguration {
message: "Invalid JSON: {\"error\": \"test\"}".to_string(),
};
......@@ -182,7 +177,6 @@ mod tests {
"Invalid worker configuration: Invalid JSON: {\"error\": \"test\"}"
);
// Test with unicode
let error2 = WorkerError::HealthCheckFailed {
url: "http://测试:8080".to_string(),
reason: "连接被拒绝".to_string(),
......@@ -207,10 +201,8 @@ mod tests {
);
}
// Mock reqwest error for testing conversion
#[test]
fn test_reqwest_error_conversion() {
// Test that NetworkError is the correct variant
let network_error = WorkerError::NetworkError {
url: "http://example.com".to_string(),
error: "connection timeout".to_string(),
......@@ -227,8 +219,6 @@ mod tests {
#[test]
fn test_error_equality() {
// WorkerError doesn't implement PartialEq, but we can test that
// the same error construction produces the same display output
let error1 = WorkerError::WorkerNotFound {
url: "http://test".to_string(),
};
......
......@@ -12,9 +12,9 @@ pub mod retry;
pub mod token_bucket;
pub mod worker;
pub mod worker_builder;
pub mod worker_manager;
pub mod worker_registry;
// Re-export commonly used types at the module level
pub use circuit_breaker::{
CircuitBreaker, CircuitBreakerConfig, CircuitBreakerStats, CircuitState,
};
......@@ -25,4 +25,5 @@ pub use worker::{
Worker, WorkerFactory, WorkerLoadGuard, WorkerType,
};
pub use worker_builder::{BasicWorkerBuilder, DPAwareWorkerBuilder};
pub use worker_manager::{DpInfo, ServerInfo, WorkerManager};
pub use worker_registry::{WorkerId, WorkerRegistry, WorkerRegistryStats};
......@@ -25,14 +25,12 @@ pub struct BackoffCalculator;
impl BackoffCalculator {
/// Calculate backoff delay for a given attempt index (0-based).
pub fn calculate_delay(config: &RetryConfig, attempt: u32) -> Duration {
// Base exponential backoff
let pow = config.backoff_multiplier.powi(attempt as i32);
let mut delay_ms = (config.initial_backoff_ms as f32 * pow) as u64;
if delay_ms > config.max_backoff_ms {
delay_ms = config.max_backoff_ms;
}
// Apply jitter in range [-j, +j]
let jitter = config.jitter_factor.clamp(0.0, 1.0);
if jitter > 0.0 {
let mut rng = rand::rng();
......@@ -77,14 +75,12 @@ impl RetryExecutor {
match operation(attempt).await {
Ok(val) => return Ok(val),
Err(_) => {
// Use the number of failures so far (0-indexed) to compute delay,
// so the first retry uses `initial_backoff_ms`.
let is_last = attempt + 1 >= max;
if is_last {
return Err(RetryError::MaxRetriesExceeded);
}
let delay = BackoffCalculator::calculate_delay(config, attempt);
attempt += 1;
tokio::time::sleep(delay).await;
}
}
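// Worked example of the backoff math (config values assumed, matching the
// test below): with initial_backoff_ms = 100, backoff_multiplier = 2.0,
// max_backoff_ms = 250 and jitter_factor = 0.0, calculate_delay returns
// 100ms, 200ms, 250ms, 250ms, ... for attempts 0, 1, 2, 3: exponential
// growth capped at max_backoff_ms.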
......@@ -144,14 +140,11 @@ impl RetryExecutor {
}
if is_last {
// Exhausted retries
on_exhausted();
return response;
}
// Backoff before next attempt
let next_attempt = attempt + 1;
// Compute delay based on the number of failures so far (0-indexed)
let delay = BackoffCalculator::calculate_delay(config, attempt);
debug!(
attempt = attempt,
......@@ -194,22 +187,18 @@ mod tests {
backoff_multiplier: 2.0,
jitter_factor: 0.0,
};
// attempt=0 => 100ms
assert_eq!(
BackoffCalculator::calculate_delay(&cfg, 0),
Duration::from_millis(100)
);
// attempt=1 => 200ms
assert_eq!(
BackoffCalculator::calculate_delay(&cfg, 1),
Duration::from_millis(200)
);
// attempt=2 => 400ms -> capped to 250ms
assert_eq!(
BackoffCalculator::calculate_delay(&cfg, 2),
Duration::from_millis(250)
);
// large attempt still capped
assert_eq!(
BackoffCalculator::calculate_delay(&cfg, 10),
Duration::from_millis(250)
......@@ -225,7 +214,6 @@ mod tests {
backoff_multiplier: 2.0,
jitter_factor: 0.5,
};
// attempt=2 => base 400ms, jitter in [0.5x, 1.5x]
let base = 400.0;
for _ in 0..50 {
let d = BackoffCalculator::calculate_delay(&cfg, 2).as_millis() as f32;
......@@ -261,7 +249,7 @@ mod tests {
assert!(res.is_ok());
assert_eq!(res.unwrap(), 42);
assert_eq!(calls.load(Ordering::Relaxed), 3);
}
#[tokio::test]
......@@ -309,7 +297,7 @@ mod tests {
}
}
},
|res, _attempt| !res.status().is_success(),
{
let backoffs = backoffs.clone();
move |_delay, _next_attempt| {
......@@ -326,7 +314,7 @@ mod tests {
.await;
assert_eq!(response.status(), StatusCode::OK);
assert_eq!(calls.load(Ordering::Relaxed), 3);
assert_eq!(backoffs.load(Ordering::Relaxed), 2);
assert_eq!(exhausted.load(Ordering::Relaxed), 0);
}
......@@ -347,7 +335,7 @@ mod tests {
async move { (StatusCode::BAD_REQUEST, "bad").into_response() }
}
},
|_res, _attempt| false,
{
let backoffs = backoffs.clone();
move |_delay, _next_attempt| {
......@@ -385,7 +373,7 @@ mod tests {
async move { (StatusCode::SERVICE_UNAVAILABLE, "fail").into_response() }
}
},
|_res, _attempt| true,
{
let backoffs = backoffs.clone();
move |_delay, _next_attempt| {
......
......@@ -32,16 +32,11 @@ impl TokenBucket {
let capacity = capacity as f64;
let refill_rate = refill_rate as f64;
// Ensure refill_rate is not zero to prevent division by zero
let refill_rate = if refill_rate > 0.0 { refill_rate } else { 1.0 };
Self {
inner: Arc::new(Mutex::new(TokenBucketInner {
tokens: capacity,
last_refill: Instant::now(),
})),
notify: Arc::new(Notify::new()),
......@@ -54,7 +49,6 @@ impl TokenBucket {
pub async fn try_acquire(&self, tokens: f64) -> Result<(), ()> {
let mut inner = self.inner.lock().await;
// Refill tokens based on elapsed time
let now = Instant::now();
let elapsed = now.duration_since(inner.last_refill).as_secs_f64();
let refill_amount = elapsed * self.refill_rate;
......@@ -82,12 +76,10 @@ impl TokenBucket {
/// Acquire tokens, waiting if necessary
pub async fn acquire(&self, tokens: f64) -> Result<(), tokio::time::error::Elapsed> {
// First try to acquire immediately
if self.try_acquire(tokens).await.is_ok() {
return Ok(());
}
// Calculate wait time
let wait_time = {
let inner = self.inner.lock().await;
let tokens_needed = tokens - inner.tokens;
......@@ -100,15 +92,12 @@ impl TokenBucket {
wait_time, tokens
);
// Wait for tokens to be available
tokio::time::timeout(wait_time, async {
loop {
// Check if we can acquire now
if self.try_acquire(tokens).await.is_ok() {
return;
}
// Wait for notification or small interval
tokio::select! {
_ = self.notify.notified() => {},
_ = tokio::time::sleep(Duration::from_millis(10)) => {},
......@@ -144,7 +133,6 @@ impl TokenBucket {
pub async fn available_tokens(&self) -> f64 {
let mut inner = self.inner.lock().await;
// Refill before checking
let now = Instant::now();
let elapsed = now.duration_since(inner.last_refill).as_secs_f64();
let refill_amount = elapsed * self.refill_rate;
......@@ -162,33 +150,26 @@ mod tests {
#[tokio::test]
async fn test_token_bucket_basic() {
let bucket = TokenBucket::new(10, 5);
// Should succeed - bucket starts full
assert!(bucket.try_acquire(5.0).await.is_ok());
assert!(bucket.try_acquire(5.0).await.is_ok());
// Should fail - no tokens left
assert!(bucket.try_acquire(1.0).await.is_err());
// Wait for refill
tokio::time::sleep(Duration::from_millis(300)).await;
// Should have ~1.5 tokens now
assert!(bucket.try_acquire(1.0).await.is_ok());
}
#[tokio::test]
async fn test_token_bucket_refill() {
let bucket = TokenBucket::new(10, 10);
// Use all tokens
assert!(bucket.try_acquire(10.0).await.is_ok());
// Wait for partial refill
tokio::time::sleep(Duration::from_millis(500)).await;
// Should have ~5 tokens
let available = bucket.available_tokens().await;
assert!((4.0..=6.0).contains(&available));
}
......
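// A short usage sketch for the bucket above; the capacity and rate values and
// the function name are illustrative. acquire() waits up to the computed
// wait_time instead of failing fast, so callers shed load only after the
// bounded wait times out.
async fn rate_limited_call(bucket: &TokenBucket) -> Result<(), ()> {
match bucket.acquire(1.0).await {
Ok(()) => Ok(()), // tokens granted; forward the request
Err(_elapsed) => Err(()), // timed out waiting for tokens; shed load
}
}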
......@@ -11,10 +11,9 @@ use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::{Arc, LazyLock};
use tokio::sync::Mutex;
// Shared HTTP client for worker operations (health checks, server info, etc.)
static WORKER_CLIENT: LazyLock<reqwest::Client> = LazyLock::new(|| {
reqwest::Client::builder()
.timeout(std::time::Duration::from_secs(30))
.build()
.expect("Failed to create worker HTTP client")
});
......@@ -43,7 +42,6 @@ pub trait Worker: Send + Sync + fmt::Debug {
/// Synchronous health check wrapper (for compatibility)
fn check_health(&self) -> WorkerResult<()> {
// Use a small runtime for synchronous contexts
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
......@@ -64,10 +62,7 @@ pub trait Worker: Send + Sync + fmt::Debug {
fn decrement_load(&self);
/// Reset the load counter to 0 (for sync/recovery)
fn reset_load(&self) {}
/// Get the number of processed requests
fn processed_requests(&self) -> usize;
......@@ -88,11 +83,9 @@ pub trait Worker: Send + Sync + fmt::Debug {
/// Record the outcome of a request to this worker
fn record_outcome(&self, success: bool) {
// Record outcome-level metric with worker label
let outcome_str = if success { "success" } else { "failure" };
RouterMetrics::record_cb_outcome(self.url(), outcome_str);
// Record into circuit breaker and infer state change for metrics
let before = self.circuit_breaker().state();
self.circuit_breaker().record_outcome(success);
let after = self.circuit_breaker().state();
......@@ -119,8 +112,6 @@ pub trait Worker: Send + Sync + fmt::Debug {
RouterMetrics::set_cb_state(self.url(), state_code);
}
// === DP-aware methods ===
/// Check if this worker is DP-aware
fn is_dp_aware(&self) -> bool {
false
......@@ -156,8 +147,6 @@ pub trait Worker: Send + Sync + fmt::Debug {
true
}
// === Multi-router support ===
// TODO: - Enhanced Worker Discovery
// The Worker trait should handle async discovery of metadata from the worker itself
// rather than having service discovery or other components query /get_server_info.
......@@ -356,14 +345,12 @@ impl fmt::Debug for BasicWorker {
impl BasicWorker {
pub fn normalised_url(&self) -> WorkerResult<&str> {
if self.url().contains("@") {
// Need to extract the URL from "http://host:port@dp_rank"
let parts: Vec<&str> = self.url().split('@').collect();
if parts.len() != 2 {
return Err(WorkerError::InvalidUrl {
url: self.url().to_string(),
});
}
// Ensure the second part (the dp_rank) can be parsed as an integer
match parts[1].parse::<usize>() {
Ok(_) => Ok(parts[0]),
Err(_) => Err(WorkerError::InvalidUrl {
......@@ -408,19 +395,22 @@ impl Worker for BasicWorker {
let health_result = match &self.metadata.connection_mode {
ConnectionMode::Http => {
// Perform HTTP health check
let url = self.normalised_url()?;
let health_url = format!("{}{}", url, self.metadata.health_config.endpoint);
let timeout = Duration::from_secs(self.metadata.health_config.timeout_secs);
let mut request = WORKER_CLIENT.get(&health_url).timeout(timeout);
if let Some(ref api_key) = self.metadata.api_key {
request = request.header("Authorization", format!("Bearer {}", api_key));
}
match request.send().await {
Ok(response) => response.status().is_success(),
Err(_) => false,
}
}
ConnectionMode::Grpc { .. } => {
// Perform gRPC health check
if let Some(grpc_client) = &self.grpc_client {
let mut client = grpc_client.lock().await;
match client.health_check().await {
......@@ -449,11 +439,9 @@ impl Worker for BasicWorker {
};
if health_result {
// Health check succeeded
self.consecutive_failures.store(0, Ordering::Release);
let successes = self.consecutive_successes.fetch_add(1, Ordering::AcqRel) + 1;
// Mark healthy if we've reached the success threshold
if !self.is_healthy()
&& successes >= self.metadata.health_config.success_threshold as usize
{
......@@ -462,11 +450,9 @@ impl Worker for BasicWorker {
}
Ok(())
} else {
// Health check failed
self.consecutive_successes.store(0, Ordering::Release);
let failures = self.consecutive_failures.fetch_add(1, Ordering::AcqRel) + 1;
// Mark unhealthy if we've reached the failure threshold
if self.is_healthy()
&& failures >= self.metadata.health_config.failure_threshold as usize
{
......@@ -576,7 +562,6 @@ impl Worker for DPAwareWorker {
}
async fn check_health_async(&self) -> WorkerResult<()> {
// Delegate to the base worker's health check logic
self.base_worker.check_health_async().await
}
......@@ -612,8 +597,6 @@ impl Worker for DPAwareWorker {
self.base_worker.circuit_breaker()
}
// DP-aware specific implementations
fn is_dp_aware(&self) -> bool {
true
}
......@@ -631,7 +614,6 @@ impl Worker for DPAwareWorker {
}
async fn prepare_request(&self, mut req: serde_json::Value) -> WorkerResult<serde_json::Value> {
// Inject data_parallel_rank into the request
if let Some(map) = req.as_object_mut() {
map.insert(
"data_parallel_rank".to_string(),
......@@ -646,7 +628,6 @@ impl Worker for DPAwareWorker {
}
fn endpoint_url(&self, route: &str) -> String {
// Use base URL for actual requests
format!("{}{}", self.base_url, route)
}
}
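// Illustrative of the scheme above: a DPAwareWorker built from base URL
// "http://host:8080" with dp_rank 2 reports url() = "http://host:8080@2" for
// registry identity, while endpoint_url("/generate") returns
// "http://host:8080/generate" and prepare_request injects
// "data_parallel_rank": 2 into the JSON body.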
......@@ -670,53 +651,52 @@ impl WorkerFactory {
}
Box::new(builder.build())
}
/// Static health validation before creating a worker
/// This replaces wait_for_worker_health in handlers
pub async fn validate_health(url: &str, timeout_secs: u64) -> WorkerResult<()> {
use std::time::Instant;
let start_time = Instant::now();
let timeout = std::time::Duration::from_secs(timeout_secs);
loop {
if start_time.elapsed() > timeout {
return Err(WorkerError::HealthCheckFailed {
url: url.to_string(),
reason: format!(
"Timeout {}s waiting for worker to become healthy",
timeout_secs
),
});
}
// Note: This static function doesn't have access to the worker's API key;
// API key authentication is handled in the worker instance's check_health_async method
match WORKER_CLIENT
.get(format!("{}/health", url))
.timeout(std::time::Duration::from_secs(5))
.send()
.await
{
Ok(res) if res.status().is_success() => {
tracing::info!("Worker {} is healthy", url);
return Ok(());
}
Ok(res) => {
tracing::warn!(
"Worker {} health check failed with status: {}",
url,
res.status()
);
}
Err(e) => {
tracing::warn!("Failed to contact worker {}: {}", url, e);
}
}
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
}
}
}
......@@ -893,7 +873,6 @@ mod tests {
use std::thread;
use std::time::Duration;
// Test WorkerType
#[test]
fn test_worker_type_display() {
assert_eq!(WorkerType::Regular.to_string(), "Regular");
......@@ -945,7 +924,6 @@ mod tests {
assert_eq!(original, cloned);
}
// Test HealthConfig
#[test]
fn test_health_config_default() {
let config = HealthConfig::default();
......@@ -972,13 +950,11 @@ mod tests {
assert_eq!(config.success_threshold, 3);
}
// Test BasicWorker
#[test]
fn test_basic_worker_creation() {
use crate::core::BasicWorkerBuilder;
let worker = BasicWorkerBuilder::new("http://test:8080")
.worker_type(WorkerType::Regular)
.api_key("test_api_key")
.build();
assert_eq!(worker.url(), "http://test:8080");
assert_eq!(worker.worker_type(), WorkerType::Regular);
......@@ -1016,7 +992,6 @@ mod tests {
let worker = BasicWorkerBuilder::new("http://test:8080")
.worker_type(WorkerType::Regular)
.health_config(custom_config.clone())
.api_key("test_api_key")
.build();
assert_eq!(worker.metadata().health_config.timeout_secs, 15);
......@@ -1024,13 +999,11 @@ mod tests {
assert_eq!(worker.metadata().health_config.endpoint, "/custom-health");
}
// Test Worker trait implementation
#[test]
fn test_worker_url() {
use crate::core::BasicWorkerBuilder;
let worker = BasicWorkerBuilder::new("http://worker1:8080")
.worker_type(WorkerType::Regular)
.api_key("test_api_key")
.build();
assert_eq!(worker.url(), "http://worker1:8080");
}
......@@ -1040,7 +1013,6 @@ mod tests {
use crate::core::BasicWorkerBuilder;
let regular = BasicWorkerBuilder::new("http://test:8080")
.worker_type(WorkerType::Regular)
.api_key("test_api_key")
.build();
assert_eq!(regular.worker_type(), WorkerType::Regular);
......@@ -1048,7 +1020,6 @@ mod tests {
.worker_type(WorkerType::Prefill {
bootstrap_port: Some(9090),
})
.api_key("test_api_key")
.build();
assert_eq!(
prefill.worker_type(),
......@@ -1059,7 +1030,6 @@ mod tests {
let decode = BasicWorkerBuilder::new("http://test:8080")
.worker_type(WorkerType::Decode)
.api_key("test_api_key")
.build();
assert_eq!(decode.worker_type(), WorkerType::Decode);
}
......@@ -1071,14 +1041,11 @@ mod tests {
.worker_type(WorkerType::Regular)
.build();
// Initial state is healthy
assert!(worker.is_healthy());
// Set unhealthy
worker.set_healthy(false);
assert!(!worker.is_healthy());
// Set healthy again
worker.set_healthy(true);
assert!(worker.is_healthy());
}
......@@ -1088,31 +1055,24 @@ mod tests {
use crate::core::BasicWorkerBuilder;
let worker = BasicWorkerBuilder::new("http://test:8080")
.worker_type(WorkerType::Regular)
.api_key("test_api_key")
.build();
// Initial load is 0
assert_eq!(worker.load(), 0);
// Increment once
worker.increment_load();
assert_eq!(worker.load(), 1);
// Increment twice more
worker.increment_load();
worker.increment_load();
assert_eq!(worker.load(), 3);
// Decrement once
worker.decrement_load();
assert_eq!(worker.load(), 2);
// Decrement to 0
worker.decrement_load();
worker.decrement_load();
assert_eq!(worker.load(), 0);
// Decrement below 0 should stay at 0
worker.decrement_load();
assert_eq!(worker.load(), 0);
}
......@@ -1124,17 +1084,14 @@ mod tests {
.worker_type(WorkerType::Regular)
.build();
// Initial count is 0
assert_eq!(worker.processed_requests(), 0);
// Increment multiple times
for i in 1..=100 {
worker.increment_processed();
assert_eq!(worker.processed_requests(), i);
}
}
// Test concurrent operations
#[tokio::test]
async fn test_concurrent_load_increments() {
use crate::core::BasicWorkerBuilder;
......@@ -1146,7 +1103,6 @@ mod tests {
let mut handles = vec![];
// Spawn 100 tasks incrementing load
for _ in 0..100 {
let worker_clone = Arc::clone(&worker);
let handle = tokio::spawn(async move {
......@@ -1155,12 +1111,10 @@ mod tests {
handles.push(handle);
}
// Wait for all tasks
for handle in handles {
handle.await.unwrap();
}
// Final count should be 100
assert_eq!(worker.load(), 100);
}
......@@ -1173,7 +1127,6 @@ mod tests {
.build(),
);
// Set initial load to 100
for _ in 0..100 {
worker.increment_load();
}
......@@ -1181,7 +1134,6 @@ mod tests {
let mut handles = vec![];
// Spawn 100 tasks decrementing load
for _ in 0..100 {
let worker_clone = Arc::clone(&worker);
let handle = tokio::spawn(async move {
......@@ -1190,12 +1142,10 @@ mod tests {
handles.push(handle);
}
// Wait for all tasks
for handle in handles {
handle.await.unwrap();
}
// Final count should be 0
assert_eq!(worker.load(), 0);
}
......@@ -1210,7 +1160,6 @@ mod tests {
let mut handles = vec![];
// Spawn threads randomly setting health status
for i in 0..100 {
let worker_clone = Arc::clone(&worker);
let handle = tokio::spawn(async move {
......@@ -1220,13 +1169,11 @@ mod tests {
handles.push(handle);
}
// Wait for all tasks
for handle in handles {
handle.await.unwrap();
}
}
// Test WorkerFactory
#[test]
fn test_create_regular_worker() {
let worker: Box<dyn Worker> = Box::new(
......@@ -1240,7 +1187,6 @@ mod tests {
#[test]
fn test_create_prefill_worker() {
// With bootstrap port
let worker1: Box<dyn Worker> = Box::new(
BasicWorkerBuilder::new("http://prefill:8080")
.worker_type(WorkerType::Prefill {
......@@ -1256,7 +1202,6 @@ mod tests {
}
);
// Without bootstrap port
let worker2: Box<dyn Worker> = Box::new(
BasicWorkerBuilder::new("http://prefill:8080")
.worker_type(WorkerType::Prefill {
......@@ -1283,7 +1228,6 @@ mod tests {
assert_eq!(worker.worker_type(), WorkerType::Decode);
}
// Test WorkerLoadGuard
#[test]
fn test_load_guard_single_worker() {
use crate::core::BasicWorkerBuilder;
......@@ -1297,7 +1241,6 @@ mod tests {
assert_eq!(worker.load(), 1);
}
// Guard dropped, load decremented
assert_eq!(worker.load(), 0);
}
......@@ -1325,13 +1268,11 @@ mod tests {
{
let _guard = WorkerLoadGuard::new_multi(worker_refs);
// All loads incremented
assert_eq!(workers[0].load(), 1);
assert_eq!(workers[1].load(), 1);
assert_eq!(workers[2].load(), 1);
}
// All loads decremented
assert_eq!(workers[0].load(), 0);
assert_eq!(workers[1].load(), 0);
assert_eq!(workers[2].load(), 0);
......@@ -1347,29 +1288,21 @@ mod tests {
);
assert_eq!(worker.load(), 0);
// Clone for use inside catch_unwind
let worker_clone = Arc::clone(&worker);
// Use AssertUnwindSafe wrapper for the test
// This is safe because we're only testing the load counter behavior,
// not the grpc_client which is None for HTTP workers
use std::panic::AssertUnwindSafe;
// This will panic, but the guard should still clean up
let result = std::panic::catch_unwind(AssertUnwindSafe(|| {
let _guard = WorkerLoadGuard::new(worker_clone.as_ref());
assert_eq!(worker_clone.load(), 1);
panic!("Test panic");
}));
// Verify panic occurred
assert!(result.is_err());
// Load should be decremented even after panic
assert_eq!(worker.load(), 0);
}
// Test helper functions
#[test]
fn test_urls_to_workers() {
let urls = vec!["http://w1:8080".to_string(), "http://w2:8080".to_string()];
......@@ -1400,23 +1333,17 @@ mod tests {
assert_eq!(urls, vec!["http://w1:8080", "http://w2:8080"]);
}
// Test synchronous health check wrapper
#[test]
fn test_check_health_sync_wrapper() {
// We can't easily test the actual HTTP call without mocking,
// but we can verify the sync wrapper works
use crate::core::BasicWorkerBuilder;
let worker = BasicWorkerBuilder::new("http://test:8080")
.worker_type(WorkerType::Regular)
.build();
// This will fail because there's no server at this URL,
// but it tests that the sync wrapper doesn't panic
let result = worker.check_health();
assert!(result.is_err());
}
// Performance test for load counter
#[test]
fn test_load_counter_performance() {
use crate::core::BasicWorkerBuilder;
......@@ -1436,12 +1363,9 @@ mod tests {
let ops_per_sec = iterations as f64 / duration.as_secs_f64();
println!("Load counter operations per second: {:.0}", ops_per_sec);
// Should be well over 1M ops/sec
assert!(ops_per_sec > 1_000_000.0);
}
// ===== Tests for DPAwareWorker =====
#[test]
fn test_dp_aware_worker_creation() {
let dp_worker = DPAwareWorkerBuilder::new("http://worker1:8080", 2, 4)
......@@ -1562,8 +1486,6 @@ mod tests {
assert_eq!(dp_worker.processed_requests(), 1);
}
// ===== Tests for WorkerFactory async methods =====
#[tokio::test]
async fn test_factory_create_dp_aware() {
let worker = WorkerFactory::create_dp_aware(
......@@ -1610,26 +1532,21 @@ mod tests {
.worker_type(WorkerType::Regular)
.build();
// Initial state should be available
assert!(worker.is_available());
assert_eq!(worker.circuit_breaker().state(), CircuitState::Closed);
// Record some failures
worker.record_outcome(false);
worker.record_outcome(false);
// Still available (default threshold is 5)
assert!(worker.is_available());
// Record more failures to open circuit
worker.record_outcome(false);
worker.record_outcome(false);
worker.record_outcome(false);
// Circuit should be open, worker not available
assert!(!worker.is_available());
assert!(worker.is_healthy());
assert!(!worker.circuit_breaker().can_execute());
}
#[test]
......@@ -1647,20 +1564,16 @@ mod tests {
.circuit_breaker_config(config)
.build();
// Should open after 2 failures
worker.record_outcome(false);
assert!(worker.is_available());
worker.record_outcome(false);
assert!(!worker.is_available());
// Wait for timeout
thread::sleep(Duration::from_millis(150));
// Should be half-open
assert!(worker.is_available());
assert_eq!(worker.circuit_breaker().state(), CircuitState::HalfOpen);
// Success should close it
worker.record_outcome(true);
assert_eq!(worker.circuit_breaker().state(), CircuitState::Closed);
}
......@@ -1671,24 +1584,18 @@ mod tests {
.worker_type(WorkerType::Regular)
.build();
// Should have circuit breaker
assert!(dp_worker.is_available());
// Record failures
for _ in 0..5 {
dp_worker.record_outcome(false);
}
// Should not be available
assert!(!dp_worker.is_available());
assert_eq!(dp_worker.circuit_breaker().state(), CircuitState::Open);
}
// ===== Integration tests =====
#[tokio::test]
async fn test_mixed_worker_types() {
// Create a mix of worker types
let regular: Box<dyn Worker> = Box::new(
BasicWorkerBuilder::new("http://regular:8080")
.worker_type(WorkerType::Regular)
......@@ -1739,22 +1646,19 @@ mod tests {
dp_aware_decode,
];
// Test that they all implement Worker trait properly
for worker in &workers {
assert!(worker.is_healthy());
assert_eq!(worker.load(), 0);
assert_eq!(worker.processed_requests(), 0);
}
// Test specific behaviors
assert!(!workers[0].is_dp_aware());
assert!(!workers[1].is_dp_aware());
assert!(!workers[2].is_dp_aware());
assert!(workers[3].is_dp_aware());
assert!(workers[4].is_dp_aware());
assert!(workers[5].is_dp_aware());
// Test worker types
assert_eq!(workers[0].worker_type(), WorkerType::Regular);
assert_eq!(
workers[1].worker_type(),
......
......@@ -7,10 +7,7 @@ use std::collections::HashMap;
/// Builder for creating BasicWorker instances with fluent API
pub struct BasicWorkerBuilder {
// Required fields
url: String,
// Optional fields with defaults
api_key: Option<String>,
worker_type: WorkerType,
connection_mode: ConnectionMode,
......@@ -21,7 +18,7 @@ pub struct BasicWorkerBuilder {
}
impl BasicWorkerBuilder {
/// Create a new builder with only the URL
pub fn new(url: impl Into<String>) -> Self {
Self {
url: url.into(),
......@@ -129,13 +126,10 @@ impl BasicWorkerBuilder {
/// Builder for creating DPAwareWorker instances with fluent API
pub struct DPAwareWorkerBuilder {
// Required fields
base_url: String,
api_key: Option<String>,
dp_rank: usize,
dp_size: usize,
// Optional fields with defaults
worker_type: WorkerType,
connection_mode: ConnectionMode,
labels: HashMap<String, String>,
......@@ -145,7 +139,7 @@ pub struct DPAwareWorkerBuilder {
}
impl DPAwareWorkerBuilder {
/// Create a new DP-aware worker builder
pub fn new(base_url: impl Into<String>, dp_rank: usize, dp_size: usize) -> Self {
Self {
base_url: base_url.into(),
......@@ -232,10 +226,7 @@ impl DPAwareWorkerBuilder {
/// Build the DPAwareWorker instance
pub fn build(self) -> DPAwareWorker {
// Create URL with DP rank suffix for identification
let worker_url = format!("{}@{}", self.base_url, self.dp_rank);
// Use BasicWorkerBuilder to create a properly configured base worker
let mut builder = BasicWorkerBuilder::new(worker_url)
.worker_type(self.worker_type)
.connection_mode(self.connection_mode)
......@@ -243,18 +234,14 @@ impl DPAwareWorkerBuilder {
.health_config(self.health_config)
.circuit_breaker_config(self.circuit_breaker_config);
// Add gRPC client if provided
if let Some(client) = self.grpc_client {
builder = builder.grpc_client(client);
}
// Add API key if provided
if let Some(api_key) = self.api_key {
builder = builder.api_key(api_key);
}
let base_worker = builder.build();
// Create the DPAwareWorker with the configured base worker
DPAwareWorker::with_base_worker(base_worker, self.base_url, self.dp_rank, self.dp_size)
}
}
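// Typical construction, mirroring the tests below (values illustrative):
//
// let worker = DPAwareWorkerBuilder::new("http://localhost:8080", 2, 8)
//     .worker_type(WorkerType::Decode)
//     .api_key("secret")
//     .build();
// assert_eq!(worker.url(), "http://localhost:8080@2");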
......@@ -267,7 +254,6 @@ mod tests {
#[test]
fn test_basic_worker_builder_minimal() {
// Using new API - defaults to Regular type
let worker = BasicWorkerBuilder::new("http://localhost:8080").build();
assert_eq!(worker.url(), "http://localhost:8080");
......@@ -278,7 +264,6 @@ mod tests {
#[test]
fn test_basic_worker_builder_with_type() {
// Test setting worker type explicitly
let worker = BasicWorkerBuilder::new("http://localhost:8080")
.worker_type(WorkerType::Decode)
.build();
......@@ -332,7 +317,6 @@ mod tests {
ConnectionMode::Grpc { port: Some(50051) }
);
assert_eq!(worker.metadata().labels, labels);
// Can't directly compare HealthConfig without PartialEq, so check individual fields
assert_eq!(
worker.metadata().health_config.endpoint,
health_config.endpoint
......@@ -375,13 +359,11 @@ mod tests {
#[test]
fn test_dp_aware_worker_builder_minimal() {
// Using new API - defaults to Regular type
let worker = DPAwareWorkerBuilder::new("http://localhost:8080", 2, 8).build();
assert_eq!(worker.url(), "http://localhost:8080@2");
assert_eq!(worker.dp_rank(), Some(2));
assert_eq!(worker.dp_size(), Some(8));
// Note: base_url is a private field, we can only test through the url() method
assert_eq!(worker.worker_type(), WorkerType::Regular);
}
......@@ -412,7 +394,6 @@ mod tests {
assert_eq!(worker.dp_rank(), Some(3));
assert_eq!(worker.dp_size(), Some(16));
assert_eq!(worker.metadata().labels, labels);
// Can't directly compare HealthConfig without PartialEq, so check individual fields
assert_eq!(
worker.metadata().health_config.endpoint,
health_config.endpoint
......@@ -437,7 +418,6 @@ mod tests {
#[test]
fn test_dp_aware_worker_with_grpc() {
// Test that DPAwareWorkerBuilder can set a gRPC client
let worker = DPAwareWorkerBuilder::new("grpc://cluster.local", 1, 4)
.worker_type(WorkerType::Decode)
.connection_mode(ConnectionMode::Grpc { port: Some(50051) })
......@@ -456,9 +436,5 @@ mod tests {
worker.metadata().labels.get("transport"),
Some(&"grpc".to_string())
);
// Note: We can't directly test the grpc_client as it's private,
// but the fact that the worker builds successfully with grpc connection mode
// validates that the configuration is properly passed through
}
}
//! Unified Worker Management Module
//!
//! Handles all aspects of worker lifecycle including discovery, initialization,
//! runtime management, and health monitoring.
use crate::config::types::{
CircuitBreakerConfig as ConfigCircuitBreakerConfig, ConnectionMode as ConfigConnectionMode,
HealthCheckConfig, RouterConfig, RoutingMode,
};
use crate::core::{
BasicWorkerBuilder, CircuitBreakerConfig, ConnectionMode, DPAwareWorkerBuilder, HealthConfig,
Worker, WorkerFactory, WorkerRegistry, WorkerType,
};
use crate::policies::PolicyRegistry;
use crate::protocols::worker_spec::WorkerConfigRequest;
use crate::server::AppContext;
use futures::future;
use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use tracing::{debug, error, info, warn};
static HTTP_CLIENT: Lazy<reqwest::Client> = Lazy::new(|| {
reqwest::Client::builder()
.timeout(Duration::from_secs(10))
.build()
.expect("Failed to create HTTP client")
});
/// Server information returned from worker endpoints
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ServerInfo {
pub model_id: Option<String>,
pub model_path: Option<String>,
pub dp_size: Option<usize>,
pub version: Option<String>,
pub max_batch_size: Option<usize>,
pub max_total_tokens: Option<usize>,
pub max_prefill_tokens: Option<usize>,
pub max_running_requests: Option<usize>,
pub max_num_reqs: Option<usize>,
}
/// DP (Data Parallel) information for a worker
#[derive(Debug, Clone)]
pub struct DpInfo {
pub dp_size: usize,
pub model_id: String,
}
/// Unified worker management
pub struct WorkerManager;
impl WorkerManager {
/// Get server info from /get_server_info endpoint
pub async fn get_server_info(url: &str, api_key: Option<&str>) -> Result<ServerInfo, String> {
let base_url = url.trim_end_matches('/');
let server_info_url = format!("{}/get_server_info", base_url);
let mut req = HTTP_CLIENT.get(&server_info_url);
if let Some(key) = api_key {
req = req.bearer_auth(key);
}
let response = req
.send()
.await
.map_err(|e| format!("Failed to connect to {}: {}", server_info_url, e))?;
if !response.status().is_success() {
return Err(format!(
"Server returned status {} from {}",
response.status(),
server_info_url
));
}
let json = response
.json::<Value>()
.await
.map_err(|e| format!("Failed to parse response from {}: {}", server_info_url, e))?;
info!(
"Successfully retrieved server info from {}",
server_info_url
);
Self::parse_server_info(json)
}
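// For reference, a response shape that parse_server_info (below) accepts;
// field names come from the parsing code, values are illustrative:
//
// {
//   "model_id": "llama-3-8b",   // falls back to "model" if absent
//   "model_path": "/models/llama-3-8b",
//   "dp_size": 2,
//   "version": "0.4.0",
//   "max_batch_size": 256
// }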
/// Get model info from /get_model_info endpoint
pub async fn get_model_info(url: &str, api_key: Option<&str>) -> Result<Value, String> {
let base_url = url.trim_end_matches('/');
let model_info_url = format!("{}/get_model_info", base_url);
let mut req = HTTP_CLIENT.get(&model_info_url);
if let Some(key) = api_key {
req = req.bearer_auth(key);
}
let response = req
.send()
.await
.map_err(|e| format!("Failed to connect to {}: {}", model_info_url, e))?;
if !response.status().is_success() {
return Err(format!(
"Server returned status {} from {}",
response.status(),
model_info_url
));
}
let json = response
.json::<Value>()
.await
.map_err(|e| format!("Failed to parse response from {}: {}", model_info_url, e))?;
info!("Successfully retrieved model info from {}", model_info_url);
Ok(json)
}
/// Get DP info for a worker URL
pub async fn get_dp_info(url: &str, api_key: Option<&str>) -> Result<DpInfo, String> {
let info = Self::get_server_info(url, api_key).await?;
let dp_size = info
.dp_size
.ok_or_else(|| format!("No dp_size in response from {}", url))?;
let model_id = info
.model_id
.or_else(|| {
info.model_path
.and_then(|path| path.split('/').next_back().map(|s| s.to_string()))
})
.unwrap_or_else(|| "unknown".to_string());
Ok(DpInfo { dp_size, model_id })
}
/// Generate DP-aware worker URLs
pub async fn get_dp_aware_urls(
base_urls: &[String],
api_key: Option<&str>,
) -> Result<Vec<String>, String> {
let mut dp_urls = Vec::new();
for base_url in base_urls {
match Self::get_dp_info(base_url, api_key).await {
Ok(dp_info) => {
info!(
"Discovered DP size {} for {} (model: {})",
dp_info.dp_size, base_url, dp_info.model_id
);
for rank in 0..dp_info.dp_size {
dp_urls.push(format!("{}@{}", base_url, rank));
}
}
Err(e) => {
return Err(format!("Failed to get DP info from {}: {}", base_url, e));
}
}
}
Ok(dp_urls)
}
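// Example (illustrative): base_urls = ["http://host:8000"] with a worker
// reporting dp_size = 2 expands to ["http://host:8000@0", "http://host:8000@1"],
// matching the "{url}@{rank}" convention used by DPAwareWorker.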
/// Initialize workers from configuration at startup
pub async fn initialize_workers(
config: &RouterConfig,
registry: &Arc<WorkerRegistry>,
policy_registry: Option<&Arc<PolicyRegistry>>,
) -> Result<(), String> {
info!("Starting worker initialization");
match &config.mode {
RoutingMode::Regular { worker_urls } => {
Self::initialize_regular_workers(worker_urls, config, registry, policy_registry)
.await?;
}
RoutingMode::PrefillDecode {
prefill_urls,
decode_urls,
..
} => {
let prefill_entries: Vec<(&String, &Option<u16>)> =
prefill_urls.iter().map(|(url, port)| (url, port)).collect();
Self::initialize_prefill_workers(
&prefill_entries,
config,
registry,
policy_registry,
)
.await?;
Self::initialize_decode_workers(decode_urls, config, registry, policy_registry)
.await?;
}
RoutingMode::OpenAI { .. } => {
info!("OpenAI routing mode - no workers to initialize");
}
}
Self::wait_for_healthy_workers(
registry,
config.worker_startup_timeout_secs,
config.health_check.check_interval_secs,
)
.await?;
info!("Worker initialization completed successfully");
Ok(())
}
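// Startup flow in short: workers are created per routing mode above, then
// wait_for_healthy_workers (below) gates initialization until every registered
// worker passes a health check or worker_startup_timeout_secs elapses.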
/// Initialize regular workers
async fn initialize_regular_workers(
urls: &[String],
config: &RouterConfig,
registry: &Arc<WorkerRegistry>,
policy_registry: Option<&Arc<PolicyRegistry>>,
) -> Result<(), String> {
info!("Creating {} regular workers", urls.len());
let connection_mode = Self::convert_connection_mode(&config.connection_mode, urls.first());
let circuit_breaker_config =
Self::convert_circuit_breaker_config(&config.effective_circuit_breaker_config());
let health_config = Self::convert_health_config(&config.health_check);
let mut registered_workers: HashMap<String, Vec<Arc<dyn Worker>>> = HashMap::new();
for url in urls {
if config.dp_aware {
match Self::get_dp_info(url, config.api_key.as_deref()).await {
Ok(dp_info) => {
info!(
"Discovered DP-aware worker {} with size {}",
url, dp_info.dp_size
);
for rank in 0..dp_info.dp_size {
let mut builder =
DPAwareWorkerBuilder::new(url.clone(), rank, dp_info.dp_size)
.worker_type(WorkerType::Regular)
.connection_mode(connection_mode.clone())
.circuit_breaker_config(circuit_breaker_config.clone())
.health_config(health_config.clone());
if let Some(ref key) = config.api_key {
builder = builder.api_key(key.clone());
}
let worker = Arc::new(builder.build()) as Arc<dyn Worker>;
let model_id = worker.model_id();
let worker_id = registry.register(Arc::clone(&worker));
info!(
"Registered DP-aware worker {}@{} with ID {:?}",
url, rank, worker_id
);
registered_workers
.entry(model_id.to_string())
.or_default()
.push(Arc::clone(&worker));
if let Some(policy_reg) = policy_registry {
policy_reg.on_worker_added(model_id, None);
}
}
}
Err(e) => {
return Err(format!(
"Failed to get DP info for worker {}: {}. DP-aware mode requires all workers to support DP.",
url, e
));
}
}
} else {
let worker = Self::create_basic_worker(
url.clone(),
WorkerType::Regular,
connection_mode.clone(),
config.api_key.clone(),
None,
circuit_breaker_config.clone(),
health_config.clone(),
);
Self::register_worker(worker, registry, &mut registered_workers, policy_registry);
}
}
Self::initialize_cache_policies(&registered_workers, registry, policy_registry);
Ok(())
}
/// Initialize prefill workers for PD mode
async fn initialize_prefill_workers(
prefill_entries: &[(&String, &Option<u16>)],
config: &RouterConfig,
registry: &Arc<WorkerRegistry>,
policy_registry: Option<&Arc<PolicyRegistry>>,
) -> Result<(), String> {
info!("Creating {} prefill workers", prefill_entries.len());
let connection_mode = Self::convert_connection_mode(
&config.connection_mode,
prefill_entries.first().map(|(url, _)| *url),
);
let circuit_breaker_config =
Self::convert_circuit_breaker_config(&config.effective_circuit_breaker_config());
let health_config = Self::convert_health_config(&config.health_check);
let mut registered_workers: HashMap<String, Vec<Arc<dyn Worker>>> = HashMap::new();
// TODO: Add proper DP-aware support for prefill workers in PD mode
if config.dp_aware {
warn!("DP-aware mode is not yet supported for prefill workers in PD mode. Creating regular prefill workers instead.");
}
for (url, bootstrap_port) in prefill_entries {
let worker_type = WorkerType::Prefill {
bootstrap_port: **bootstrap_port,
};
let worker = Self::create_basic_worker(
(*url).clone(),
worker_type,
connection_mode.clone(),
config.api_key.clone(),
None,
circuit_breaker_config.clone(),
health_config.clone(),
);
Self::register_worker(worker, registry, &mut registered_workers, policy_registry);
}
if let Some(policy_reg) = policy_registry {
let all_prefill_workers: Vec<Arc<dyn Worker>> = registered_workers
.values()
.flat_map(|workers| workers.iter().cloned())
.collect();
policy_reg.init_pd_cache_aware_policies(&all_prefill_workers, &[]);
}
Ok(())
}
/// Initialize decode workers for PD mode
async fn initialize_decode_workers(
urls: &[String],
config: &RouterConfig,
registry: &Arc<WorkerRegistry>,
policy_registry: Option<&Arc<PolicyRegistry>>,
) -> Result<(), String> {
info!("Creating {} decode workers", urls.len());
let connection_mode = Self::convert_connection_mode(&config.connection_mode, urls.first());
let circuit_breaker_config =
Self::convert_circuit_breaker_config(&config.effective_circuit_breaker_config());
let health_config = Self::convert_health_config(&config.health_check);
let mut registered_workers: HashMap<String, Vec<Arc<dyn Worker>>> = HashMap::new();
// TODO: Add proper DP-aware support for decode workers in PD mode
if config.dp_aware {
warn!("DP-aware mode is not yet supported for decode workers in PD mode. Creating regular decode workers instead.");
}
for url in urls {
let worker = Self::create_basic_worker(
url.clone(),
WorkerType::Decode,
connection_mode.clone(),
config.api_key.clone(),
None,
circuit_breaker_config.clone(),
health_config.clone(),
);
Self::register_worker(worker, registry, &mut registered_workers, policy_registry);
}
if let Some(policy_reg) = policy_registry {
let all_decode_workers: Vec<Arc<dyn Worker>> = registered_workers
.values()
.flat_map(|workers| workers.iter().cloned())
.collect();
policy_reg.init_pd_cache_aware_policies(&[], &all_decode_workers);
}
Ok(())
}
/// Add a worker from a configuration request
pub async fn add_worker_from_config(
config: &WorkerConfigRequest,
context: &AppContext,
) -> Result<String, String> {
let mut labels = config.labels.clone();
let model_id = if let Some(ref model_id) = config.model_id {
model_id.clone()
} else {
match Self::get_server_info(&config.url, config.api_key.as_deref()).await {
Ok(info) => info
.model_id
.or_else(|| {
info.model_path
.as_ref()
.and_then(|path| path.split('/').next_back().map(|s| s.to_string()))
})
.unwrap_or_else(|| "unknown".to_string()),
Err(e) => {
warn!("Failed to query server info from {}: {}", config.url, e);
"unknown".to_string()
}
}
};
labels.insert("model_id".to_string(), model_id.clone());
if let Some(priority) = config.priority {
labels.insert("priority".to_string(), priority.to_string());
}
if let Some(cost) = config.cost {
labels.insert("cost".to_string(), cost.to_string());
}
if let Some(ref tokenizer_path) = config.tokenizer_path {
labels.insert("tokenizer_path".to_string(), tokenizer_path.clone());
}
if let Some(ref reasoning_parser) = config.reasoning_parser {
labels.insert("reasoning_parser".to_string(), reasoning_parser.clone());
}
if let Some(ref tool_parser) = config.tool_parser {
labels.insert("tool_parser".to_string(), tool_parser.clone());
}
if let Some(ref chat_template) = config.chat_template {
labels.insert("chat_template".to_string(), chat_template.clone());
}
let worker_type = config
.worker_type
.as_ref()
.map(|t| match t.as_str() {
"prefill" => WorkerType::Prefill {
bootstrap_port: config.bootstrap_port,
},
"decode" => WorkerType::Decode,
_ => WorkerType::Regular,
})
.unwrap_or(WorkerType::Regular);
let connection_mode = if config.url.starts_with("grpc://") {
ConnectionMode::Grpc { port: None }
} else {
ConnectionMode::Http
};
let policy_hint = labels.get("policy").cloned();
Self::add_worker_internal(
&config.url,
worker_type,
connection_mode,
config.api_key.clone(),
Some(labels),
policy_hint.as_deref(),
context,
)
.await
}
/// Add a worker from URL (legacy endpoint)
pub async fn add_worker(
url: &str,
api_key: &Option<String>,
context: &AppContext,
) -> Result<String, String> {
Self::add_worker_internal(
url,
WorkerType::Regular,
ConnectionMode::Http,
api_key.clone(),
None,
None,
context,
)
.await
}
/// Remove a worker
pub fn remove_worker(url: &str, context: &AppContext) -> Result<String, String> {
if context.router_config.dp_aware {
Self::remove_dp_aware_workers(url, context)
} else {
Self::remove_single_worker(url, context)
}
}
pub fn get_worker_urls(registry: &Arc<WorkerRegistry>) -> Vec<String> {
registry
.get_all()
.iter()
.map(|w| w.url().to_string())
.collect()
}
/// Internal method to add a worker with all parameters
async fn add_worker_internal(
worker_url: &str,
worker_type: WorkerType,
connection_mode: ConnectionMode,
api_key: Option<String>,
labels: Option<HashMap<String, String>>,
policy_hint: Option<&str>,
context: &AppContext,
) -> Result<String, String> {
WorkerFactory::validate_health(
worker_url,
context.router_config.worker_startup_timeout_secs,
)
.await
.map_err(|e| format!("Health check failed: {}", e))?;
let circuit_breaker_config = Self::convert_circuit_breaker_config(
&context.router_config.effective_circuit_breaker_config(),
);
let health_config = Self::convert_health_config(&context.router_config.health_check);
if context.router_config.dp_aware {
let dp_urls = Self::get_dp_aware_urls(
&[worker_url.to_string()],
context.router_config.api_key.as_deref(),
)
.await?;
let mut workers_added = 0;
let mut model_workers: HashMap<String, Vec<Arc<dyn Worker>>> = HashMap::new();
let dp_size_for_base = dp_urls.len();
for (rank, dp_url) in dp_urls.iter().enumerate() {
if context.worker_registry.get_by_url(dp_url).is_some() {
info!("Worker {} already exists, skipping", dp_url);
continue;
}
let base_url = dp_url.split('@').next().unwrap().to_string();
let mut builder = DPAwareWorkerBuilder::new(base_url, rank, dp_size_for_base)
.worker_type(worker_type.clone())
.connection_mode(connection_mode.clone())
.circuit_breaker_config(circuit_breaker_config.clone())
.health_config(health_config.clone());
if let Some(ref key) = api_key {
builder = builder.api_key(key.clone());
}
if let Some(ref worker_labels) = labels {
builder = builder.labels(worker_labels.clone());
}
let worker = Arc::new(builder.build()) as Arc<dyn Worker>;
let model_id = worker.model_id().to_string();
context.worker_registry.register(worker.clone());
workers_added += 1;
model_workers
.entry(model_id.clone())
.or_default()
.push(worker);
context
.policy_registry
.on_worker_added(&model_id, policy_hint);
}
for model_id in model_workers.keys() {
let all_model_workers = context.worker_registry.get_by_model_fast(model_id);
if let Some(policy) = context.policy_registry.get_policy(model_id) {
if policy.name() == "cache_aware" {
context
.policy_registry
.init_cache_aware_policy(model_id, &all_model_workers);
}
}
}
if workers_added == 0 {
Ok(format!("All DP workers already exist for {}", worker_url))
} else {
Ok(format!(
"Added {} DP-aware workers for {}",
workers_added, worker_url
))
}
} else {
if context.worker_registry.get_by_url(worker_url).is_some() {
return Err(format!("Worker {} already exists", worker_url));
}
let worker = Self::create_basic_worker(
worker_url.to_string(),
worker_type,
connection_mode,
api_key,
labels,
circuit_breaker_config,
health_config,
);
let model_id = worker.model_id().to_string();
context.worker_registry.register(worker.clone());
context
.policy_registry
.on_worker_added(&model_id, policy_hint);
let workers = context.worker_registry.get_by_model_fast(&model_id);
if let Some(policy) = context.policy_registry.get_policy(&model_id) {
if policy.name() == "cache_aware" {
context
.policy_registry
.init_cache_aware_policy(&model_id, &workers);
}
}
Ok(format!("Worker {} added successfully", worker_url))
}
}
/// Remove a single worker
fn remove_single_worker(worker_url: &str, context: &AppContext) -> Result<String, String> {
let worker = context
.worker_registry
.get_by_url(worker_url)
.ok_or_else(|| format!("Worker {} not found", worker_url))?;
let model_id = worker.model_id().to_string();
context
.policy_registry
.remove_worker_from_cache_aware(&model_id, worker_url);
context.worker_registry.remove_by_url(worker_url);
context.policy_registry.on_worker_removed(&model_id);
let remaining_workers = context.worker_registry.get_by_model_fast(&model_id);
if let Some(policy) = context.policy_registry.get_policy(&model_id) {
if policy.name() == "cache_aware" && !remaining_workers.is_empty() {
context
.policy_registry
.init_cache_aware_policy(&model_id, &remaining_workers);
}
}
Ok(format!("Worker {} removed successfully", worker_url))
}
/// Remove DP-aware workers with prefix matching
fn remove_dp_aware_workers(worker_url: &str, context: &AppContext) -> Result<String, String> {
let worker_url_prefix = format!("{}@", worker_url);
let mut removed_workers = Vec::new();
let mut affected_models = std::collections::HashSet::new();
let all_workers = context.worker_registry.get_all();
for worker in all_workers.iter() {
if worker.url().starts_with(&worker_url_prefix) {
let model_id = worker.model_id().to_string();
affected_models.insert(model_id.clone());
context
.policy_registry
.remove_worker_from_cache_aware(&model_id, worker.url());
if context
.worker_registry
.remove_by_url(worker.url())
.is_some()
{
removed_workers.push(worker.url().to_string());
context.policy_registry.on_worker_removed(&model_id);
}
}
}
for model_id in affected_models {
let remaining_workers = context.worker_registry.get_by_model_fast(&model_id);
if let Some(policy) = context.policy_registry.get_policy(&model_id) {
if policy.name() == "cache_aware" && !remaining_workers.is_empty() {
context
.policy_registry
.init_cache_aware_policy(&model_id, &remaining_workers);
}
}
}
if removed_workers.is_empty() {
Err(format!(
"No workers found with prefix {}",
worker_url_prefix
))
} else {
Ok(format!(
"Removed {} DP-aware workers: {:?}",
removed_workers.len(),
removed_workers
))
}
}
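// Example (illustrative): in DP-aware mode, remove_worker("http://host:8000")
// matches the "http://host:8000@" prefix, removing "http://host:8000@0",
// "http://host:8000@1", ..., and rebuilds the cache_aware policy for any
// affected model that still has workers.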
/// Create a basic worker
fn create_basic_worker(
url: String,
worker_type: WorkerType,
connection_mode: ConnectionMode,
api_key: Option<String>,
labels: Option<HashMap<String, String>>,
circuit_breaker_config: CircuitBreakerConfig,
health_config: HealthConfig,
) -> Arc<dyn Worker> {
let mut builder = BasicWorkerBuilder::new(url)
.worker_type(worker_type)
.connection_mode(connection_mode)
.circuit_breaker_config(circuit_breaker_config)
.health_config(health_config);
if let Some(key) = api_key {
builder = builder.api_key(key);
}
if let Some(worker_labels) = labels {
builder = builder.labels(worker_labels);
}
let worker = builder.build();
Arc::new(worker) as Arc<dyn Worker>
}
/// Register a worker and update policies
fn register_worker(
worker: Arc<dyn Worker>,
registry: &Arc<WorkerRegistry>,
registered_workers: &mut HashMap<String, Vec<Arc<dyn Worker>>>,
policy_registry: Option<&Arc<PolicyRegistry>>,
) {
let model_id = worker.model_id();
let url = worker.url();
let worker_id = registry.register(Arc::clone(&worker));
info!("Registered worker {} with ID {:?}", url, worker_id);
registered_workers
.entry(model_id.to_string())
.or_default()
.push(Arc::clone(&worker));
if let Some(policy_reg) = policy_registry {
policy_reg.on_worker_added(model_id, None);
}
}
/// Initialize cache-aware policies
fn initialize_cache_policies(
registered_workers: &HashMap<String, Vec<Arc<dyn Worker>>>,
registry: &Arc<WorkerRegistry>,
policy_registry: Option<&Arc<PolicyRegistry>>,
) {
if let Some(policy_reg) = policy_registry {
for model_id in registered_workers.keys() {
let all_model_workers = registry.get_by_model_fast(model_id);
if let Some(policy) = policy_reg.get_policy(model_id) {
if policy.name() == "cache_aware" {
policy_reg.init_cache_aware_policy(model_id, &all_model_workers);
}
}
}
}
}
/// Wait for workers to become healthy
async fn wait_for_healthy_workers(
registry: &Arc<WorkerRegistry>,
timeout_secs: u64,
check_interval_secs: u64,
) -> Result<(), String> {
let timeout = Duration::from_secs(timeout_secs);
let check_interval = Duration::from_secs(check_interval_secs);
let start_time = std::time::Instant::now();
info!(
"Waiting for workers to become healthy (timeout: {}s)",
timeout_secs
);
let workers = registry.get_all();
if workers.is_empty() {
info!("No workers to wait for, continuing");
return Ok(());
}
info!(
"Marking {} workers as unhealthy before initial health checks",
workers.len()
);
for worker in &workers {
worker.set_healthy(false);
}
info!(
"Performing initial health checks for {} workers",
workers.len()
);
let health_check_futures: Vec<_> = workers
.iter()
.map(|worker| {
let w = worker.clone();
let url = worker.url().to_string();
async move {
match w.check_health_async().await {
Ok(_) => {
w.set_healthy(true);
debug!(
"Worker {} passed initial health check and marked healthy",
url
);
Ok(url)
}
Err(e) => {
warn!("Worker {} failed initial health check: {}", url, e);
Err(url)
}
}
}
})
.collect();
let health_results = future::join_all(health_check_futures).await;
let failed_checks: Vec<_> = health_results.into_iter().filter_map(|r| r.err()).collect();
if !failed_checks.is_empty() {
info!(
"Initial health check: {} workers failed: {:?}",
failed_checks.len(),
failed_checks
);
}
loop {
let workers = registry.get_all();
let healthy_workers: Vec<_> = workers
.iter()
.filter(|w| w.is_healthy())
.map(|w| w.url().to_string())
.collect();
let unhealthy_workers: Vec<_> = workers
.iter()
.filter(|w| !w.is_healthy())
.map(|w| w.url().to_string())
.collect();
if unhealthy_workers.is_empty() {
info!(
"All {} workers are healthy: {:?}",
workers.len(),
healthy_workers
);
return Ok(());
}
if start_time.elapsed() > timeout {
error!(
"Workers failed to become healthy after {}s. Unhealthy: {:?}, Healthy: {:?}",
timeout_secs, unhealthy_workers, healthy_workers
);
return Err(format!(
"Workers failed to become healthy after {}s. Unhealthy: {:?}",
timeout_secs, unhealthy_workers
));
}
info!(
"Waiting for {} workers to become healthy. Unhealthy: {:?}",
unhealthy_workers.len(),
unhealthy_workers
);
let unhealthy_workers_to_check = workers
.iter()
.filter(|w| !w.is_healthy())
.cloned()
.collect::<Vec<_>>();
for worker in unhealthy_workers_to_check {
let url = worker.url().to_string();
match worker.check_health_async().await {
Ok(_) => {
if !worker.is_healthy() {
worker.set_healthy(true);
debug!("Worker {} now healthy after health check", url);
}
}
Err(e) => {
debug!("Worker {} health check failed: {}", url, e);
}
}
}
tokio::time::sleep(check_interval).await;
}
}
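// Illustrative call (timeout values assumed): with timeout_secs = 300 and
// check_interval_secs = 10, unhealthy workers are re-checked roughly every
// 10s, up to ~30 rounds, before the timeout error is returned:
//
//     WorkerManager::wait_for_healthy_workers(&registry, 300, 10).await?;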
/// Parse server info from JSON response
fn parse_server_info(json: Value) -> Result<ServerInfo, String> {
Ok(ServerInfo {
model_id: json
.get("model_id")
.and_then(|v| v.as_str())
.map(String::from)
.or_else(|| json.get("model").and_then(|v| v.as_str()).map(String::from)),
model_path: json
.get("model_path")
.and_then(|v| v.as_str())
.map(String::from),
dp_size: json
.get("dp_size")
.and_then(|v| v.as_u64())
.map(|v| v as usize),
version: json
.get("version")
.and_then(|v| v.as_str())
.map(String::from),
max_batch_size: json
.get("max_batch_size")
.and_then(|v| v.as_u64())
.map(|v| v as usize),
max_total_tokens: json
.get("max_total_tokens")
.and_then(|v| v.as_u64())
.map(|v| v as usize),
max_prefill_tokens: json
.get("max_prefill_tokens")
.and_then(|v| v.as_u64())
.map(|v| v as usize),
max_running_requests: json
.get("max_running_requests")
.and_then(|v| v.as_u64())
.map(|v| v as usize),
max_num_reqs: json
.get("max_num_reqs")
.and_then(|v| v.as_u64())
.map(|v| v as usize),
})
}
/// Convert config connection mode to core connection mode
fn convert_connection_mode(
config_mode: &ConfigConnectionMode,
_sample_url: Option<&String>,
) -> ConnectionMode {
match config_mode {
ConfigConnectionMode::Http => ConnectionMode::Http,
ConfigConnectionMode::Grpc => ConnectionMode::Grpc { port: None },
}
}
/// Convert config circuit breaker to core circuit breaker
fn convert_circuit_breaker_config(config: &ConfigCircuitBreakerConfig) -> CircuitBreakerConfig {
CircuitBreakerConfig {
failure_threshold: config.failure_threshold,
success_threshold: config.success_threshold,
timeout_duration: Duration::from_secs(config.timeout_duration_secs),
window_duration: Duration::from_secs(config.window_duration_secs),
}
}
/// Convert config health check to core health config
fn convert_health_config(config: &HealthCheckConfig) -> HealthConfig {
HealthConfig {
timeout_secs: config.timeout_secs,
check_interval_secs: config.check_interval_secs,
endpoint: config.endpoint.clone(),
failure_threshold: config.failure_threshold,
success_threshold: config.success_threshold,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_server_info() {
let json = serde_json::json!({
"model_id": "llama-3",
"model_path": "/models/llama-3",
"dp_size": 4,
"version": "0.1.0"
});
let info = WorkerManager::parse_server_info(json).unwrap();
assert_eq!(info.model_id, Some("llama-3".to_string()));
assert_eq!(info.dp_size, Some(4));
}
#[test]
fn test_parse_server_info_with_fallback() {
// Test with "model" instead of "model_id"
let json = serde_json::json!({
"model": "gpt-4",
"dp_size": 2
});
let info = WorkerManager::parse_server_info(json).unwrap();
assert_eq!(info.model_id, Some("gpt-4".to_string()));
assert_eq!(info.dp_size, Some(2));
}
#[test]
fn test_parse_server_info_minimal() {
let json = serde_json::json!({});
let info = WorkerManager::parse_server_info(json).unwrap();
assert_eq!(info.model_id, None);
assert_eq!(info.dp_size, None);
}
}
......@@ -34,7 +34,6 @@ impl Default for WorkerId {
}
}
/// Type alias for the model index to reduce complexity
type ModelIndex = Arc<DashMap<String, Arc<RwLock<Vec<Arc<dyn Worker>>>>>>;
/// Worker registry with model-based indexing
......@@ -54,8 +53,7 @@ pub struct WorkerRegistry {
/// Workers indexed by connection mode
connection_workers: Arc<DashMap<ConnectionMode, Vec<WorkerId>>>,
/// URL to worker ID mapping (for backward compatibility)
/// URL to worker ID mapping
url_to_id: Arc<DashMap<String, WorkerId>>,
}
......
......@@ -8,7 +8,7 @@ use crate::grpc::SglangSchedulerClient;
use crate::metrics::RouterMetrics;
use crate::policies::{LoadBalancingPolicy, PolicyRegistry};
use crate::reasoning_parser::ParserFactory;
use crate::routers::{RouterTrait, WorkerManagement};
use crate::routers::RouterTrait;
use crate::tokenizer::traits::Tokenizer;
use crate::tool_parser::ParserRegistry;
use async_trait::async_trait;
......@@ -350,42 +350,3 @@ impl RouterTrait for GrpcPDRouter {
(StatusCode::SERVICE_UNAVAILABLE).into_response()
}
}
#[async_trait]
impl WorkerManagement for GrpcPDRouter {
async fn add_worker(
&self,
_worker_url: &str,
_api_key: &Option<String>,
) -> Result<String, String> {
Err("Not implemented".to_string())
}
fn remove_worker(&self, _worker_url: &str) {}
fn get_worker_urls(&self) -> Vec<String> {
let mut urls = Vec::new();
// Get gRPC prefill worker URLs only
let prefill_workers = self.worker_registry.get_workers_filtered(
None,
Some(WorkerType::Prefill {
bootstrap_port: None,
}),
Some(crate::core::ConnectionMode::Grpc { port: None }),
false,
);
urls.extend(prefill_workers.iter().map(|w| w.url().to_string()));
// Get gRPC decode worker URLs only
let decode_workers = self.worker_registry.get_workers_filtered(
None,
Some(WorkerType::Decode),
Some(crate::core::ConnectionMode::Grpc { port: None }),
false,
);
urls.extend(decode_workers.iter().map(|w| w.url().to_string()));
urls
}
}
......@@ -8,7 +8,7 @@ use crate::grpc::SglangSchedulerClient;
use crate::metrics::RouterMetrics;
use crate::policies::{LoadBalancingPolicy, PolicyRegistry};
use crate::reasoning_parser::ParserFactory;
use crate::routers::{RouterTrait, WorkerManagement};
use crate::routers::RouterTrait;
use crate::tokenizer::traits::Tokenizer;
use crate::tool_parser::ParserRegistry;
use async_trait::async_trait;
......@@ -279,29 +279,3 @@ impl RouterTrait for GrpcRouter {
(StatusCode::SERVICE_UNAVAILABLE).into_response()
}
}
#[async_trait]
impl WorkerManagement for GrpcRouter {
async fn add_worker(
&self,
_worker_url: &str,
_api_key: &Option<String>,
) -> Result<String, String> {
Err("Not implemented".to_string())
}
fn remove_worker(&self, _worker_url: &str) {}
fn get_worker_urls(&self) -> Vec<String> {
self.worker_registry
.get_workers_filtered(
None, // any model
Some(WorkerType::Regular),
Some(crate::core::ConnectionMode::Grpc { port: None }),
false, // include all workers
)
.iter()
.map(|w| w.url().to_string())
.collect()
}
}
......@@ -65,25 +65,6 @@ impl OpenAIRouter {
}
}
#[async_trait]
impl super::super::WorkerManagement for OpenAIRouter {
async fn add_worker(
&self,
_worker_url: &str,
_api_key: &Option<String>,
) -> Result<String, String> {
Err("Cannot add workers to OpenAI router".to_string())
}
fn remove_worker(&self, _worker_url: &str) {
// No-op for OpenAI router
}
fn get_worker_urls(&self) -> Vec<String> {
vec![self.base_url.clone()]
}
}
#[async_trait]
impl super::super::RouterTrait for OpenAIRouter {
fn as_any(&self) -> &dyn Any {
......
// PD (Prefill-Decode) Router Implementation
// This module handles routing for disaggregated prefill-decode systems
use super::pd_types::{api_path, PDRouterError};
use super::pd_types::api_path;
use crate::config::types::RetryConfig;
use crate::core::{
is_retryable_status, BasicWorkerBuilder, CircuitBreakerConfig, ConnectionMode, RetryExecutor,
Worker, WorkerLoadGuard, WorkerRegistry, WorkerType,
is_retryable_status, ConnectionMode, RetryExecutor, Worker, WorkerLoadGuard, WorkerRegistry,
WorkerType,
};
use crate::metrics::RouterMetrics;
use crate::policies::{LoadBalancingPolicy, PolicyRegistry};
......@@ -13,7 +11,7 @@ use crate::protocols::spec::{
ResponsesRequest, StringOrArray, UserMessageContent,
};
use crate::routers::header_utils;
use crate::routers::{RouterTrait, WorkerManagement};
use crate::routers::RouterTrait;
use async_trait::async_trait;
use axum::{
body::Body,
......@@ -37,22 +35,15 @@ use tracing::{debug, error, info, warn};
pub struct PDRouter {
pub worker_registry: Arc<WorkerRegistry>,
pub policy_registry: Arc<PolicyRegistry>,
pub worker_startup_timeout_secs: u64,
pub worker_startup_check_interval_secs: u64,
pub worker_loads: Arc<tokio::sync::watch::Receiver<HashMap<String, isize>>>,
pub load_monitor_handle: Option<Arc<tokio::task::JoinHandle<()>>>,
pub client: Client,
// Dedicated client for prefill fire-and-forget (non-logprob) requests
pub prefill_client: Client,
pub retry_config: RetryConfig,
pub circuit_breaker_config: CircuitBreakerConfig,
pub api_key: Option<String>,
// Channel for sending prefill responses to background workers for draining
prefill_drain_tx: mpsc::Sender<reqwest::Response>,
}
// Request context for PD router operations
#[derive(Clone)]
struct PDRequestContext<'a> {
route: &'static str,
......@@ -64,20 +55,6 @@ struct PDRequestContext<'a> {
}
impl PDRouter {
// Private helper method to perform health check on a new server
async fn wait_for_server_health(&self, url: &str) -> Result<(), PDRouterError> {
crate::routers::http::router::Router::wait_for_healthy_workers(
&[url.to_string()],
self.worker_startup_timeout_secs,
self.worker_startup_check_interval_secs,
)
.await
.map_err(|_| PDRouterError::HealthCheckFailed {
url: url.to_string(),
})
}
// Generic helper for processing all workers with an endpoint
async fn process_workers(
&self,
worker_type_enum: WorkerType,
......@@ -87,11 +64,9 @@ impl PDRouter {
let mut results = Vec::new();
let mut errors = Vec::new();
// Get workers from registry based on type
let workers = self.worker_registry.get_by_type(&worker_type_enum);
let urls: Vec<String> = workers.iter().map(|w| w.url().to_string()).collect();
// Process each worker
for worker_url in urls {
let url = format!("{}/{}", worker_url, endpoint);
match self.client.post(&url).send().await {
......@@ -119,7 +94,6 @@ impl PDRouter {
(w.url().to_string(), w.api_key().clone())
}
// Helper to get prefill worker URLs
fn get_prefill_worker_urls_with_api_key(&self) -> Vec<(String, Option<String>)> {
self.worker_registry
.get_prefill_workers()
......@@ -128,7 +102,6 @@ impl PDRouter {
.collect()
}
// Helper to get decode worker URLs
fn get_decode_worker_urls_with_api_key(&self) -> Vec<(String, Option<String>)> {
self.worker_registry
.get_decode_workers()
......@@ -137,7 +110,6 @@ impl PDRouter {
.collect()
}
// Helper for proxying requests to the first prefill worker
async fn proxy_to_first_prefill_worker(
&self,
endpoint: &str,
......@@ -157,7 +129,6 @@ impl PDRouter {
}
}
// Generic helper for proxying to a specific worker
async fn proxy_to_worker(
&self,
worker_url: String,
......@@ -167,7 +138,6 @@ impl PDRouter {
let url = format!("{}/{}", worker_url, endpoint);
let mut request_builder = self.client.get(&url);
// Add headers if provided
if let Some(headers) = headers {
for (name, value) in headers {
request_builder = request_builder.header(name, value);
......@@ -211,159 +181,6 @@ impl PDRouter {
}
}
pub async fn add_prefill_server(
&self,
url: String,
api_key: Option<String>,
bootstrap_port: Option<u16>,
) -> Result<String, PDRouterError> {
// Wait for the new server to be healthy
self.wait_for_server_health(&url).await?;
// Check if already exists
if self.worker_registry.get_by_url(&url).is_some() {
return Err(PDRouterError::WorkerAlreadyExists { url: url.clone() });
}
// Create Worker for the new prefill server with circuit breaker configuration
// TODO: In IGW mode, fetch model_id from worker's /get_model_info endpoint
let worker_builder = BasicWorkerBuilder::new(url.clone())
.worker_type(WorkerType::Prefill { bootstrap_port })
.circuit_breaker_config(self.circuit_breaker_config.clone());
let worker = if let Some(api_key) = api_key {
worker_builder.api_key(api_key).build()
} else {
worker_builder.build()
};
let worker_arc: Arc<dyn Worker> = Arc::new(worker);
// Register the worker in the registry
self.worker_registry.register(worker_arc.clone());
// Notify PolicyRegistry about the new worker
let model_id = worker_arc.model_id();
self.policy_registry.on_worker_added(model_id, None);
// Initialize cache-aware policy if applicable
let model_workers = self.worker_registry.get_by_model_fast(model_id);
self.policy_registry
.init_cache_aware_policy(model_id, &model_workers);
info!("Added prefill server: {}", url);
Ok(format!("Successfully added prefill server: {}", url))
}
pub async fn add_decode_server(
&self,
url: String,
api_key: Option<String>,
) -> Result<String, PDRouterError> {
// Wait for the new server to be healthy
self.wait_for_server_health(&url).await?;
// Check if already exists
if self.worker_registry.get_by_url(&url).is_some() {
return Err(PDRouterError::WorkerAlreadyExists { url: url.clone() });
}
// Create Worker for the new decode server with circuit breaker configuration
// TODO: In IGW mode, fetch model_id from worker's /get_model_info endpoint
let worker_builder = BasicWorkerBuilder::new(url.clone())
.worker_type(WorkerType::Decode)
.circuit_breaker_config(self.circuit_breaker_config.clone());
let worker = if let Some(api_key) = api_key {
worker_builder.api_key(api_key).build()
} else {
worker_builder.build()
};
let worker_arc: Arc<dyn Worker> = Arc::new(worker);
// Register the worker in the registry
self.worker_registry.register(worker_arc.clone());
// Notify PolicyRegistry about the new worker
let model_id = worker_arc.model_id();
self.policy_registry.on_worker_added(model_id, None);
// Initialize cache-aware policy if applicable
let model_workers = self.worker_registry.get_by_model_fast(model_id);
self.policy_registry
.init_cache_aware_policy(model_id, &model_workers);
info!("Added decode server: {}", url);
Ok(format!("Successfully added decode server: {}", url))
}
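// Usage sketch (URL assumed): adding a decode server at runtime follows the
// same health-wait -> duplicate-check -> register -> policy-notify flow as
// add_prefill_server:
//
//     router.add_decode_server("http://decode-2:8000".to_string(), None).await?;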
pub async fn remove_prefill_server(&self, url: &str) -> Result<String, PDRouterError> {
// Check if worker exists and get model_id
let model_id = match self.worker_registry.get_by_url(url) {
Some(worker) => worker.model_id().to_string(),
None => {
return Err(PDRouterError::WorkerNotFound {
url: url.to_string(),
});
}
};
// Remove from registry
let removed = self.worker_registry.remove_by_url(url);
if removed.is_some() {
// Notify PolicyRegistry about the removed worker
self.policy_registry.on_worker_removed(&model_id);
// Remove from cache-aware policy if applicable
self.policy_registry
.remove_worker_from_cache_aware(&model_id, url);
}
if removed.is_some() {
info!("Removed prefill server: {}", url);
Ok(format!("Successfully removed prefill server: {}", url))
} else {
Err(PDRouterError::WorkerNotFound {
url: url.to_string(),
})
}
}
pub async fn remove_decode_server(&self, url: &str) -> Result<String, PDRouterError> {
// Check if worker exists and get model_id
let model_id = match self.worker_registry.get_by_url(url) {
Some(worker) => worker.model_id().to_string(),
None => {
return Err(PDRouterError::WorkerNotFound {
url: url.to_string(),
});
}
};
// Remove from registry
let removed = self.worker_registry.remove_by_url(url);
if removed.is_some() {
// Notify PolicyRegistry about the removed worker
self.policy_registry.on_worker_removed(&model_id);
// Remove from cache-aware policy if applicable
self.policy_registry
.remove_worker_from_cache_aware(&model_id, url);
}
if removed.is_some() {
info!("Removed decode server: {}", url);
Ok(format!("Successfully removed decode server: {}", url))
} else {
Err(PDRouterError::WorkerNotFound {
url: url.to_string(),
})
}
}
pub async fn new(ctx: &Arc<crate::server::AppContext>) -> Result<Self, String> {
let prefill_workers = ctx.worker_registry.get_workers_filtered(
None, // any model
......@@ -381,33 +198,20 @@ impl PDRouter {
false, // include all workers
);
// Get all worker URLs for monitoring
let all_urls: Vec<String> = prefill_workers
.iter()
.chain(decode_workers.iter())
.map(|w| w.url().to_string())
.collect();
// Get all worker API keys for monitoring
let all_api_keys: Vec<Option<String>> = prefill_workers
.iter()
.chain(decode_workers.iter())
.map(|w| w.api_key().clone())
.collect();
// Convert config CircuitBreakerConfig to core CircuitBreakerConfig
let circuit_breaker_config = ctx.router_config.effective_circuit_breaker_config();
let core_cb_config = CircuitBreakerConfig {
failure_threshold: circuit_breaker_config.failure_threshold,
success_threshold: circuit_breaker_config.success_threshold,
timeout_duration: Duration::from_secs(circuit_breaker_config.timeout_duration_secs),
window_duration: Duration::from_secs(circuit_breaker_config.window_duration_secs),
};
// Set up background load monitoring for power-of-two selection
let (tx, rx) = tokio::sync::watch::channel(HashMap::new());
let worker_loads = Arc::new(rx);
// Get policies from registry to check if we need load monitoring
let prefill_policy = ctx.policy_registry.get_prefill_policy();
let decode_policy = ctx.policy_registry.get_decode_policy();
......@@ -436,7 +240,6 @@ impl PDRouter {
None
};
// Build a dedicated prefill client for fire-and-forget semantics
let prefill_client = Client::builder()
.pool_max_idle_per_host(0)
.http1_only()
......@@ -445,17 +248,12 @@ impl PDRouter {
.build()
.map_err(|e| format!("Failed to build prefill client: {}", e))?;
// Create bounded channel for prefill response draining
// Larger buffer for high concurrency scenarios
let (prefill_drain_tx, mut prefill_drain_rx) = mpsc::channel::<reqwest::Response>(2000);
// Spawn a coordinator with limited concurrent drain tasks
// This prevents unbounded task spawning under extreme load
// TODO: reevaluate whether a simpler approach would suffice (e.g., do we really need fire-and-forget here?)
tokio::spawn(async move {
info!("Prefill drain coordinator started");
// Use a semaphore to limit concurrent drain operations
let max_concurrent_drains = 100;
let semaphore = Arc::new(tokio::sync::Semaphore::new(max_concurrent_drains));
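// Each drain holds a semaphore permit for its duration, capping concurrency
// at max_concurrent_drains; excess responses wait in the bounded channel
// instead of spawning unbounded tasks.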
......@@ -464,7 +262,6 @@ impl PDRouter {
match permit {
Ok(permit) => {
// Spawn a task to drain this response
tokio::spawn(async move {
let url = response.url().to_string();
let status = response.status();
......@@ -474,8 +271,6 @@ impl PDRouter {
RouterMetrics::record_pd_prefill_error(&url);
}
// Drain the response body efficiently
// Use streaming to avoid loading entire body into memory
let start = Instant::now();
let mut stream = response.bytes_stream();
let mut bytes_drained = 0;
......@@ -495,19 +290,16 @@ impl PDRouter {
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(100) {
// Only log slow drains
debug!(
"Prefill drain: slow drain {} bytes from {} in {:?}",
bytes_drained, url, elapsed
);
}
// Permit is automatically released when dropped
drop(permit);
});
}
Err(_) => {
// Semaphore closed, shutting down
break;
}
}
......@@ -518,22 +310,16 @@ impl PDRouter {
Ok(PDRouter {
worker_registry: Arc::clone(&ctx.worker_registry),
policy_registry: Arc::clone(&ctx.policy_registry),
worker_startup_timeout_secs: ctx.router_config.worker_startup_timeout_secs,
worker_startup_check_interval_secs: ctx
.router_config
.worker_startup_check_interval_secs,
worker_loads,
load_monitor_handle,
client: ctx.client.clone(),
prefill_client,
prefill_drain_tx,
retry_config: ctx.router_config.effective_retry_config(),
circuit_breaker_config: core_cb_config,
api_key: ctx.router_config.api_key.clone(),
})
}
// Helper to handle server selection errors
fn handle_server_selection_error(error: String) -> Response {
error!("Failed to select PD pair error={}", error);
RouterMetrics::record_pd_error("server_selection");
......@@ -544,7 +330,6 @@ impl PDRouter {
.into_response()
}
// Helper to handle serialization errors
fn handle_serialization_error(error: impl std::fmt::Display) -> Response {
error!("Failed to serialize request error={}", error);
(
......@@ -554,27 +339,21 @@ impl PDRouter {
.into_response()
}
// Helper to determine batch size from a GenerateRequest
fn get_generate_batch_size(req: &GenerateRequest) -> Option<usize> {
// Check prompt array
if let Some(StringOrArray::Array(arr)) = &req.prompt {
if !arr.is_empty() {
return Some(arr.len());
}
}
// Check text array
if let Some(text) = &req.text {
if text.contains("[") && text.contains("]") {
// This is a simplified check - in reality we'd need to parse JSON
return None; // For now, fall back to non-batch
return None;
}
}
None
}
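// Illustrative behavior (request values assumed): an array prompt of length 3
// yields Some(3), while a plain string prompt (with no text field) yields None
// and is routed as a non-batch request:
//
//     // prompt = Some(StringOrArray::Array(vec!["a".into(), "b".into(), "c".into()])) -> Some(3)
//     // prompt = Some(StringOrArray::String("hello".into()))                          -> None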
// Helper to determine batch size from a ChatCompletionRequest
fn get_chat_batch_size(req: &ChatCompletionRequest) -> Option<usize> {
// Check 'n' parameter for multiple responses
if let Some(n) = req.n {
if n > 1 {
return Some(n as usize);
......@@ -583,9 +362,7 @@ impl PDRouter {
None
}
// Helper to determine batch size from a CompletionRequest
fn get_completion_batch_size(req: &CompletionRequest) -> Option<usize> {
// Check prompt array
if let StringOrArray::Array(arr) = &req.prompt {
if !arr.is_empty() {
return Some(arr.len());
......@@ -594,7 +371,6 @@ impl PDRouter {
None
}
// Helper to inject bootstrap fields into an existing JSON request value
fn inject_bootstrap_into_value(
mut original: Value,
prefill_worker: &dyn Worker,
......@@ -659,7 +435,6 @@ impl PDRouter {
Ok(original)
}
// Execute the dual dispatch to prefill and decode servers with retries and bootstrap injection
async fn execute_dual_dispatch<T: Serialize + Clone>(
&self,
headers: Option<&HeaderMap>,
......@@ -671,14 +446,12 @@ impl PDRouter {
let route = context.route;
RetryExecutor::execute_response_with_retry(
&self.retry_config,
// Operation per attempt
{
let original_request = original_request.clone();
move |attempt: u32| {
let original_request = original_request.clone();
let context = context.clone();
async move {
// Select workers fresh for each attempt
let (prefill, decode) = match self
.select_pd_pair(context.request_text.as_deref(), context.model_id)
.await
......@@ -697,13 +470,11 @@ impl PDRouter {
decode.url()
);
// Serialize the original request
let mut json_request = match serde_json::to_value(&original_request) {
Ok(v) => v,
Err(e) => return Self::handle_serialization_error(e),
};
// Inject bootstrap based on current prefill worker
json_request = match Self::inject_bootstrap_into_value(
json_request,
prefill.as_ref(),
......@@ -713,7 +484,6 @@ impl PDRouter {
Err(e) => return Self::handle_serialization_error(e),
};
// Execute the actual dual dispatch
let response = self
.execute_dual_dispatch_internal(
headers,
......@@ -725,7 +495,6 @@ impl PDRouter {
)
.await;
// Record outcomes for circuit breakers
let _status = response.status();
let not_error = _status.is_success() || _status.is_client_error();
prefill.record_outcome(not_error);
......@@ -735,14 +504,11 @@ impl PDRouter {
}
}
},
// Should retry predicate
|res, _attempt| is_retryable_status(res.status()),
// On backoff hook
|delay, attempt| {
RouterMetrics::record_retry(route);
RouterMetrics::record_retry_backoff_duration(delay, attempt);
},
// On exhausted hook
|| RouterMetrics::record_retries_exhausted(route),
)
.await
......@@ -849,7 +615,6 @@ impl PDRouter {
tokio::join!(prefill_request.send(), decode_request.send());
debug!("Received responses from both servers");
// Update metrics
let duration = start_time.elapsed();
RouterMetrics::record_pd_request_duration(context.route, duration);
RouterMetrics::record_pd_request(context.route);
......@@ -995,7 +760,6 @@ impl PDRouter {
let decode_result = decode_future.await;
debug!("Received decode response");
// Update metrics
let duration = start_time.elapsed();
RouterMetrics::record_pd_request_duration(context.route, duration);
RouterMetrics::record_pd_request(context.route);
......@@ -1074,23 +838,18 @@ impl PDRouter {
}
}
// Check if either prefill or decode policy needs request text
fn policies_need_request_text(&self) -> bool {
// Check both prefill and decode policies
let prefill_policy = self.policy_registry.get_prefill_policy();
let decode_policy = self.policy_registry.get_decode_policy();
prefill_policy.needs_request_text() || decode_policy.needs_request_text()
}
// Select a pair of prefill and decode servers considering circuit breaker state
async fn select_pd_pair(
&self,
request_text: Option<&str>,
model_id: Option<&str>,
) -> Result<(Arc<dyn Worker>, Arc<dyn Worker>), String> {
// Get workers from registry - filter by model if provided
let prefill_workers = if let Some(model) = model_id {
// Get model-specific workers and filter for prefill type
self.worker_registry
.get_by_model_fast(model)
.into_iter()
......@@ -1101,7 +860,6 @@ impl PDRouter {
};
let decode_workers = if let Some(model) = model_id {
// Get model-specific workers and filter for decode type
self.worker_registry
.get_by_model_fast(model)
.into_iter()
......@@ -1111,8 +869,6 @@ impl PDRouter {
self.worker_registry.get_decode_workers()
};
// Select workers using helper function
// Use separate policies for prefill and decode to avoid counter conflicts
let prefill_policy = self.policy_registry.get_prefill_policy();
let decode_policy = self.policy_registry.get_decode_policy();
......@@ -1133,14 +889,12 @@ impl PDRouter {
Ok((prefill, decode))
}
// Helper function to select a worker using the policy (Arc version)
fn pick_worker_by_policy_arc(
workers: &[Arc<dyn Worker>],
policy: &dyn LoadBalancingPolicy,
request_text: Option<&str>,
worker_type: &str,
) -> Result<Arc<dyn Worker>, String> {
// Check if we have any workers
if workers.is_empty() {
return Err(format!(
"No {} workers available. Please check if {} servers are configured and healthy.",
......@@ -1148,7 +902,6 @@ impl PDRouter {
));
}
// Filter available workers (healthy + circuit breaker not open)
let available_workers: Vec<Arc<dyn Worker>> = workers
.iter()
.filter(|w| w.is_available())
......@@ -1162,7 +915,6 @@ impl PDRouter {
));
}
// Let policy select from available workers (no conversion needed now!)
let selected_idx = policy
.select_worker(&available_workers, request_text)
.ok_or_else(|| {
......@@ -1173,11 +925,9 @@ impl PDRouter {
)
})?;
// Return the selected Arc worker
Ok(available_workers[selected_idx].clone())
}
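// Illustrative call (assuming prefill_workers, prefill_policy, and
// request_text are in scope as in select_pd_pair): availability filtering
// happens here, so the policy never sees unhealthy or circuit-open workers:
//
//     let prefill = Self::pick_worker_by_policy_arc(
//         &prefill_workers,
//         prefill_policy.as_ref(),
//         request_text,
//         "prefill",
//     )?;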
// Background task to monitor worker loads with shared client
async fn monitor_worker_loads_with_client(
worker_urls: Vec<String>,
worker_api_keys: Vec<Option<String>>,
......@@ -1212,11 +962,9 @@ impl PDRouter {
debug!("Worker loads updated: {:?}", loads);
// Update both policies with current loads
prefill_policy.update_loads(&loads);
decode_policy.update_loads(&loads);
// Check if receiver is still active
if tx.send(loads).is_err() {
info!("Load monitor receiver dropped, shutting down monitor task");
break;
......@@ -1226,7 +974,6 @@ impl PDRouter {
}
}
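// Consumer-side sketch (worker URL assumed): the monitor publishes load maps
// through a tokio watch channel, so readers take a cheap snapshot instead of
// holding a lock:
//
//     let loads = worker_loads.borrow().clone();
//     let load = loads.get("http://worker-a:8000").copied().unwrap_or(0);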
// Helper to create a streaming response
#[allow(clippy::too_many_arguments)]
fn create_streaming_response(
&self,
......@@ -1239,35 +986,29 @@ impl PDRouter {
prefill: &dyn Worker,
decode: &dyn Worker,
) -> Response {
// For streaming, increment load now - will be decremented when streaming completes
prefill.increment_load();
decode.increment_load();
// Store URLs to find workers later for decrementing
let prefill_url = prefill.url().to_string();
let decode_url_str = decode.url().to_string();
let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
// Clone the registry for the spawned task
let registry = self.worker_registry.clone();
tokio::spawn(async move {
// Use a flag to track whether stream completed successfully
let mut stream_completed = false;
futures_util::pin_mut!(stream);
while let Some(chunk_result) = stream.next().await {
match chunk_result {
Ok(chunk) => {
// Check for stream end marker to decrement load early
let is_done = chunk
.as_ref()
.windows(12)
.any(|window| window == b"data: [DONE]");
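// b"data: [DONE]" is exactly 12 bytes, so windows(12) matches the SSE
// terminator even when it is not aligned to a chunk boundary.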
let result = if return_logprob && prefill_logprobs.is_some() {
// Try to merge logprobs
Self::merge_streaming_logprobs(prefill_logprobs.clone(), &chunk)
.unwrap_or(chunk)
} else {
......@@ -1278,7 +1019,6 @@ impl PDRouter {
break;
}
// If we see the done marker, decrement load immediately
if is_done {
stream_completed = true;
break;
......@@ -1295,8 +1035,6 @@ impl PDRouter {
}
}
// Always decrement load after streaming (either completes or errors)
// Find and decrement prefill worker
if let Some(worker) = registry.get_by_url(&prefill_url) {
worker.decrement_load();
debug!(
......@@ -1305,7 +1043,6 @@ impl PDRouter {
);
}
// Find and decrement decode worker
if let Some(worker) = registry.get_by_url(&decode_url_str) {
worker.decrement_load();
debug!(
......@@ -1321,7 +1058,6 @@ impl PDRouter {
let mut response = Response::new(body);
*response.status_mut() = status;
// Use provided headers or create new ones, then ensure content-type is set for streaming
let mut headers = headers.unwrap_or_default();
headers.insert(CONTENT_TYPE, HeaderValue::from_static("text/event-stream"));
*response.headers_mut() = headers;
......@@ -1589,42 +1325,6 @@ async fn get_worker_load(
}
}
#[async_trait]
impl WorkerManagement for PDRouter {
async fn add_worker(
&self,
_worker_url: &str,
_api_key: &Option<String>,
) -> Result<String, String> {
// For PD router, we don't support adding workers via this generic method
Err(
"PD router requires specific add_prefill_server or add_decode_server methods"
.to_string(),
)
}
fn remove_worker(&self, worker_url: &str) {
// Remove from registry
if let Some(worker) = self.worker_registry.remove_by_url(worker_url) {
match worker.worker_type() {
WorkerType::Prefill { .. } => {
info!("Removed prefill worker: {}", worker_url);
}
WorkerType::Decode => {
info!("Removed decode worker: {}", worker_url);
}
_ => {
info!("Removed worker: {}", worker_url);
}
}
}
}
fn get_worker_urls(&self) -> Vec<String> {
self.worker_registry.get_all_urls()
}
}
#[async_trait]
impl RouterTrait for PDRouter {
fn as_any(&self) -> &dyn std::any::Any {
......@@ -1774,11 +1474,9 @@ impl RouterTrait for PDRouter {
body: &GenerateRequest,
model_id: Option<&str>,
) -> Response {
// Extract parameters
let is_stream = body.stream;
let return_logprob = body.return_logprob;
// Extract text for cache-aware routing
let request_text = if self.policies_need_request_text() {
body.text
.as_deref()
......@@ -1793,10 +1491,8 @@ impl RouterTrait for PDRouter {
None
};
// Calculate batch size
let batch_size = Self::get_generate_batch_size(body);
// Create context
let context = PDRequestContext {
route: "/generate",
batch_size,
......@@ -1806,7 +1502,6 @@ impl RouterTrait for PDRouter {
model_id,
};
// Execute with retry and bootstrap injection
self.execute_dual_dispatch(headers, body, context).await
}
......@@ -1816,11 +1511,9 @@ impl RouterTrait for PDRouter {
body: &ChatCompletionRequest,
model_id: Option<&str>,
) -> Response {
// Extract parameters
let is_stream = body.stream;
let return_logprob = body.logprobs;
// Extract text for cache-aware routing
let request_text = if self.policies_need_request_text() {
body.messages.first().and_then(|msg| match msg {
ChatMessage::User { content, .. } => match content {
......@@ -1837,7 +1530,6 @@ impl RouterTrait for PDRouter {
// Calculate batch size
let batch_size = Self::get_chat_batch_size(body);
// Create context
let context = PDRequestContext {
route: "/v1/chat/completions",
batch_size,
......@@ -1847,7 +1539,6 @@ impl RouterTrait for PDRouter {
model_id,
};
// Execute with retry and bootstrap injection
self.execute_dual_dispatch(headers, body, context).await
}
......@@ -1857,11 +1548,9 @@ impl RouterTrait for PDRouter {
body: &CompletionRequest,
model_id: Option<&str>,
) -> Response {
// Extract parameters
let is_stream = body.stream;
let return_logprob = body.logprobs.is_some();
// Extract text for cache-aware routing
let request_text = if self.policies_need_request_text() {
match &body.prompt {
StringOrArray::String(s) => Some(s.clone()),
......@@ -1874,7 +1563,6 @@ impl RouterTrait for PDRouter {
// Calculate batch size
let batch_size = Self::get_completion_batch_size(body);
// Create context
let context = PDRequestContext {
route: "/v1/completions",
batch_size,
......@@ -1884,7 +1572,6 @@ impl RouterTrait for PDRouter {
model_id,
};
// Execute with retry and bootstrap injection
self.execute_dual_dispatch(headers, body, context).await
}
......@@ -1943,7 +1630,6 @@ impl RouterTrait for PDRouter {
None
};
// Create context
let context = PDRequestContext {
route: "/v1/rerank",
batch_size: None,
......@@ -1953,7 +1639,6 @@ impl RouterTrait for PDRouter {
model_id,
};
// Execute with retry and bootstrap injection
self.execute_dual_dispatch(headers, body, context).await
}
......@@ -2095,7 +1780,7 @@ impl RouterTrait for PDRouter {
#[cfg(test)]
mod tests {
use super::*;
use crate::core::WorkerType;
use crate::core::{BasicWorkerBuilder, WorkerType};
fn create_test_pd_router() -> PDRouter {
let worker_registry = Arc::new(WorkerRegistry::new());
......@@ -2105,15 +1790,12 @@ mod tests {
PDRouter {
worker_registry,
policy_registry,
worker_startup_timeout_secs: 5,
worker_startup_check_interval_secs: 1,
worker_loads: Arc::new(tokio::sync::watch::channel(HashMap::new()).1),
load_monitor_handle: None,
client: Client::new(),
prefill_client: Client::new(),
prefill_drain_tx: mpsc::channel(100).0,
retry_config: RetryConfig::default(),
circuit_breaker_config: CircuitBreakerConfig::default(),
api_key: Some("test_api_key".to_string()),
}
}
......@@ -2121,135 +1803,15 @@ mod tests {
fn create_test_worker(url: String, worker_type: WorkerType, healthy: bool) -> Box<dyn Worker> {
let worker = BasicWorkerBuilder::new(url)
.worker_type(worker_type)
.api_key("test_api_key")
.build();
worker.set_healthy(healthy);
Box::new(worker)
}
// ============= Worker Management Tests =============
#[tokio::test]
async fn test_add_prefill_server_already_exists() {
let router = create_test_pd_router();
// Add a worker first
let worker = create_test_worker(
"http://localhost:8000".to_string(),
WorkerType::Prefill {
bootstrap_port: Some(8080),
},
true,
);
router.worker_registry.register(Arc::from(worker));
// Adding the same URL again would fail during the health check in a real
// scenario; this unit test exercises only the duplicate-check logic
let exists = router
.worker_registry
.get_by_url("http://localhost:8000")
.is_some();
assert!(exists);
}
#[tokio::test]
async fn test_remove_prefill_server_success() {
let router = create_test_pd_router();
// Add servers first
let worker1 = create_test_worker(
"http://worker1".to_string(),
WorkerType::Prefill {
bootstrap_port: None,
},
true,
);
let worker2 = create_test_worker(
"http://worker2".to_string(),
WorkerType::Prefill {
bootstrap_port: Some(8080),
},
true,
);
router.worker_registry.register(Arc::from(worker1));
router.worker_registry.register(Arc::from(worker2));
// Remove one
let result = router.remove_prefill_server("http://worker1").await;
assert!(result.is_ok());
assert!(result.unwrap().contains("Successfully removed"));
let workers = router.worker_registry.get_prefill_workers();
assert_eq!(workers.len(), 1);
assert_eq!(workers[0].url(), "http://worker2");
}
#[tokio::test]
async fn test_remove_prefill_server_not_found() {
let router = create_test_pd_router();
let result = router.remove_prefill_server("http://nonexistent").await;
assert!(result.is_err());
match result.unwrap_err() {
PDRouterError::WorkerNotFound { url } => {
assert_eq!(url, "http://nonexistent");
}
_ => panic!("Expected WorkerNotFound error"),
}
}
#[tokio::test]
async fn test_remove_decode_server_success() {
let router = create_test_pd_router();
// Add server first
let worker = create_test_worker("http://decode1".to_string(), WorkerType::Decode, true);
router.worker_registry.register(Arc::from(worker));
let result = router.remove_decode_server("http://decode1").await;
assert!(result.is_ok());
assert!(result.unwrap().contains("Successfully removed"));
let workers = router.worker_registry.get_decode_workers();
assert_eq!(workers.len(), 0);
}
// ============= Lock Error Handling Tests =============
#[test]
fn test_registry_operations() {
let router = create_test_pd_router();
// Test registry operations
let workers = router.worker_registry.get_all();
assert_eq!(workers.len(), 0);
// Add a worker
let worker = create_test_worker(
"http://test".to_string(),
WorkerType::Prefill {
bootstrap_port: None,
},
true,
);
router.worker_registry.register(Arc::from(worker));
let workers = router.worker_registry.get_all();
assert_eq!(workers.len(), 1);
let prefill_workers = router.worker_registry.get_prefill_workers();
assert_eq!(prefill_workers.len(), 1);
}
#[tokio::test]
async fn test_select_healthy_prefill_worker() {
let router = create_test_pd_router();
// Add mix of healthy and unhealthy workers
let healthy_worker = create_test_worker(
"http://healthy".to_string(),
WorkerType::Prefill {
......@@ -2276,7 +1838,6 @@ mod tests {
assert!(result.is_ok());
let (prefill, _decode) = result.unwrap();
// Should select the healthy worker
assert_eq!(prefill.url(), "http://healthy");
assert!(prefill.is_healthy());
}
......@@ -2291,13 +1852,10 @@ mod tests {
assert!(result.unwrap_err().contains("No prefill workers available"));
}
// ============= Health Endpoints Tests =============
#[tokio::test]
async fn test_health_endpoints() {
let router = create_test_pd_router();
// Add healthy workers - create_test_worker returns Box<dyn Worker>, convert to Arc
let prefill_worker = create_test_worker(
"http://localhost:8000".to_string(),
WorkerType::Prefill {
......@@ -2314,7 +1872,6 @@ mod tests {
router.worker_registry.register(Arc::from(prefill_worker));
router.worker_registry.register(Arc::from(decode_worker));
// Test health endpoint
let http_req = axum::http::Request::builder()
.body(axum::body::Body::empty())
.unwrap();
......@@ -2322,18 +1879,14 @@ mod tests {
assert_eq!(response.status(), 200);
// Test readiness endpoint
let response = router.readiness();
assert_eq!(response.status(), 200);
}
// ============= Load Monitoring Tests =============
#[tokio::test]
async fn test_load_monitor_updates() {
let power_of_two_policy = Arc::new(crate::policies::PowerOfTwoPolicy::new());
let mut router = create_test_pd_router();
// Set power_of_two policies in the registry
router
.policy_registry
.set_prefill_policy(power_of_two_policy.clone());
......@@ -2341,25 +1894,20 @@ mod tests {
.policy_registry
.set_decode_policy(power_of_two_policy);
// Create load channel
let (tx, rx) = tokio::sync::watch::channel(HashMap::new());
router.worker_loads = Arc::new(rx);
// Simulate load updates
let mut loads = HashMap::new();
loads.insert("http://worker1".to_string(), 10);
loads.insert("http://worker2".to_string(), 5);
let _ = tx.send(loads.clone());
// Router should receive updates
let received = router.worker_loads.borrow().clone();
assert_eq!(received.get("http://worker1"), Some(&10));
assert_eq!(received.get("http://worker2"), Some(&5));
}
// ============= Worker Load Tests =============
#[test]
fn test_worker_load_metrics() {
let prefill_worker = create_test_worker(
......@@ -2372,15 +1920,12 @@ mod tests {
let decode_worker =
create_test_worker("http://decode".to_string(), WorkerType::Decode, true);
// Create load guard for both workers
let _guard =
WorkerLoadGuard::new_multi(vec![prefill_worker.as_ref(), decode_worker.as_ref()]);
// Load should be incremented
assert_eq!(prefill_worker.load(), 1);
assert_eq!(decode_worker.load(), 1);
// Drop guard - load should decrement
drop(_guard);
assert_eq!(prefill_worker.load(), 0);
......@@ -2394,7 +1939,6 @@ mod tests {
let router = create_test_pd_router();
// Add workers - create_test_worker returns Box<dyn Worker>, convert to Arc
let prefill_worker = create_test_worker(
"http://prefill".to_string(),
WorkerType::Prefill {
......@@ -2408,22 +1952,18 @@ mod tests {
router.worker_registry.register(Arc::from(prefill_worker));
router.worker_registry.register(Arc::from(decode_worker));
// Get references to the workers from registry
let prefill_workers = router.worker_registry.get_prefill_workers();
let decode_workers = router.worker_registry.get_decode_workers();
let prefill_ref = prefill_workers[0].clone();
let decode_ref = decode_workers[0].clone();
// Initially load should be 0
assert_eq!(prefill_ref.load(), 0);
assert_eq!(decode_ref.load(), 0);
// Create a mock streaming response
let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
let stream = tokio_stream::wrappers::UnboundedReceiverStream::new(rx);
// Call create_streaming_response which should increment load
let _response = router.create_streaming_response(
stream.map(Ok),
StatusCode::OK,
......@@ -2435,63 +1975,21 @@ mod tests {
decode_ref.as_ref(),
);
// Load should be incremented immediately
assert_eq!(prefill_ref.load(), 1);
assert_eq!(decode_ref.load(), 1);
// Send some data through the stream
tx.send(bytes::Bytes::from("test data")).unwrap();
// Give time for the spawned task to process
sleep(Duration::from_millis(10)).await;
// Load should still be 1 (streaming in progress)
assert_eq!(prefill_ref.load(), 1);
assert_eq!(decode_ref.load(), 1);
// Close the stream
drop(tx);
// Give time for cleanup
sleep(Duration::from_millis(100)).await;
// Load should be decremented after streaming completes
assert_eq!(prefill_ref.load(), 0);
assert_eq!(decode_ref.load(), 0);
}
// ============= Concurrent Operations Tests =============
#[tokio::test]
async fn test_concurrent_worker_operations() {
let router = Arc::new(create_test_pd_router());
let mut handles = vec![];
// Spawn tasks to add workers
for i in 0..5 {
let router_clone = Arc::clone(&router);
let url = format!("http://worker{}", i);
let handle = tokio::spawn(async move {
let worker = create_test_worker(
url,
WorkerType::Prefill {
bootstrap_port: None,
},
true,
);
router_clone.worker_registry.register(Arc::from(worker));
});
handles.push(handle);
}
// Wait for all tasks
for handle in handles {
let _ = handle.await;
}
// Check final state
let workers = router.worker_registry.get_prefill_workers();
assert_eq!(workers.len(), 5);
}
}
use crate::config::types::RetryConfig;
use crate::core::{
is_retryable_status, BasicWorkerBuilder, CircuitBreakerConfig, ConnectionMode, RetryExecutor,
Worker, WorkerRegistry, WorkerType,
is_retryable_status, ConnectionMode, RetryExecutor, Worker, WorkerRegistry, WorkerType,
};
use crate::metrics::RouterMetrics;
use crate::policies::{LoadBalancingPolicy, PolicyRegistry};
......@@ -10,7 +9,7 @@ use crate::protocols::spec::{
RerankRequest, RerankResponse, RerankResult, ResponsesRequest,
};
use crate::routers::header_utils;
use crate::routers::{RouterTrait, WorkerManagement};
use crate::routers::RouterTrait;
use axum::body::to_bytes;
use axum::{
body::Body,
......@@ -27,7 +26,7 @@ use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio_stream::wrappers::UnboundedReceiverStream;
use tracing::{debug, error, info, warn};
use tracing::{debug, error};
/// Regular router that uses injected load balancing policies
#[derive(Debug)]
......@@ -35,13 +34,8 @@ pub struct Router {
worker_registry: Arc<WorkerRegistry>,
policy_registry: Arc<PolicyRegistry>,
client: Client,
worker_startup_timeout_secs: u64,
worker_startup_check_interval_secs: u64,
dp_aware: bool,
#[allow(dead_code)]
api_key: Option<String>,
retry_config: RetryConfig,
circuit_breaker_config: CircuitBreakerConfig,
_worker_loads: Arc<tokio::sync::watch::Receiver<HashMap<String, isize>>>,
_load_monitor_handle: Option<Arc<tokio::task::JoinHandle<()>>>,
}
......@@ -56,30 +50,15 @@ impl Router {
false, // include all workers
);
// Update active workers gauge
RouterMetrics::set_active_workers(workers.len());
// Get worker URLs for monitoring
let worker_urls: Vec<String> = workers.iter().map(|w| w.url().to_string()).collect();
// Convert config CircuitBreakerConfig to core CircuitBreakerConfig
let circuit_breaker_config = ctx.router_config.effective_circuit_breaker_config();
let core_cb_config = CircuitBreakerConfig {
failure_threshold: circuit_breaker_config.failure_threshold,
success_threshold: circuit_breaker_config.success_threshold,
timeout_duration: Duration::from_secs(circuit_breaker_config.timeout_duration_secs),
window_duration: Duration::from_secs(circuit_breaker_config.window_duration_secs),
};
// Cache-aware policies are initialized in WorkerInitializer
// Setup load monitoring for PowerOfTwo policy
let (tx, rx) = tokio::sync::watch::channel(HashMap::new());
let worker_loads = Arc::new(rx);
// Get default policy to check if we need load monitoring
let default_policy = ctx.policy_registry.get_default_policy();
// Check if default policy is power_of_two for load monitoring
let load_monitor_handle = if default_policy.name() == "power_of_two" {
let monitor_urls = worker_urls.clone();
let monitor_api_keys = monitor_urls
......@@ -113,201 +92,13 @@ impl Router {
worker_registry: ctx.worker_registry.clone(),
policy_registry: ctx.policy_registry.clone(),
client: ctx.client.clone(),
worker_startup_timeout_secs: ctx.router_config.worker_startup_timeout_secs,
worker_startup_check_interval_secs: ctx
.router_config
.worker_startup_check_interval_secs,
dp_aware: ctx.router_config.dp_aware,
api_key: ctx.router_config.api_key.clone(),
retry_config: ctx.router_config.effective_retry_config(),
circuit_breaker_config: core_cb_config,
_worker_loads: worker_loads,
_load_monitor_handle: load_monitor_handle,
})
}
/// Get the current list of worker URLs
pub fn get_worker_urls(&self) -> Vec<String> {
self.worker_registry.get_all_urls()
}
/// Get worker URLs for a specific model
pub fn get_worker_urls_for_model(&self, model_id: Option<&str>) -> Vec<String> {
let workers = self.worker_registry.get_workers_filtered(
model_id,
Some(WorkerType::Regular),
Some(ConnectionMode::Http),
false, // get all workers
);
workers.iter().map(|w| w.url().to_string()).collect()
}
pub async fn wait_for_healthy_workers(
worker_urls: &[String],
worker_startup_timeout_secs: u64,
worker_startup_check_interval_secs: u64,
) -> Result<(), String> {
if worker_urls.is_empty() {
return Err(
"Timeout waiting for workers to become healthy: no workers provided".to_string(),
);
}
// Perform health check asynchronously
Self::wait_for_healthy_workers_async(
worker_urls,
worker_startup_timeout_secs,
worker_startup_check_interval_secs,
)
.await
}
async fn wait_for_healthy_workers_async(
worker_urls: &[String],
worker_startup_timeout_secs: u64,
worker_startup_check_interval_secs: u64,
) -> Result<(), String> {
info!(
"Waiting for {} workers to become healthy (timeout: {}s)",
worker_urls.len(),
worker_startup_timeout_secs
);
let start_time = std::time::Instant::now();
let client = reqwest::Client::builder()
.timeout(Duration::from_secs(2))
.build()
.map_err(|e| format!("Failed to create HTTP client: {}", e))?;
loop {
if start_time.elapsed() > Duration::from_secs(worker_startup_timeout_secs) {
error!(
"Timeout {}s waiting for workers {:?} to become healthy. Please set --router-worker-startup-timeout-secs (sglang_router.launch_server) or --worker-startup-timeout-secs (sglang_worker.router) to a larger value",
worker_startup_timeout_secs, worker_urls
);
return Err(format!(
"Timeout {}s waiting for workers {:?} to become healthy. Please set --router-worker-startup-timeout-secs (sglang_router.launch_server) or --worker-startup-timeout-secs (sglang_worker.router) to a larger value",
worker_startup_timeout_secs, worker_urls
));
}
// Perform all health checks concurrently
let mut health_checks = Vec::new();
for url in worker_urls {
let client_clone = client.clone();
let url_clone = url.clone();
let check_health = tokio::spawn(async move {
let health_url = format!("{}/health", url_clone);
match client_clone.get(&health_url).send().await {
Ok(res) => {
if res.status().is_success() {
None
} else {
Some((url_clone, format!("status: {}", res.status())))
}
}
Err(_) => Some((url_clone, "not ready".to_string())),
}
});
health_checks.push(check_health);
}
// Wait for all health checks to complete
let results = futures::future::join_all(health_checks).await;
let mut all_healthy = true;
let mut unhealthy_workers = Vec::new();
for result in results {
match result {
Ok(None) => {
// Worker is healthy
}
Ok(Some((url, reason))) => {
all_healthy = false;
unhealthy_workers.push((url, reason));
}
Err(e) => {
all_healthy = false;
unhealthy_workers
.push(("unknown".to_string(), format!("task error: {}", e)));
}
}
}
if all_healthy {
info!("All {} workers are healthy", worker_urls.len());
return Ok(());
} else {
debug!(
"Waiting for {} workers to become healthy ({} unhealthy: {:?})",
worker_urls.len(),
unhealthy_workers.len(),
unhealthy_workers
);
tokio::time::sleep(Duration::from_secs(worker_startup_check_interval_secs)).await;
}
}
}
fn get_worker_dp_size(worker_url: &str, api_key: &Option<String>) -> Result<usize, String> {
let sync_client = reqwest::blocking::Client::new();
let mut req_builder = sync_client.get(format!("{}/get_server_info", worker_url));
if let Some(key) = api_key {
req_builder = req_builder.bearer_auth(key);
}
match req_builder.send() {
Ok(res) => {
if res.status().is_success() {
let server_info = res
.text()
.map_err(|e| format!("failed to read text from response: {}", e))?;
let server_info: serde_json::Value = serde_json::from_str(&server_info)
.map_err(|e| format!("failed to decode JSON: {}", e))?;
let dp_size = server_info
.get("dp_size")
.and_then(|v| v.as_u64())
.ok_or_else(|| String::from("dp_size not found or not an u64"))?;
Ok(if dp_size > usize::MAX as u64 {
return Err(format!("dp_size is too large: {}", dp_size));
} else {
dp_size as usize
})
} else {
Err(format!("unexpected status code: {}", res.status()))
}
}
Err(e) => Err(format!("error response: {}", e)),
}
}
// Given a list of workers, return a list of workers with dp_rank as suffix
fn get_dp_aware_workers(
worker_urls: &[String],
api_key: &Option<String>,
) -> Result<Vec<String>, String> {
let mut dp_aware_workers: Vec<String> = Vec::new();
for url in worker_urls {
match Self::get_worker_dp_size(url, api_key) {
Ok(dp_size) => {
for i in 0..dp_size {
dp_aware_workers.push(format!("{}@{}", url, i));
}
}
Err(e) => return Err(format!("Failed to get DP size for {}: {}", url, e)),
}
}
Ok(dp_aware_workers)
}
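// Illustrative expansion (dp_size assumed): a worker at "http://w:8000" whose
// /get_server_info reports dp_size = 2 expands into one entry per DP rank:
//
//     // get_dp_aware_workers(&["http://w:8000".to_string()], &None)
//     //     -> Ok(["http://w:8000@0", "http://w:8000@1"])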
fn select_first_worker(&self) -> Result<String, String> {
let workers = self.worker_registry.get_all();
if workers.is_empty() {
......@@ -317,65 +108,6 @@ impl Router {
}
}
pub async fn send_health_check(&self, worker_url: &str) -> Response {
let health_url = if self.dp_aware {
// Need to extract the URL from "http://host:port@dp_rank"
match Self::extract_dp_rank(worker_url) {
Ok((worker_url_prefix, _dp_rank)) => worker_url_prefix,
Err(e) => {
error!("Failed to extract dp_rank for health check: {}", e);
return (
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to extract dp_rank: {}", e),
)
.into_response();
}
}
} else {
worker_url
};
let request_builder = self.client.get(format!("{}/health", health_url));
let response = match request_builder.send().await {
Ok(res) => {
let status = StatusCode::from_u16(res.status().as_u16())
.unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
match res.bytes().await {
Ok(body) => (status, body).into_response(),
Err(e) => {
error!(
worker_url = %health_url,
error = %e,
"Failed to read health response body"
);
(
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to read response body: {}", e),
)
.into_response()
}
}
}
Err(e) => {
error!(
worker_url = %health_url,
error = %e,
"Failed to send health request to worker"
);
(
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to send request to worker {}: {}", health_url, e),
)
.into_response()
}
};
// Don't record metrics for health checks
response
}
// Helper method to proxy GET requests to the first available worker
async fn proxy_get_request(&self, req: Request<Body>, endpoint: &str) -> Response {
let headers = header_utils::copy_request_headers(&req);
......@@ -575,14 +307,15 @@ impl Router {
) -> Response {
// TODO: the sglang worker currently keeps state in memory, so this implementation has to fan out to all workers.
// Eventually the router should manage chat history in a proper database; this implementation will be updated accordingly.
let worker_urls = self.get_worker_urls();
if worker_urls.is_empty() {
let workers = self.worker_registry.get_all();
if workers.is_empty() {
return (StatusCode::SERVICE_UNAVAILABLE, "No available workers").into_response();
}
let mut last_response: Option<Response> = None;
for worker_url in worker_urls {
let base = self.worker_base_url(&worker_url);
for worker in workers {
let worker_url = worker.url();
let base = self.worker_base_url(worker_url);
let url = format!("{}/{}", base, endpoint);
let mut request_builder = match method {
......@@ -597,6 +330,11 @@ impl Router {
}
};
if let Some(api_key) = worker.api_key() {
request_builder =
request_builder.header("Authorization", format!("Bearer {}", api_key));
}
if let Some(hdrs) = headers {
for (name, value) in hdrs {
let name_lc = name.as_str().to_lowercase();
......@@ -691,6 +429,12 @@ impl Router {
is_stream: bool,
load_incremented: bool, // Whether load was incremented for this request
) -> Response {
// Get the worker's API key if available
let api_key = self
.worker_registry
.get_by_url(worker_url)
.and_then(|w| w.api_key().clone());
let mut request_builder = if self.dp_aware {
let (worker_url_prefix, dp_rank) = match Self::extract_dp_rank(worker_url) {
Ok(tup) => tup,
......@@ -704,7 +448,6 @@ impl Router {
}
};
// Parse the request body
let mut json_val = match serde_json::to_value(typed_req) {
Ok(j) => j,
Err(e) => {
......@@ -716,7 +459,6 @@ impl Router {
}
};
// Insert the data_parallel_rank field
if let Some(map) = json_val.as_object_mut() {
map.insert(
String::from("data_parallel_rank"),
......@@ -743,6 +485,10 @@ impl Router {
.json(typed_req) // Use json() directly with typed request
};
if let Some(key) = api_key {
request_builder = request_builder.header("Authorization", format!("Bearer {}", key));
}
// Copy all headers from original request if provided
if let Some(headers) = headers {
for (name, value) in headers {
......@@ -909,215 +655,6 @@ impl Router {
}
}
pub async fn add_worker(
&self,
worker_url: &str,
api_key: &Option<String>,
) -> Result<String, String> {
let start_time = std::time::Instant::now();
let client = reqwest::Client::builder()
.timeout(Duration::from_secs(self.worker_startup_timeout_secs))
.build()
.map_err(|e| format!("Failed to create HTTP client: {}", e))?;
loop {
if start_time.elapsed() > Duration::from_secs(self.worker_startup_timeout_secs) {
error!(
"Timeout {}s waiting for worker {} to become healthy. Please set --router-worker-startup-timeout-secs (sglang_router.launch_server) or --worker-startup-timeout-secs (sglang_worker.router) to a larger value",
self.worker_startup_timeout_secs, worker_url
);
return Err(format!(
"Timeout {}s waiting for worker {} to become healthy. Please set --router-worker-startup-timeout-secs (sglang_router.launch_server) or --worker-startup-timeout-secs (sglang_worker.router) to a larger value",
self.worker_startup_timeout_secs, worker_url
));
}
match client.get(format!("{}/health", worker_url)).send().await {
Ok(res) => {
if res.status().is_success() {
if self.dp_aware {
// Need to contact the worker to get its dp_size,
// then register it as multiple workers (one per DP rank)
let url_vec = vec![String::from(worker_url)];
let dp_url_vec = Self::get_dp_aware_workers(&url_vec, api_key)
.map_err(|e| format!("Failed to get dp-aware workers: {}", e))?;
let mut worker_added: bool = false;
for dp_url in &dp_url_vec {
if self.worker_registry.get_by_url(dp_url).is_some() {
warn!("Worker {} already exists", dp_url);
continue;
}
info!("Added worker: {}", dp_url);
// TODO: In IGW mode, fetch model_id from worker's /get_model_info endpoint
let new_worker_builder =
BasicWorkerBuilder::new(dp_url.to_string())
.worker_type(WorkerType::Regular)
.circuit_breaker_config(
self.circuit_breaker_config.clone(),
);
let new_worker = if let Some(api_key) = api_key {
new_worker_builder.api_key(api_key).build()
} else {
new_worker_builder.build()
};
let worker_arc = Arc::new(new_worker);
self.worker_registry.register(worker_arc.clone());
// Notify PolicyRegistry about the new worker
let model_id = worker_arc.model_id();
self.policy_registry.on_worker_added(model_id, None);
// Initialize cache-aware policy if applicable
let model_workers = self.worker_registry.get_workers_filtered(
Some(model_id),
Some(WorkerType::Regular),
Some(ConnectionMode::Http),
false,
);
self.policy_registry
.init_cache_aware_policy(model_id, &model_workers);
worker_added = true;
}
if !worker_added {
return Err(format!("No worker added for {}", worker_url));
}
} else {
if self.worker_registry.get_by_url(worker_url).is_some() {
return Err(format!("Worker {} already exists", worker_url));
}
info!("Added worker: {}", worker_url);
// TODO: In IGW mode, fetch model_id from worker's /get_model_info endpoint
let new_worker_builder =
BasicWorkerBuilder::new(worker_url.to_string())
.worker_type(WorkerType::Regular)
.circuit_breaker_config(self.circuit_breaker_config.clone());
let new_worker = if let Some(api_key) = api_key {
new_worker_builder.api_key(api_key).build()
} else {
new_worker_builder.build()
};
let worker_arc = Arc::new(new_worker);
self.worker_registry.register(worker_arc.clone());
// Notify PolicyRegistry about the new worker
let model_id = worker_arc.model_id();
self.policy_registry.on_worker_added(model_id, None);
// Initialize cache-aware policy if applicable
let model_workers = self.worker_registry.get_workers_filtered(
Some(model_id),
Some(WorkerType::Regular),
Some(ConnectionMode::Http),
false,
);
self.policy_registry
.init_cache_aware_policy(model_id, &model_workers);
}
RouterMetrics::set_active_workers(self.worker_registry.get_all().len());
return Ok(format!("Successfully added worker: {}", worker_url));
} else {
debug!(
"Worker {} health check pending - status: {}",
worker_url,
res.status()
);
                        // Warn users if the URL lacks an http:// or https:// prefix
                        if !worker_url.starts_with("http://") && !worker_url.starts_with("https://")
                        {
                            warn!("The worker URL {} does not have an http:// or https:// prefix. Please add the prefix to the URL.", worker_url);
}
tokio::time::sleep(Duration::from_secs(
self.worker_startup_check_interval_secs,
))
.await;
continue;
}
}
Err(e) => {
debug!("Worker {} health check pending - error: {}", worker_url, e);
                    // Warn users if the URL lacks an http:// or https:// prefix
                    if !worker_url.starts_with("http://") && !worker_url.starts_with("https://") {
                        warn!("The worker URL {} does not have an http:// or https:// prefix. Please add the prefix to the URL.", worker_url);
}
tokio::time::sleep(Duration::from_secs(
self.worker_startup_check_interval_secs,
))
.await;
continue;
}
}
}
}
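    // Usage sketch for add_worker above (hypothetical URL and key; assumes the
    // worker is reachable). The call polls {url}/health until the worker responds
    // or worker_startup_timeout_secs elapses:
    //   router.add_worker("http://localhost:30000", &Some("my-key".to_string())).await?;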
pub fn remove_worker(&self, worker_url: &str) {
if self.dp_aware {
// remove dp-aware workers in a prefix-matching fashion
// without contacting the remote worker
            // Track (model_id, url) pairs so cache-aware cleanup can still run after removal
            let mut removed_workers: Vec<(String, String)> = Vec::new();
let worker_url_prefix = format!("{}@", worker_url);
// Find and remove all workers with matching prefix
let all_workers = self.worker_registry.get_all();
for w in all_workers.iter() {
if w.url().starts_with(&worker_url_prefix) {
// Get model_id before removing
let model_id = w.model_id().to_string();
if self.worker_registry.remove_by_url(w.url()).is_some() {
info!("Removed worker: {}", w.url());
                        removed_workers.push((model_id.clone(), w.url().to_string()));
// Notify PolicyRegistry about the removed worker
self.policy_registry.on_worker_removed(&model_id);
} else {
warn!("Worker {} not found, skipping removal", w.url());
}
}
}
RouterMetrics::set_active_workers(self.worker_registry.get_all().len());
            // The workers were just removed from the registry, so looking them up
            // by URL here would return None; use the model_ids captured above instead.
            for (model_id, dp_url) in removed_workers.iter() {
                self.policy_registry
                    .remove_worker_from_cache_aware(model_id, dp_url);
            }
} else {
// Get the worker first to extract model_id
let model_id = if let Some(worker) = self.worker_registry.get_by_url(worker_url) {
worker.model_id().to_string()
} else {
warn!("Worker {} not found, skipping removal", worker_url);
return;
};
if self.worker_registry.remove_by_url(worker_url).is_some() {
info!("Removed worker: {}", worker_url);
// Notify PolicyRegistry about the removed worker
self.policy_registry.on_worker_removed(&model_id);
RouterMetrics::set_active_workers(self.worker_registry.get_all().len());
}
self.policy_registry
.remove_worker_from_cache_aware(&model_id, worker_url);
}
}
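    // Example of the dp-aware prefix matching above (illustrative URLs):
    // remove_worker("http://host:9000") removes every registered entry of the
    // form "http://host:9000@<dp_rank>", e.g. "http://host:9000@0" and
    // "http://host:9000@1".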
async fn get_worker_load(&self, worker_url: &str, api_key: &Option<String>) -> Option<isize> {
let worker_url = if self.dp_aware {
// Need to extract the URL from "http://host:port@dp_rank"
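            // Illustration (hypothetical values): a dp-aware registry entry such as
            // "http://host:9000@3" splits into the base URL "http://host:9000" and dp_rank 3.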
......@@ -1205,7 +742,7 @@ impl Router {
// Static version of get_worker_load for use in monitoring task
async fn get_worker_load_static(
client: &reqwest::Client,
client: &Client,
worker_url: &str,
api_key: &Option<String>,
) -> Option<isize> {
......@@ -1281,25 +818,6 @@ impl Router {
use async_trait::async_trait;
#[async_trait]
impl WorkerManagement for Router {
async fn add_worker(
&self,
worker_url: &str,
api_key: &Option<String>,
) -> Result<String, String> {
Router::add_worker(self, worker_url, api_key).await
}
fn remove_worker(&self, worker_url: &str) {
Router::remove_worker(self, worker_url)
}
fn get_worker_urls(&self) -> Vec<String> {
Router::get_worker_urls(self)
}
}
#[async_trait]
impl RouterTrait for Router {
fn as_any(&self) -> &dyn std::any::Any {
......@@ -1445,12 +963,19 @@ impl RouterTrait for Router {
}
async fn flush_cache(&self) -> Response {
// Get all worker URLs
let worker_urls = self.get_worker_urls();
// Get all workers
let workers = self.worker_registry.get_all();
let worker_urls: Vec<String> = workers.iter().map(|w| w.url().to_string()).collect();
// Send requests to all workers concurrently without headers
let mut tasks = Vec::new();
for worker_url in &worker_urls {
// Get the worker's API key if available
let api_key = self
.worker_registry
.get_by_url(worker_url)
.and_then(|w| w.api_key().clone());
let worker_url = if self.dp_aware {
// Need to extract the URL from "http://host:port@dp_rank"
let (worker_url_prefix, _dp_rank) = match Self::extract_dp_rank(worker_url) {
......@@ -1468,7 +993,13 @@ impl RouterTrait for Router {
} else {
worker_url
};
let request_builder = self.client.post(format!("{}/flush_cache", worker_url));
let mut request_builder = self.client.post(format!("{}/flush_cache", worker_url));
if let Some(key) = api_key {
request_builder =
request_builder.header("Authorization", format!("Bearer {}", key));
}
tasks.push(request_builder.send());
}
......@@ -1546,6 +1077,7 @@ impl RouterTrait for Router {
#[cfg(test)]
mod tests {
use super::*;
use crate::core::BasicWorkerBuilder;
use std::collections::HashMap;
fn create_test_regular_router() -> Router {
......@@ -1558,11 +1090,9 @@ mod tests {
// Register test workers
let worker1 = BasicWorkerBuilder::new("http://worker1:8080")
.worker_type(WorkerType::Regular)
.api_key("test_api_key")
.build();
let worker2 = BasicWorkerBuilder::new("http://worker2:8080")
.worker_type(WorkerType::Regular)
.api_key("test_api_key")
.build();
worker_registry.register(Arc::new(worker1));
worker_registry.register(Arc::new(worker2));
......@@ -1571,13 +1101,9 @@ mod tests {
Router {
worker_registry,
policy_registry,
worker_startup_timeout_secs: 5,
worker_startup_check_interval_secs: 1,
dp_aware: false,
api_key: None,
client: Client::new(),
retry_config: RetryConfig::default(),
circuit_breaker_config: CircuitBreakerConfig::default(),
_worker_loads: Arc::new(rx),
_load_monitor_handle: None,
}
......@@ -1586,7 +1112,8 @@ mod tests {
#[test]
fn test_router_get_worker_urls_regular() {
let router = create_test_regular_router();
let urls = router.get_worker_urls();
let workers = router.worker_registry.get_all();
let urls: Vec<String> = workers.iter().map(|w| w.url().to_string()).collect();
assert_eq!(urls.len(), 2);
assert!(urls.contains(&"http://worker1:8080".to_string()));
......@@ -1603,21 +1130,4 @@ mod tests {
// DashMap doesn't guarantee order, so just check we get one of the workers
assert!(url == "http://worker1:8080" || url == "http://worker2:8080");
}
#[tokio::test]
async fn test_wait_for_healthy_workers_empty_list() {
// Empty list will return error immediately
let result = Router::wait_for_healthy_workers(&[], 1, 1).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("no workers provided"));
}
#[tokio::test]
async fn test_wait_for_healthy_workers_invalid_urls() {
// This test will timeout quickly since the URLs are invalid
let result =
Router::wait_for_healthy_workers(&["http://nonexistent:8080".to_string()], 1, 1).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("Timeout"));
}
}
......@@ -19,39 +19,18 @@ pub mod grpc;
pub mod header_utils;
pub mod http;
pub mod router_manager;
pub mod worker_initializer;
pub use factory::RouterFactory;
pub use worker_initializer::WorkerInitializer;
// Re-export HTTP routers for convenience (keeps routers::openai_router path working)
pub use http::{openai_router, pd_router, pd_types, router};
/// Worker management trait for administrative operations
///
/// This trait is separate from RouterTrait to allow Send futures
/// for use in service discovery and other background tasks
#[async_trait]
pub trait WorkerManagement: Send + Sync {
/// Add a worker to the router
async fn add_worker(
&self,
worker_url: &str,
api_key: &Option<String>,
) -> Result<String, String>;
/// Remove a worker from the router
fn remove_worker(&self, worker_url: &str);
/// Get all worker URLs
fn get_worker_urls(&self) -> Vec<String>;
}
/// Core trait for all router implementations
///
/// This trait provides a unified interface for routing requests,
/// regardless of whether it's a regular router or PD router.
#[async_trait]
pub trait RouterTrait: Send + Sync + Debug + WorkerManagement {
pub trait RouterTrait: Send + Sync + Debug {
/// Get a reference to self as Any for downcasting
fn as_any(&self) -> &dyn std::any::Any;
......
......@@ -4,17 +4,12 @@
//! - Single Router Mode (enable_igw=false): Router owns workers directly
//! - Multi-Router Mode (enable_igw=true): RouterManager coordinates everything
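//!
//! Mode selection sketch (illustrative; router ids match those registered below):
//! - enable_igw=false: a single router is built and owns its workers directly
//! - enable_igw=true: RouterManager::new(worker_registry) is created and delegates
//!   to routers registered under ids such as "http-regular" and "http-pd"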
use crate::config::RouterConfig;
use crate::core::{BasicWorkerBuilder, CircuitBreakerConfig, Worker, WorkerRegistry, WorkerType};
use crate::core::{Worker, WorkerRegistry, WorkerType};
use crate::protocols::spec::{
ChatCompletionRequest, CompletionRequest, EmbeddingRequest, GenerateRequest, RerankRequest,
ResponsesRequest,
};
use crate::protocols::worker_spec::{
ServerInfo, WorkerApiResponse, WorkerConfigRequest, WorkerErrorResponse, WorkerInfo,
WorkerListResponse, WorkerStats, WorkerTypeStats,
};
use crate::routers::{RouterTrait, WorkerManagement};
use crate::routers::RouterTrait;
use async_trait::async_trait;
use axum::{
body::Body,
......@@ -24,7 +19,7 @@ use axum::{
};
use dashmap::DashMap;
use std::sync::Arc;
use tracing::{info, warn};
use tracing::info;
/// Router identifier
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
......@@ -45,48 +40,28 @@ pub struct RouterManager {
/// Worker registry (single source of truth in multi-router mode)
worker_registry: Arc<WorkerRegistry>,
/// Policy registry for managing model-to-policy mappings
policy_registry: Arc<crate::policies::PolicyRegistry>,
/// All routers managed by this manager
/// RouterId examples: "http-regular", "http-pd", "grpc-regular", "grpc-pd"
routers: Arc<DashMap<RouterId, Arc<dyn RouterTrait>>>,
/// Default router for requests without specific routing
default_router: Arc<std::sync::RwLock<Option<RouterId>>>,
/// HTTP client for querying worker info
client: reqwest::Client,
/// Configuration
#[allow(dead_code)] // May be used in future enhancements
config: RouterConfig,
}
impl RouterManager {
/// Create a new router manager with shared registries
pub fn new(
config: RouterConfig,
client: reqwest::Client,
worker_registry: Arc<WorkerRegistry>,
policy_registry: Arc<crate::policies::PolicyRegistry>,
) -> Self {
pub fn new(worker_registry: Arc<WorkerRegistry>) -> Self {
Self {
worker_registry,
policy_registry,
routers: Arc::new(DashMap::new()),
default_router: Arc::new(std::sync::RwLock::new(None)),
client,
config,
}
}
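    // Usage sketch for the constructor above (assuming a shared registry; the
    // WorkerRegistry construction shown here is illustrative):
    //   let registry = Arc::new(WorkerRegistry::new());
    //   let manager = RouterManager::new(registry.clone());
    //   manager.register_router(RouterId::new("http-regular".to_string()), router);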
/// Register a router with the manager
pub fn register_router(&self, id: RouterId, router: Arc<dyn RouterTrait>) {
// Store router
self.routers.insert(id.clone(), router);
// Set as default if first router
let mut default_router = self.default_router.write().unwrap();
if default_router.is_none() {
*default_router = Some(id.clone());
......@@ -107,11 +82,9 @@ impl RouterManager {
/// Get router for a specific model based on worker types
pub fn get_router_for_model(&self, model_id: &str) -> Option<Arc<dyn RouterTrait>> {
// Query workers for this model from registry
let workers = self.worker_registry.get_by_model(model_id);
if !workers.is_empty() {
// Determine router based on worker types
let has_pd_workers = workers.iter().any(|w| {
matches!(
w.worker_type(),
......@@ -125,13 +98,11 @@ impl RouterManager {
RouterId::new("http-regular".to_string())
};
// Return the router if it exists
if let Some(router) = self.routers.get(&router_id) {
return Some(router.clone());
}
}
// Fall back to default router
let default_router = self.default_router.read().unwrap();
if let Some(ref default_id) = *default_router {
self.routers.get(default_id).map(|r| r.clone())
......@@ -149,277 +120,12 @@ impl RouterManager {
}
}
/// Add a worker to the registry
pub async fn add_worker(
&self,
config: WorkerConfigRequest,
) -> Result<WorkerApiResponse, WorkerErrorResponse> {
// Build labels from configuration
let mut labels = config.labels.clone();
// Query server info if model_id not provided
let model_id = if let Some(model_id) = config.model_id {
model_id
} else {
match self.query_server_info(&config.url, &config.api_key).await {
Ok(info) => {
// Extract model_id from server info
info.model_id
.or_else(|| {
info.model_path
.as_ref()
.and_then(|path| path.split('/').next_back().map(|s| s.to_string()))
})
.unwrap_or_else(|| "unknown".to_string())
}
Err(e) => {
warn!("Failed to query server info from {}: {}", config.url, e);
"unknown".to_string()
}
}
};
// Add configuration to labels
labels.insert("model_id".to_string(), model_id.clone());
if let Some(priority) = config.priority {
labels.insert("priority".to_string(), priority.to_string());
}
if let Some(cost) = config.cost {
labels.insert("cost".to_string(), cost.to_string());
}
// Add gRPC-specific configuration if provided
if let Some(tokenizer_path) = config.tokenizer_path {
labels.insert("tokenizer_path".to_string(), tokenizer_path);
}
if let Some(reasoning_parser) = config.reasoning_parser {
labels.insert("reasoning_parser".to_string(), reasoning_parser);
}
if let Some(tool_parser) = config.tool_parser {
labels.insert("tool_parser".to_string(), tool_parser);
}
if let Some(chat_template) = config.chat_template {
labels.insert("chat_template".to_string(), chat_template);
}
let worker = match config.worker_type.as_deref() {
Some("prefill") => {
let mut builder = BasicWorkerBuilder::new(config.url.clone())
.worker_type(WorkerType::Prefill {
bootstrap_port: config.bootstrap_port,
})
.labels(labels.clone())
.circuit_breaker_config(CircuitBreakerConfig::default());
if let Some(api_key) = config.api_key.clone() {
builder = builder.api_key(api_key);
}
Box::new(builder.build()) as Box<dyn Worker>
}
Some("decode") => {
let mut builder = BasicWorkerBuilder::new(config.url.clone())
.worker_type(WorkerType::Decode)
.labels(labels.clone())
.circuit_breaker_config(CircuitBreakerConfig::default());
if let Some(api_key) = config.api_key.clone() {
builder = builder.api_key(api_key);
}
Box::new(builder.build()) as Box<dyn Worker>
}
_ => {
let mut builder = BasicWorkerBuilder::new(config.url.clone())
.worker_type(WorkerType::Regular)
.labels(labels.clone())
.circuit_breaker_config(CircuitBreakerConfig::default());
if let Some(api_key) = config.api_key.clone() {
builder = builder.api_key(api_key);
}
Box::new(builder.build()) as Box<dyn Worker>
}
};
// Register worker
let worker_arc: Arc<dyn Worker> = Arc::from(worker);
let worker_id = self.worker_registry.register(worker_arc.clone());
// Notify PolicyRegistry about the new worker
// Extract policy hint from labels if provided
let policy_hint = labels.get("policy").map(|s| s.as_str());
let policy = self.policy_registry.on_worker_added(&model_id, policy_hint);
// Log which type of router would handle this worker (for debugging)
let expected_router = match config.worker_type.as_deref() {
Some("prefill") | Some("decode") => "http-pd",
_ => "http-regular",
};
info!(
"Worker for model '{}' would be handled by '{}' router based on type",
model_id, expected_router
);
info!(
"Added worker {} with URL {} for model {} using policy {}",
worker_id.as_str(),
config.url,
model_id,
policy.name()
);
// Return worker info
let worker_info = self.worker_to_info(worker_id.as_str(), &worker_arc);
Ok(WorkerApiResponse {
success: true,
message: format!("Worker {} added successfully", worker_id.as_str()),
worker: Some(worker_info),
})
}
/// Remove a worker from the registry
pub fn remove_worker_from_registry(
&self,
url: &str,
) -> Result<WorkerApiResponse, WorkerErrorResponse> {
// Get worker to extract model_id before removing
let model_id = self
.worker_registry
.get_by_url(url)
.map(|worker| worker.model_id().to_string());
if let Some(_worker) = self.worker_registry.remove_by_url(url) {
// Notify PolicyRegistry about worker removal
if let Some(ref model_id) = model_id {
self.policy_registry.on_worker_removed(model_id);
info!("Removed worker with URL {} for model {}", url, model_id);
} else {
info!("Removed worker with URL {}", url);
}
Ok(WorkerApiResponse {
success: true,
message: format!("Worker {} removed successfully", url),
worker: None,
})
} else {
Err(WorkerErrorResponse {
error: format!("Worker with URL {} not found", url),
code: "WORKER_NOT_FOUND".to_string(),
})
}
}
/// List all workers
pub fn list_workers(&self) -> WorkerListResponse {
let workers = self.worker_registry.get_all_with_ids();
let worker_infos: Vec<WorkerInfo> = workers
.iter()
.map(|(id, w)| self.worker_to_info(id.as_str(), w))
.collect();
let total = worker_infos.len();
// Get stats from the worker registry
let registry_stats = self.worker_registry.stats();
// Convert WorkerRegistryStats to WorkerStats
let stats = WorkerStats {
total_workers: registry_stats.total_workers,
healthy_workers: registry_stats.healthy_workers,
total_models: registry_stats.total_models,
total_load: registry_stats.total_load,
by_type: WorkerTypeStats {
regular: registry_stats.regular_workers,
prefill: registry_stats.prefill_workers,
decode: registry_stats.decode_workers,
},
};
WorkerListResponse {
workers: worker_infos,
total,
stats,
}
}
/// Get worker by URL
pub fn get_worker(&self, url: &str) -> Option<WorkerInfo> {
self.worker_registry
.get_by_url(url)
.map(|w| self.worker_to_info("unknown", &w))
}
/// Query server info from a worker URL
async fn query_server_info(
&self,
url: &str,
api_key: &Option<String>,
) -> Result<ServerInfo, String> {
let info_url = format!("{}/get_server_info", url.trim_end_matches('/'));
let mut req_builder = self.client.get(&info_url);
if let Some(key) = api_key {
req_builder = req_builder.bearer_auth(key);
}
match req_builder.send().await {
Ok(response) => {
if response.status().is_success() {
response
.json::<ServerInfo>()
.await
.map_err(|e| format!("Failed to parse server info: {}", e))
} else {
Err(format!("Server returned status: {}", response.status()))
}
}
Err(e) => Err(format!("Failed to connect to server: {}", e)),
}
}
/// Convert Worker to WorkerInfo
fn worker_to_info(&self, id: &str, worker: &Arc<dyn Worker>) -> WorkerInfo {
let metadata = worker.metadata();
WorkerInfo {
id: id.to_string(),
url: worker.url().to_string(),
model_id: worker.model_id().to_string(),
priority: worker.priority(),
cost: worker.cost(),
worker_type: match worker.worker_type() {
WorkerType::Regular => "regular".to_string(),
WorkerType::Prefill { .. } => "prefill".to_string(),
WorkerType::Decode => "decode".to_string(),
},
is_healthy: worker.is_healthy(),
load: worker.load(),
connection_mode: format!("{:?}", worker.connection_mode()),
tokenizer_path: worker.tokenizer_path().map(|s| s.to_string()),
reasoning_parser: worker.reasoning_parser().map(|s| s.to_string()),
tool_parser: worker.tool_parser().map(|s| s.to_string()),
chat_template: worker.chat_template().map(|s| s.to_string()),
metadata: metadata.labels.clone(),
}
}
/// Get the appropriate router for a request based on headers and request content
pub fn select_router_for_request(
&self,
headers: Option<&HeaderMap>,
model_id: Option<&str>,
) -> Option<Arc<dyn RouterTrait>> {
// Extract priority and cost preferences from headers if available
let _priority_threshold = headers.and_then(|h| {
h.get("x-worker-priority")
.and_then(|v| v.to_str().ok())
......@@ -432,7 +138,6 @@ impl RouterManager {
.and_then(|s| s.parse::<f32>().ok())
});
// Check if PD (prefill-decode) mode is preferred from headers
let prefer_pd = headers
.and_then(|h| {
h.get("x-prefer-pd")
......@@ -441,7 +146,6 @@ impl RouterManager {
})
.unwrap_or(false);
// If model specified, use get_router_for_model
let candidate_routers = if let Some(model) = model_id {
if let Some(router) = self.get_router_for_model(model) {
vec![router]
......@@ -449,7 +153,6 @@ impl RouterManager {
Vec::new()
}
} else {
// No model specified, consider all routers
self.routers
.iter()
.map(|entry| entry.value().clone())
......@@ -457,23 +160,20 @@ impl RouterManager {
};
if candidate_routers.is_empty() {
// No routers found for the specified model
return None;
}
// Score routers based on worker attributes and request preferences
let mut best_router = None;
let mut best_score = 0.0;
for router in candidate_routers {
let mut score = 1.0;
// Check if this is a PD router
let is_pd = router.is_pd_mode();
if prefer_pd && is_pd {
score += 2.0; // Bonus for matching PD preference
score += 2.0;
} else if !prefer_pd && !is_pd {
score += 1.0; // Bonus for matching regular preference
score += 1.0;
}
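            // Worked example of the scoring above: with prefer_pd=true, a PD router
            // scores 1.0 + 2.0 = 3.0 while a regular router stays at 1.0, so the PD
            // router wins; with prefer_pd=false, a regular router scores 1.0 + 1.0 = 2.0.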
// Get workers for this router and evaluate based on priority/cost
......@@ -495,49 +195,6 @@ impl RouterManager {
}
}
/// RouterManager implements RouterTrait to act as a meta-router
/// that delegates requests to the appropriate underlying router
#[async_trait]
impl WorkerManagement for RouterManager {
/// Add a worker - in multi-router mode, this adds to the registry
async fn add_worker(
&self,
worker_url: &str,
api_key: &Option<String>,
) -> Result<String, String> {
// Create a basic worker config request
let config = WorkerConfigRequest {
url: worker_url.to_string(),
api_key: api_key.clone(),
model_id: None,
worker_type: None,
priority: None,
cost: None,
labels: std::collections::HashMap::new(),
bootstrap_port: None,
tokenizer_path: None,
reasoning_parser: None,
tool_parser: None,
chat_template: None,
};
match self.add_worker(config).await {
Ok(response) => Ok(response.message),
Err(e) => Err(e.error),
}
}
/// Remove a worker from the registry
fn remove_worker(&self, worker_url: &str) {
let _ = self.remove_worker_from_registry(worker_url);
}
/// Get all worker URLs from the registry
fn get_worker_urls(&self) -> Vec<String> {
self.worker_registry.get_all_urls()
}
}
#[async_trait]
impl RouterTrait for RouterManager {
fn as_any(&self) -> &dyn std::any::Any {
......@@ -639,7 +296,6 @@ impl RouterTrait for RouterManager {
body: &ChatCompletionRequest,
_model_id: Option<&str>,
) -> Response {
// Select router based on headers and model
let router = self.select_router_for_request(headers, Some(&body.model));
if let Some(router) = router {
......@@ -662,7 +318,6 @@ impl RouterTrait for RouterManager {
body: &CompletionRequest,
_model_id: Option<&str>,
) -> Response {
// Select router based on headers and model
let router = self.select_router_for_request(headers, Some(&body.model));
if let Some(router) = router {
......@@ -746,7 +401,6 @@ impl RouterTrait for RouterManager {
body: &EmbeddingRequest,
_model_id: Option<&str>,
) -> Response {
// Select router based on headers and model
let router = self.select_router_for_request(headers, Some(&body.model));
if let Some(router) = router {
......
// Worker Initialization Module
// Separates worker lifecycle management from router construction
use crate::config::types::{ConnectionMode as ConfigConnectionMode, RouterConfig, RoutingMode};
use crate::core::{
BasicWorkerBuilder, CircuitBreakerConfig, ConnectionMode, HealthConfig, Worker, WorkerRegistry,
WorkerType,
};
use crate::policies::PolicyRegistry;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use tracing::{info, warn};
/// WorkerInitializer handles the creation and registration of workers
/// based on routing configuration, separating this concern from router constructors
pub struct WorkerInitializer;
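// Usage sketch (assuming an already-built RouterConfig and shared registries):
//   WorkerInitializer::initialize_workers(&config, &worker_registry, Some(&policy_registry)).await?;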
impl WorkerInitializer {
/// Initialize workers based on configuration and register them in the WorkerRegistry
pub async fn initialize_workers(
config: &RouterConfig,
worker_registry: &Arc<WorkerRegistry>,
policy_registry: Option<&Arc<PolicyRegistry>>,
) -> Result<(), String> {
info!("Initializing workers for routing mode: {:?}", config.mode);
match &config.mode {
RoutingMode::Regular { worker_urls } => {
                // Use the router's api_key, repeated once per worker
let worker_api_keys: Vec<Option<String>> =
worker_urls.iter().map(|_| config.api_key.clone()).collect();
Self::create_regular_workers(
worker_urls,
&worker_api_keys,
&config.connection_mode,
config,
worker_registry,
policy_registry,
)
.await?;
}
RoutingMode::PrefillDecode {
prefill_urls,
decode_urls,
..
} => {
                // Use the router's api_key, repeated once per prefill/decode worker
let prefill_api_keys: Vec<Option<String>> = prefill_urls
.iter()
.map(|_| config.api_key.clone())
.collect();
let decode_api_keys: Vec<Option<String>> =
decode_urls.iter().map(|_| config.api_key.clone()).collect();
Self::create_prefill_workers(
prefill_urls,
&prefill_api_keys,
&config.connection_mode,
config,
worker_registry,
policy_registry,
)
.await?;
Self::create_decode_workers(
decode_urls,
&decode_api_keys,
&config.connection_mode,
config,
worker_registry,
policy_registry,
)
.await?;
}
RoutingMode::OpenAI { .. } => {
info!("OpenAI routing mode - no local workers to initialize");
}
}
// Wait for workers to be healthy if any were registered
if worker_registry.stats().total_workers > 0 {
Self::wait_for_healthy_workers(
worker_registry,
config.worker_startup_timeout_secs,
config.worker_startup_check_interval_secs,
)
.await?;
}
Ok(())
}
/// Create regular workers for standard routing mode
async fn create_regular_workers(
urls: &[String],
api_keys: &[Option<String>],
config_connection_mode: &ConfigConnectionMode,
config: &RouterConfig,
registry: &Arc<WorkerRegistry>,
policy_registry: Option<&Arc<PolicyRegistry>>,
) -> Result<(), String> {
info!("Creating {} regular workers", urls.len());
// Convert config connection mode to core connection mode
let connection_mode = Self::convert_connection_mode(config_connection_mode, urls.first());
// Convert circuit breaker config
let circuit_breaker_config = config.effective_circuit_breaker_config();
let core_cb_config = CircuitBreakerConfig {
failure_threshold: circuit_breaker_config.failure_threshold,
success_threshold: circuit_breaker_config.success_threshold,
timeout_duration: Duration::from_secs(circuit_breaker_config.timeout_duration_secs),
window_duration: Duration::from_secs(circuit_breaker_config.window_duration_secs),
};
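        // Example: failure_threshold=5 with timeout_duration_secs=30 means the
        // breaker opens after 5 consecutive failures and probes again (half-open)
        // 30 seconds later.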
// Convert health check config
let health_config = HealthConfig {
timeout_secs: config.health_check.timeout_secs,
check_interval_secs: config.health_check.check_interval_secs,
endpoint: config.health_check.endpoint.clone(),
failure_threshold: config.health_check.failure_threshold,
success_threshold: config.health_check.success_threshold,
};
let mut registered_workers: HashMap<String, Vec<Arc<dyn Worker>>> = HashMap::new();
for (url, api_key) in urls.iter().zip(api_keys.iter()) {
// TODO: Add DP-aware support when we have dp_rank/dp_size info
let worker_builder = BasicWorkerBuilder::new(url.clone())
.worker_type(WorkerType::Regular)
.connection_mode(connection_mode.clone())
.circuit_breaker_config(core_cb_config.clone())
.health_config(health_config.clone());
let worker = if let Some(api_key) = api_key.clone() {
worker_builder.api_key(api_key).build()
} else {
worker_builder.build()
};
let worker_arc = Arc::new(worker) as Arc<dyn Worker>;
let model_id = worker_arc.model_id();
let worker_id = registry.register(Arc::clone(&worker_arc));
info!("Registered regular worker {} with ID {:?}", url, worker_id);
// Track workers by model for cache-aware policy initialization
registered_workers
.entry(model_id.to_string())
.or_default()
.push(Arc::clone(&worker_arc));
// Notify policy registry about the worker
if let Some(policy_reg) = policy_registry {
policy_reg.on_worker_added(model_id, None);
}
}
// Initialize cache-aware policies with all workers for each model
if let Some(policy_reg) = policy_registry {
for (model_id, workers) in registered_workers {
policy_reg.init_cache_aware_policy(&model_id, &workers);
}
}
Ok(())
}
/// Create prefill workers for disaggregated routing mode
async fn create_prefill_workers(
prefill_entries: &[(String, Option<u16>)],
api_keys: &[Option<String>],
config_connection_mode: &ConfigConnectionMode,
config: &RouterConfig,
registry: &Arc<WorkerRegistry>,
policy_registry: Option<&Arc<PolicyRegistry>>,
) -> Result<(), String> {
info!("Creating {} prefill workers", prefill_entries.len());
// Convert config connection mode to core connection mode
let connection_mode = Self::convert_connection_mode(
config_connection_mode,
prefill_entries.first().map(|(url, _)| url),
);
// Convert circuit breaker config
let circuit_breaker_config = config.effective_circuit_breaker_config();
let core_cb_config = CircuitBreakerConfig {
failure_threshold: circuit_breaker_config.failure_threshold,
success_threshold: circuit_breaker_config.success_threshold,
timeout_duration: Duration::from_secs(circuit_breaker_config.timeout_duration_secs),
window_duration: Duration::from_secs(circuit_breaker_config.window_duration_secs),
};
// Convert health check config
let health_config = HealthConfig {
timeout_secs: config.health_check.timeout_secs,
check_interval_secs: config.health_check.check_interval_secs,
endpoint: config.health_check.endpoint.clone(),
failure_threshold: config.health_check.failure_threshold,
success_threshold: config.health_check.success_threshold,
};
let mut registered_workers: HashMap<String, Vec<Arc<dyn Worker>>> = HashMap::new();
for ((url, bootstrap_port), api_key) in prefill_entries.iter().zip(api_keys.iter()) {
// TODO: Add DP-aware support when we have dp_rank/dp_size info
let worker_builder = BasicWorkerBuilder::new(url.clone())
.worker_type(WorkerType::Prefill {
bootstrap_port: *bootstrap_port,
})
.connection_mode(connection_mode.clone())
.circuit_breaker_config(core_cb_config.clone())
.health_config(health_config.clone());
let worker = if let Some(api_key) = api_key.clone() {
worker_builder.api_key(api_key).build()
} else {
worker_builder.build()
};
let worker_arc = Arc::new(worker) as Arc<dyn Worker>;
let model_id = worker_arc.model_id();
let worker_id = registry.register(Arc::clone(&worker_arc));
info!("Registered prefill worker {} with ID {:?}", url, worker_id);
// Track workers by model for cache-aware policy initialization
registered_workers
.entry(model_id.to_string())
.or_default()
.push(Arc::clone(&worker_arc));
// Notify policy registry about the worker
if let Some(policy_reg) = policy_registry {
policy_reg.on_worker_added(model_id, None);
}
}
// Initialize cache-aware policies for PD mode
if let Some(policy_reg) = policy_registry {
// Collect all prefill workers
let all_prefill_workers: Vec<Arc<dyn Worker>> = registered_workers
.values()
.flat_map(|workers| workers.iter().cloned())
.collect();
// Initialize PD policies (will handle both prefill and decode, but we only have prefill here)
policy_reg.init_pd_cache_aware_policies(&all_prefill_workers, &[]);
}
Ok(())
}
/// Create decode workers for disaggregated routing mode
async fn create_decode_workers(
urls: &[String],
api_keys: &[Option<String>],
config_connection_mode: &ConfigConnectionMode,
config: &RouterConfig,
registry: &Arc<WorkerRegistry>,
policy_registry: Option<&Arc<PolicyRegistry>>,
) -> Result<(), String> {
info!("Creating {} decode workers", urls.len());
// Convert config connection mode to core connection mode
let connection_mode = Self::convert_connection_mode(config_connection_mode, urls.first());
// Convert circuit breaker config
let circuit_breaker_config = config.effective_circuit_breaker_config();
let core_cb_config = CircuitBreakerConfig {
failure_threshold: circuit_breaker_config.failure_threshold,
success_threshold: circuit_breaker_config.success_threshold,
timeout_duration: Duration::from_secs(circuit_breaker_config.timeout_duration_secs),
window_duration: Duration::from_secs(circuit_breaker_config.window_duration_secs),
};
// Convert health check config
let health_config = HealthConfig {
timeout_secs: config.health_check.timeout_secs,
check_interval_secs: config.health_check.check_interval_secs,
endpoint: config.health_check.endpoint.clone(),
failure_threshold: config.health_check.failure_threshold,
success_threshold: config.health_check.success_threshold,
};
let mut registered_workers: HashMap<String, Vec<Arc<dyn Worker>>> = HashMap::new();
for (url, api_key) in urls.iter().zip(api_keys.iter()) {
// TODO: Add DP-aware support when we have dp_rank/dp_size info
let worker_builder = BasicWorkerBuilder::new(url.clone())
.worker_type(WorkerType::Decode)
.connection_mode(connection_mode.clone())
.circuit_breaker_config(core_cb_config.clone())
.health_config(health_config.clone());
let worker = if let Some(api_key) = api_key.clone() {
worker_builder.api_key(api_key).build()
} else {
worker_builder.build()
};
let worker_arc = Arc::new(worker) as Arc<dyn Worker>;
let model_id = worker_arc.model_id();
let worker_id = registry.register(Arc::clone(&worker_arc));
info!("Registered decode worker {} with ID {:?}", url, worker_id);
// Track workers by model for cache-aware policy initialization
registered_workers
.entry(model_id.to_string())
.or_default()
.push(Arc::clone(&worker_arc));
// Notify policy registry about the worker
if let Some(policy_reg) = policy_registry {
policy_reg.on_worker_added(model_id, None);
}
}
// Initialize cache-aware policies for PD mode
if let Some(policy_reg) = policy_registry {
// Collect all decode workers
let all_decode_workers: Vec<Arc<dyn Worker>> = registered_workers
.values()
.flat_map(|workers| workers.iter().cloned())
.collect();
// Initialize PD policies (will handle both prefill and decode, but we only have decode here)
policy_reg.init_pd_cache_aware_policies(&[], &all_decode_workers);
}
Ok(())
}
/// Convert config connection mode to core connection mode
fn convert_connection_mode(
config_mode: &ConfigConnectionMode,
_sample_url: Option<&String>,
) -> ConnectionMode {
match config_mode {
ConfigConnectionMode::Http => ConnectionMode::Http,
ConfigConnectionMode::Grpc => ConnectionMode::Grpc { port: None },
}
}
/// Wait for workers to become healthy
async fn wait_for_healthy_workers(
registry: &Arc<WorkerRegistry>,
timeout_secs: u64,
check_interval_secs: u64,
) -> Result<(), String> {
let timeout = Duration::from_secs(timeout_secs);
let check_interval = Duration::from_secs(check_interval_secs);
let start_time = std::time::Instant::now();
info!(
"Waiting for workers to become healthy (timeout: {}s)",
timeout_secs
);
loop {
let stats = registry.stats();
if stats.healthy_workers > 0 {
info!(
"Workers healthy: {}/{} workers are ready",
stats.healthy_workers, stats.total_workers
);
// If we have at least one healthy worker, we can proceed
// This allows partial degradation rather than total failure
return Ok(());
}
if start_time.elapsed() > timeout {
let error_msg = format!(
"Timeout waiting for workers to become healthy after {}s. Total workers: {}, Healthy: {}",
timeout_secs, stats.total_workers, stats.healthy_workers
);
warn!("{}", error_msg);
// If we have workers but none are healthy, it's still a failure
if stats.total_workers > 0 {
return Err(error_msg);
} else {
// No workers at all might be OK for some configurations
warn!("No workers registered, proceeding anyway");
return Ok(());
}
}
tokio::time::sleep(check_interval).await;
}
}
/// Initialize workers for gRPC connections specifically
/// This is used when gRPC clients are pre-connected
pub async fn initialize_grpc_workers(
worker_urls: &[String],
worker_type: WorkerType,
config: &RouterConfig,
registry: &Arc<WorkerRegistry>,
policy_registry: Option<&Arc<PolicyRegistry>>,
grpc_clients: &mut HashMap<String, crate::grpc::SglangSchedulerClient>,
) -> Result<(), String> {
info!(
"Creating {} gRPC workers of type {:?}",
worker_urls.len(),
worker_type
);
// Convert circuit breaker config
let circuit_breaker_config = config.effective_circuit_breaker_config();
let core_cb_config = CircuitBreakerConfig {
failure_threshold: circuit_breaker_config.failure_threshold,
success_threshold: circuit_breaker_config.success_threshold,
timeout_duration: Duration::from_secs(circuit_breaker_config.timeout_duration_secs),
window_duration: Duration::from_secs(circuit_breaker_config.window_duration_secs),
};
// Convert health check config
let health_config = HealthConfig {
timeout_secs: config.health_check.timeout_secs,
check_interval_secs: config.health_check.check_interval_secs,
endpoint: config.health_check.endpoint.clone(),
failure_threshold: config.health_check.failure_threshold,
success_threshold: config.health_check.success_threshold,
};
let mut registered_workers: HashMap<String, Vec<Arc<dyn Worker>>> = HashMap::new();
for url in worker_urls {
if let Some(client) = grpc_clients.remove(url) {
let worker = BasicWorkerBuilder::new(url.clone())
.worker_type(worker_type.clone())
.connection_mode(ConnectionMode::Grpc { port: None })
.circuit_breaker_config(core_cb_config.clone())
.health_config(health_config.clone())
.grpc_client(client)
.build();
let worker_arc = Arc::new(worker) as Arc<dyn Worker>;
let model_id = worker_arc.model_id();
let worker_id = registry.register(Arc::clone(&worker_arc));
info!("Registered gRPC worker {} with ID {:?}", url, worker_id);
// Track workers by model for cache-aware policy initialization
registered_workers
.entry(model_id.to_string())
.or_default()
.push(Arc::clone(&worker_arc));
// Notify policy registry about the worker
if let Some(policy_reg) = policy_registry {
policy_reg.on_worker_added(model_id, None);
}
} else {
warn!("No gRPC client available for worker {}, skipping", url);
}
}
// Initialize cache-aware policies with all workers for each model
if let Some(policy_reg) = policy_registry {
for (model_id, workers) in registered_workers {
policy_reg.init_cache_aware_policy(&model_id, &workers);
}
}
Ok(())
}
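    // Usage sketch for initialize_grpc_workers above (hypothetical; assumes gRPC
    // clients were connected beforehand and keyed by worker URL):
    //   let mut clients: HashMap<String, crate::grpc::SglangSchedulerClient> = HashMap::new();
    //   // ... populate clients for each URL ...
    //   WorkerInitializer::initialize_grpc_workers(
    //       &urls, WorkerType::Regular, &config, &registry, None, &mut clients,
    //   ).await?;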
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_convert_connection_mode() {
// HTTP mode
assert!(matches!(
WorkerInitializer::convert_connection_mode(
&ConfigConnectionMode::Http,
Some(&"http://localhost:8080".to_string())
),
ConnectionMode::Http
));
// gRPC mode
assert!(matches!(
WorkerInitializer::convert_connection_mode(
&ConfigConnectionMode::Grpc,
Some(&"grpc://localhost:50051".to_string())
),
ConnectionMode::Grpc { .. }
));
// No URL provided
assert!(matches!(
WorkerInitializer::convert_connection_mode(&ConfigConnectionMode::Http, None),
ConnectionMode::Http
));
}
}
use crate::{
config::{ConnectionMode, HistoryBackend, RouterConfig},
core::{WorkerRegistry, WorkerType},
core::{WorkerManager, WorkerRegistry, WorkerType},
data_connector::{MemoryResponseStorage, NoOpResponseStorage, SharedResponseStorage},
logging::{self, LoggingConfig},
metrics::{self, PrometheusConfig},
......@@ -14,7 +14,6 @@ use crate::{
worker_spec::{WorkerApiResponse, WorkerConfigRequest, WorkerErrorResponse},
},
reasoning_parser::ParserFactory,
routers::WorkerInitializer,
routers::{
router_manager::{RouterId, RouterManager},
RouterFactory, RouterTrait,
......@@ -160,8 +159,6 @@ async fn get_model_info(State(state): State<Arc<AppState>>, req: Request) -> Res
state.router.get_model_info(req).await
}
// Generation endpoints
// The RouterTrait now accepts optional headers and typed body directly
async fn generate(
State(state): State<Arc<AppState>>,
headers: http::HeaderMap,
......@@ -291,27 +288,32 @@ async fn add_worker(
State(state): State<Arc<AppState>>,
Query(AddWorkerQuery { url, api_key }): Query<AddWorkerQuery>,
) -> Response {
match state.router.add_worker(&url, &api_key).await {
// Use centralized WorkerManager with full context
let result = WorkerManager::add_worker(&url, &api_key, &state.context).await;
match result {
Ok(message) => (StatusCode::OK, message).into_response(),
Err(error) => (StatusCode::BAD_REQUEST, error).into_response(),
}
}
async fn list_workers(State(state): State<Arc<AppState>>) -> Response {
let worker_list = state.router.get_worker_urls();
Json(serde_json::json!({ "urls": worker_list })).into_response()
// Use centralized WorkerManager instead of router's get_worker_urls
let worker_list = WorkerManager::get_worker_urls(&state.context.worker_registry);
Json(json!({ "urls": worker_list })).into_response()
}
async fn remove_worker(
State(state): State<Arc<AppState>>,
Query(AddWorkerQuery { url, .. }): Query<AddWorkerQuery>,
) -> Response {
state.router.remove_worker(&url);
(
StatusCode::OK,
format!("Successfully removed worker: {url}"),
)
.into_response()
// Use centralized WorkerManager with full context
let result = WorkerManager::remove_worker(&url, &state.context);
match result {
Ok(message) => (StatusCode::OK, message).into_response(),
Err(error) => (StatusCode::BAD_REQUEST, error).into_response(),
}
}
async fn flush_cache(State(state): State<Arc<AppState>>, _req: Request) -> Response {
......@@ -329,125 +331,106 @@ async fn create_worker(
State(state): State<Arc<AppState>>,
Json(config): Json<WorkerConfigRequest>,
) -> Response {
// Check if we have a RouterManager (enable_igw=true)
if let Some(router_manager) = &state.router_manager {
// Call RouterManager's add_worker method directly with the full config
match router_manager.add_worker(config).await {
Ok(response) => (StatusCode::OK, Json(response)).into_response(),
Err(error) => (StatusCode::BAD_REQUEST, Json(error)).into_response(),
// In single router mode, use centralized WorkerManager with full context
let result = WorkerManager::add_worker_from_config(&config, &state.context).await;
match result {
Ok(message) => {
let response = WorkerApiResponse {
success: true,
message,
worker: None,
};
(StatusCode::OK, Json(response)).into_response()
}
} else {
// In single router mode, use the router's add_worker with basic config
match state.router.add_worker(&config.url, &config.api_key).await {
Ok(message) => {
let response = WorkerApiResponse {
success: true,
message,
worker: None,
};
(StatusCode::OK, Json(response)).into_response()
}
Err(error) => {
let error_response = WorkerErrorResponse {
error,
code: "ADD_WORKER_FAILED".to_string(),
};
(StatusCode::BAD_REQUEST, Json(error_response)).into_response()
}
Err(error) => {
let error_response = WorkerErrorResponse {
error,
code: "ADD_WORKER_FAILED".to_string(),
};
(StatusCode::BAD_REQUEST, Json(error_response)).into_response()
}
}
}
/// GET /workers - List all workers with details
async fn list_workers_rest(State(state): State<Arc<AppState>>) -> Response {
if let Some(router_manager) = &state.router_manager {
let response = router_manager.list_workers();
Json(response).into_response()
} else {
// In single router mode, get detailed worker info from registry
let workers = state.context.worker_registry.get_all();
let response = serde_json::json!({
"workers": workers.iter().map(|worker| {
let mut worker_info = serde_json::json!({
"url": worker.url(),
"model_id": worker.model_id(),
"worker_type": match worker.worker_type() {
WorkerType::Regular => "regular",
WorkerType::Prefill { .. } => "prefill",
WorkerType::Decode => "decode",
},
"is_healthy": worker.is_healthy(),
"load": worker.load(),
"connection_mode": format!("{:?}", worker.connection_mode()),
"priority": worker.priority(),
"cost": worker.cost(),
});
// Add bootstrap_port for Prefill workers
if let WorkerType::Prefill { bootstrap_port } = worker.worker_type() {
worker_info["bootstrap_port"] = serde_json::json!(bootstrap_port);
}
worker_info
}).collect::<Vec<_>>(),
"total": workers.len(),
"stats": {
"prefill_count": state.context.worker_registry.get_prefill_workers().len(),
"decode_count": state.context.worker_registry.get_decode_workers().len(),
"regular_count": state.context.worker_registry.get_by_type(&WorkerType::Regular).len(),
// In single router mode, get detailed worker info from registry
let workers = state.context.worker_registry.get_all();
let response = serde_json::json!({
"workers": workers.iter().map(|worker| {
let mut worker_info = serde_json::json!({
"url": worker.url(),
"model_id": worker.model_id(),
"worker_type": match worker.worker_type() {
WorkerType::Regular => "regular",
WorkerType::Prefill { .. } => "prefill",
WorkerType::Decode => "decode",
},
"is_healthy": worker.is_healthy(),
"load": worker.load(),
"connection_mode": format!("{:?}", worker.connection_mode()),
"priority": worker.priority(),
"cost": worker.cost(),
});
// Add bootstrap_port for Prefill workers
if let WorkerType::Prefill { bootstrap_port } = worker.worker_type() {
worker_info["bootstrap_port"] = serde_json::json!(bootstrap_port);
}
});
Json(response).into_response()
}
worker_info
}).collect::<Vec<_>>(),
"total": workers.len(),
"stats": {
"prefill_count": state.context.worker_registry.get_prefill_workers().len(),
"decode_count": state.context.worker_registry.get_decode_workers().len(),
"regular_count": state.context.worker_registry.get_by_type(&WorkerType::Regular).len(),
}
});
Json(response).into_response()
}
/// GET /workers/{url} - Get specific worker info
async fn get_worker(State(state): State<Arc<AppState>>, Path(url): Path<String>) -> Response {
if let Some(router_manager) = &state.router_manager {
if let Some(worker) = router_manager.get_worker(&url) {
Json(worker).into_response()
} else {
let error = WorkerErrorResponse {
error: format!("Worker {url} not found"),
code: "WORKER_NOT_FOUND".to_string(),
};
(StatusCode::NOT_FOUND, Json(error)).into_response()
}
let workers = WorkerManager::get_worker_urls(&state.context.worker_registry);
if workers.contains(&url) {
Json(json!({
"url": url,
"model_id": "unknown",
"is_healthy": true
}))
.into_response()
} else {
let workers = state.router.get_worker_urls();
if workers.contains(&url) {
Json(json!({
"url": url,
"model_id": "unknown",
"is_healthy": true
}))
.into_response()
} else {
let error = WorkerErrorResponse {
error: format!("Worker {url} not found"),
code: "WORKER_NOT_FOUND".to_string(),
};
(StatusCode::NOT_FOUND, Json(error)).into_response()
}
let error = WorkerErrorResponse {
error: format!("Worker {url} not found"),
code: "WORKER_NOT_FOUND".to_string(),
};
(StatusCode::NOT_FOUND, Json(error)).into_response()
}
}
/// DELETE /workers/{url} - Remove a worker
async fn delete_worker(State(state): State<Arc<AppState>>, Path(url): Path<String>) -> Response {
if let Some(router_manager) = &state.router_manager {
match router_manager.remove_worker_from_registry(&url) {
Ok(response) => (StatusCode::OK, Json(response)).into_response(),
Err(error) => (StatusCode::BAD_REQUEST, Json(error)).into_response(),
// In single router mode, use centralized WorkerManager with full context
let result = WorkerManager::remove_worker(&url, &state.context);
match result {
Ok(message) => {
let response = WorkerApiResponse {
success: true,
message,
worker: None,
};
(StatusCode::OK, Json(response)).into_response()
}
Err(error) => {
let error_response = WorkerErrorResponse {
error,
code: "REMOVE_WORKER_FAILED".to_string(),
};
(StatusCode::BAD_REQUEST, Json(error_response)).into_response()
}
} else {
// In single router mode, use router's remove_worker
state.router.remove_worker(&url);
let response = WorkerApiResponse {
success: true,
message: format!("Worker {url} removed successfully"),
worker: None,
};
(StatusCode::OK, Json(response)).into_response()
}
}
......@@ -600,7 +583,7 @@ pub async fn startup(config: ServerConfig) -> Result<(), Box<dyn std::error::Err
"Initializing workers for routing mode: {:?}",
config.router_config.mode
);
WorkerInitializer::initialize_workers(
WorkerManager::initialize_workers(
&config.router_config,
&app_context.worker_registry,
Some(&app_context.policy_registry),
......@@ -620,12 +603,7 @@ pub async fn startup(config: ServerConfig) -> Result<(), Box<dyn std::error::Err
info!("Multi-router mode enabled (enable_igw=true)");
// Create RouterManager with shared registries from AppContext
let router_manager = Arc::new(RouterManager::new(
config.router_config.clone(),
client.clone(),
app_context.worker_registry.clone(),
app_context.policy_registry.clone(),
));
let router_manager = Arc::new(RouterManager::new(app_context.worker_registry.clone()));
// 1. HTTP Regular Router
match RouterFactory::create_regular_router(&app_context).await {
......@@ -711,12 +689,11 @@ pub async fn startup(config: ServerConfig) -> Result<(), Box<dyn std::error::Err
concurrency_queue_tx: limiter.queue_tx.clone(),
router_manager,
});
let router_arc = Arc::clone(&app_state.router);
// Start the service discovery if enabled
if let Some(service_discovery_config) = config.service_discovery_config {
if service_discovery_config.enabled {
match start_service_discovery(service_discovery_config, router_arc).await {
let app_context_arc = Arc::clone(&app_state.context);
match start_service_discovery(service_discovery_config, app_context_arc).await {
Ok(handle) => {
info!("Service discovery started");
// Spawn a task to handle the service discovery thread
......@@ -736,7 +713,7 @@ pub async fn startup(config: ServerConfig) -> Result<(), Box<dyn std::error::Err
info!(
"Router ready | workers: {:?}",
app_state.router.get_worker_urls()
WorkerManager::get_worker_urls(&app_state.context.worker_registry)
);
let request_id_headers = config.request_id_headers.clone().unwrap_or_else(|| {
......
use crate::routers::RouterTrait;
use crate::core::WorkerManager;
use crate::protocols::worker_spec::WorkerConfigRequest;
use crate::server::AppContext;
use futures::{StreamExt, TryStreamExt};
use k8s_openapi::api::core::v1::Pod;
......@@ -175,7 +177,7 @@ impl PodInfo {
pub async fn start_service_discovery(
config: ServiceDiscoveryConfig,
router: Arc<dyn RouterTrait>,
app_context: Arc<AppContext>,
) -> Result<task::JoinHandle<()>, kube::Error> {
// Don't initialize anything if service discovery is disabled
if !config.enabled {
......@@ -277,13 +279,13 @@ pub async fn start_service_discovery(
// Clone again for the next closure
let tracked_pods_clone2 = Arc::clone(&tracked_pods_clone);
let router_clone = Arc::clone(&router);
let app_context_clone = Arc::clone(&app_context);
let config_clone2 = Arc::clone(&config_arc);
match filtered_stream
.try_for_each(move |pod| {
let tracked_pods_inner = Arc::clone(&tracked_pods_clone2);
let router_inner = Arc::clone(&router_clone);
let app_context_inner = Arc::clone(&app_context_clone);
let config_inner = Arc::clone(&config_clone2);
async move {
......@@ -294,16 +296,15 @@ pub async fn start_service_discovery(
handle_pod_deletion(
&pod_info,
tracked_pods_inner,
router_inner,
app_context_inner,
port,
config_inner.pd_mode,
)
.await;
} else {
handle_pod_event(
&pod_info,
tracked_pods_inner,
router_inner,
app_context_inner,
port,
config_inner.pd_mode,
)
......@@ -347,7 +348,7 @@ pub async fn start_service_discovery(
async fn handle_pod_event(
pod_info: &PodInfo,
tracked_pods: Arc<Mutex<HashSet<PodInfo>>>,
router: Arc<dyn RouterTrait>,
app_context: Arc<AppContext>,
port: u16,
pd_mode: bool,
) {
......@@ -380,40 +381,44 @@ async fn handle_pod_event(
pod_info.name, pod_info.pod_type, worker_url
);
// Handle PD mode with specific pod types
let result = if pd_mode && pod_info.pod_type.is_some() {
// Need to import PDRouter type
use crate::routers::http::pd_router::PDRouter;
// Try to downcast to PDRouter
if let Some(pd_router) = router.as_any().downcast_ref::<PDRouter>() {
match &pod_info.pod_type {
Some(PodType::Prefill) => pd_router
.add_prefill_server(
worker_url.clone(),
pd_router.api_key.clone(),
pod_info.bootstrap_port,
)
.await
.map_err(|e| e.to_string()),
Some(PodType::Decode) => pd_router
.add_decode_server(worker_url.clone(), pd_router.api_key.clone())
.await
.map_err(|e| e.to_string()),
Some(PodType::Regular) | None => {
// Fall back to regular add_worker for regular pods
router.add_worker(&worker_url, &pd_router.api_key).await
}
}
} else {
Err("PD mode enabled but router is not a PDRouter".to_string())
// Build worker config based on pod type and routing mode
let worker_type = if pd_mode {
match &pod_info.pod_type {
Some(PodType::Prefill) => Some("prefill".to_string()),
Some(PodType::Decode) => Some("decode".to_string()),
Some(PodType::Regular) | None => None,
}
} else {
        // Regular mode or no pod type specified
        // In-cluster pods don't need an API key
router.add_worker(&worker_url, &None).await
None
};
// Only set bootstrap_port for prefill workers in PD mode
let bootstrap_port = if pd_mode {
match &pod_info.pod_type {
Some(PodType::Prefill) => pod_info.bootstrap_port,
_ => None,
}
} else {
None
};
let config = WorkerConfigRequest {
url: worker_url.clone(),
model_id: None,
worker_type,
priority: None,
cost: None,
labels: HashMap::new(),
bootstrap_port,
tokenizer_path: None,
reasoning_parser: None,
tool_parser: None,
chat_template: None,
api_key: None,
};
let result = WorkerManager::add_worker_from_config(&config, &app_context).await;
match result {
Ok(_) => {
debug!("Worker added: {}", worker_url);
......@@ -433,9 +438,8 @@ async fn handle_pod_event(
async fn handle_pod_deletion(
pod_info: &PodInfo,
tracked_pods: Arc<Mutex<HashSet<PodInfo>>>,
router: Arc<dyn RouterTrait>,
app_context: Arc<AppContext>,
port: u16,
pd_mode: bool,
) {
let worker_url = pod_info.worker_url(port);
......@@ -456,35 +460,8 @@ async fn handle_pod_deletion(
pod_info.name, pod_info.pod_type, worker_url
);
// Handle PD mode removal
if pd_mode && pod_info.pod_type.is_some() {
use crate::routers::http::pd_router::PDRouter;
// Try to downcast to PDRouter for PD-specific removal
if let Some(pd_router) = router.as_any().downcast_ref::<PDRouter>() {
match &pod_info.pod_type {
Some(PodType::Prefill) => {
if let Err(e) = pd_router.remove_prefill_server(&worker_url).await {
error!("Failed to remove prefill server {}: {}", worker_url, e);
}
}
Some(PodType::Decode) => {
if let Err(e) = pd_router.remove_decode_server(&worker_url).await {
error!("Failed to remove decode server {}: {}", worker_url, e);
}
}
Some(PodType::Regular) | None => {
// Fall back to regular remove_worker
router.remove_worker(&worker_url);
}
}
} else {
// PD mode but not a PDRouter, use generic removal
router.remove_worker(&worker_url);
}
} else {
// Regular mode removal
router.remove_worker(&worker_url);
if let Err(e) = WorkerManager::remove_worker(&worker_url, &app_context) {
error!("Failed to remove worker {}: {}", worker_url, e);
}
} else {
// This case might occur if a pod is deleted before it was ever marked healthy and added.
......@@ -582,12 +559,10 @@ mod tests {
}
}
// Helper to create a Router instance for testing event handlers
async fn create_test_router() -> Arc<dyn RouterTrait> {
// Helper to create an AppContext instance for testing event handlers
async fn create_test_app_context() -> Arc<AppContext> {
use crate::config::RouterConfig;
use crate::middleware::TokenBucket;
use crate::routers::http::router::Router;
use crate::server::AppContext;
// Create a minimal RouterConfig for testing with very short timeout
let router_config = RouterConfig {
......@@ -596,7 +571,7 @@ mod tests {
}; // Very short timeout for tests
// Create AppContext with minimal components
let app_context = Arc::new(AppContext {
Arc::new(AppContext {
client: reqwest::Client::new(),
router_config: router_config.clone(),
rate_limiter: Arc::new(TokenBucket::new(1000, 1000)),
......@@ -609,10 +584,7 @@ mod tests {
tool_parser_registry: None, // HTTP mode doesn't need tool parser
router_manager: None, // Test doesn't need router manager
response_storage: Arc::new(crate::data_connector::MemoryResponseStorage::new()),
});
let router = Router::new(&app_context).await.unwrap();
Arc::new(router) as Arc<dyn RouterTrait>
})
}
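// Note on the helper change above: tests previously constructed a concrete
// Router and asserted on router.get_worker_urls(); with WorkerManager owning
// worker state, they now only need an AppContext, and the assertions reduce
// to checking the tracked_pods set (see the dropped get_worker_urls asserts
// in the hunks below).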
// Helper to create a PD config for testing
......@@ -914,7 +886,7 @@ mod tests {
#[tokio::test]
async fn test_handle_pod_event_add_unhealthy_pod() {
let router = create_test_router().await;
let app_context = create_test_app_context().await;
let tracked_pods = Arc::new(Mutex::new(HashSet::new()));
let pod_info = PodInfo {
name: "pod1".into(),
......@@ -929,21 +901,18 @@ mod tests {
handle_pod_event(
&pod_info,
Arc::clone(&tracked_pods),
Arc::clone(&router),
Arc::clone(&app_context),
port,
false, // pd_mode = false
)
.await;
assert!(!tracked_pods.lock().unwrap().contains(&pod_info));
assert!(!router
.get_worker_urls()
.contains(&pod_info.worker_url(port)));
}
#[tokio::test]
async fn test_handle_pod_deletion_non_existing_pod() {
let router = create_test_router().await;
let app_context = create_test_app_context().await;
let tracked_pods = Arc::new(Mutex::new(HashSet::new()));
let pod_info = PodInfo {
name: "pod1".into(),
......@@ -958,19 +927,17 @@ mod tests {
handle_pod_deletion(
&pod_info,
Arc::clone(&tracked_pods),
Arc::clone(&router),
Arc::clone(&app_context),
port,
false, // pd_mode = false
)
.await;
assert!(tracked_pods.lock().unwrap().is_empty());
assert!(router.get_worker_urls().is_empty());
}
#[tokio::test]
async fn test_handle_pd_pod_event_prefill_pod() {
let router = create_test_router().await;
let app_context = create_test_app_context().await;
let tracked_pods = Arc::new(Mutex::new(HashSet::new()));
let pod_info = PodInfo {
name: "prefill-pod".into(),
......@@ -983,23 +950,23 @@ mod tests {
let port = 8080u16;
// This test validates the structure but won't actually add workers since
// we're using a regular router instead of a PD router
// the test worker URL won't be reachable
handle_pod_event(
&pod_info,
Arc::clone(&tracked_pods),
Arc::clone(&router),
Arc::clone(&app_context),
port,
false, // pd_mode = false, so it should fall back to regular handling
true, // pd_mode = true for PD pod
)
.await;
// Pod should not be tracked since router.add_worker will fail for a non-running server
// Pod should not be tracked since add_worker_from_config will fail for a non-running server
assert!(!tracked_pods.lock().unwrap().contains(&pod_info));
}
#[tokio::test]
async fn test_handle_pd_pod_event_decode_pod() {
let router = create_test_router().await;
let app_context = create_test_app_context().await;
let tracked_pods = Arc::new(Mutex::new(HashSet::new()));
let pod_info = PodInfo {
name: "decode-pod".into(),
......@@ -1014,19 +981,19 @@ mod tests {
handle_pod_event(
&pod_info,
Arc::clone(&tracked_pods),
Arc::clone(&router),
Arc::clone(&app_context),
port,
false, // pd_mode = false, so it should fall back to regular handling
true, // pd_mode = true for PD pod
)
.await;
// Pod should not be tracked since router.add_worker will fail for a non-running server
// Pod should not be tracked since add_worker_from_config will fail for a non-running server
assert!(!tracked_pods.lock().unwrap().contains(&pod_info));
}
#[tokio::test]
async fn test_handle_pd_pod_deletion_tracked_pod() {
let router = create_test_router().await;
let app_context = create_test_app_context().await;
let tracked_pods = Arc::new(Mutex::new(HashSet::new()));
let pod_info = PodInfo {
name: "test-pod".into(),
......@@ -1048,9 +1015,8 @@ mod tests {
handle_pod_deletion(
&pod_info,
Arc::clone(&tracked_pods),
Arc::clone(&router),
Arc::clone(&app_context),
port,
false, // pd_mode = false
)
.await;
......@@ -1060,7 +1026,7 @@ mod tests {
#[tokio::test]
async fn test_handle_pd_pod_deletion_untracked_pod() {
let router = create_test_router().await;
let app_context = create_test_app_context().await;
let tracked_pods = Arc::new(Mutex::new(HashSet::new()));
let pod_info = PodInfo {
name: "untracked-pod".into(),
......@@ -1077,9 +1043,8 @@ mod tests {
handle_pod_deletion(
&pod_info,
Arc::clone(&tracked_pods),
Arc::clone(&router),
Arc::clone(&app_context),
port,
true, // pd_mode = true
)
.await;
......@@ -1089,7 +1054,7 @@ mod tests {
#[tokio::test]
async fn test_unified_handler_regular_mode() {
let router = create_test_router().await;
let app_context = create_test_app_context().await;
let tracked_pods = Arc::new(Mutex::new(HashSet::new()));
let pod_info = PodInfo {
name: "regular-pod".into(),
......@@ -1105,19 +1070,19 @@ mod tests {
handle_pod_event(
&pod_info,
Arc::clone(&tracked_pods),
Arc::clone(&router),
Arc::clone(&app_context),
port,
false, // pd_mode = false
)
.await;
// Pod should not be tracked since router.add_worker will fail for a non-running server
// Pod should not be tracked since add_worker_from_url will fail for a non-running server
assert!(!tracked_pods.lock().unwrap().contains(&pod_info));
}
#[tokio::test]
async fn test_unified_handler_pd_mode_with_prefill() {
let router = create_test_router().await;
let app_context = create_test_app_context().await;
let tracked_pods = Arc::new(Mutex::new(HashSet::new()));
let pod_info = PodInfo {
name: "prefill-pod".into(),
......@@ -1133,19 +1098,19 @@ mod tests {
handle_pod_event(
&pod_info,
Arc::clone(&tracked_pods),
Arc::clone(&router),
Arc::clone(&app_context),
port,
true, // pd_mode = true
)
.await;
// Pod should not be tracked since router.add_pd_worker will fail for a regular router
// Pod should not be tracked since add_worker_from_config will fail for a non-running server
assert!(!tracked_pods.lock().unwrap().contains(&pod_info));
}
#[tokio::test]
async fn test_unified_handler_deletion_with_pd_mode() {
let router = create_test_router().await;
let app_context = create_test_app_context().await;
let tracked_pods = Arc::new(Mutex::new(HashSet::new()));
let pod_info = PodInfo {
name: "decode-pod".into(),
......@@ -1168,9 +1133,8 @@ mod tests {
handle_pod_deletion(
&pod_info,
Arc::clone(&tracked_pods),
Arc::clone(&router),
Arc::clone(&app_context),
port,
true, // pd_mode = true
)
.await;
......