use clap::{ArgAction, Parser, ValueEnum};
use sglang_router_rs::config::{
    CircuitBreakerConfig, ConfigError, ConfigResult, ConnectionMode, DiscoveryConfig,
    HealthCheckConfig, HistoryBackend, MetricsConfig, PolicyConfig, RetryConfig, RouterConfig,
    RoutingMode,
};
use sglang_router_rs::metrics::PrometheusConfig;
use sglang_router_rs::server::{self, ServerConfig};
use sglang_router_rs::service_discovery::ServiceDiscoveryConfig;
use std::collections::HashMap;

// Helper function to parse prefill arguments from command line
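// Each entry accepts the forms shown in the long_about examples further below:
//   --prefill URL            -> (URL, None)
//   --prefill URL PORT       -> (URL, Some(PORT))
//   --prefill URL none       -> (URL, None)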
fn parse_prefill_args() -> Vec<(String, Option<u16>)> {
    let args: Vec<String> = std::env::args().collect();
    let mut prefill_entries = Vec::new();
    let mut i = 0;

    while i < args.len() {
        if args[i] == "--prefill" && i + 1 < args.len() {
            let url = args[i + 1].clone();
            let bootstrap_port = if i + 2 < args.len() && !args[i + 2].starts_with("--") {
                // Check if next arg is a port number
                if let Ok(port) = args[i + 2].parse::<u16>() {
                    i += 1; // Skip the port argument
                    Some(port)
                } else if args[i + 2].to_lowercase() == "none" {
                    i += 1; // Skip the "none" argument
                    None
                } else {
                    None
                }
            } else {
                None
            };
            prefill_entries.push((url, bootstrap_port));
            i += 2; // Skip --prefill and URL
        } else {
            i += 1;
        }
    }

    prefill_entries
}

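/// Inference backends the router can front. Only `sglang` and `openai` have
/// dedicated routing paths today; the other variants are accepted on the CLI
/// but fall back to regular routing (see the warning printed in `main`).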
#[derive(Copy, Clone, Debug, Eq, PartialEq, ValueEnum)]
pub enum Backend {
    #[value(name = "sglang")]
    Sglang,
    #[value(name = "vllm")]
    Vllm,
    #[value(name = "trtllm")]
    Trtllm,
    #[value(name = "openai")]
    Openai,
    #[value(name = "anthropic")]
    Anthropic,
}

impl std::fmt::Display for Backend {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let s = match self {
            Backend::Sglang => "sglang",
            Backend::Vllm => "vllm",
            Backend::Trtllm => "trtllm",
            Backend::Openai => "openai",
            Backend::Anthropic => "anthropic",
        };
        write!(f, "{}", s)
    }
}

#[derive(Parser, Debug)]
#[command(name = "sglang-router")]
#[command(about = "SGLang Router - High-performance request distribution across worker nodes")]
#[command(long_about = r#"
SGLang Router - High-performance request distribution across worker nodes

Usage:
This launcher starts a standalone router in front of individually launched worker instances.
It is useful for multi-node setups or when you want to start the workers and the router separately.

Examples:
  # Regular mode
  sglang-router --worker-urls http://worker1:8000 http://worker2:8000

  # PD disaggregated mode with the same policy for prefill and decode
  sglang-router --pd-disaggregation \
    --prefill http://127.0.0.1:30001 9001 \
    --prefill http://127.0.0.2:30002 9002 \
    --decode http://127.0.0.3:30003 \
    --decode http://127.0.0.4:30004 \
    --policy cache_aware

  # PD mode with different policies for prefill and decode
  sglang-router --pd-disaggregation \
    --prefill http://127.0.0.1:30001 9001 \
    --prefill http://127.0.0.2:30002 \
    --decode http://127.0.0.3:30003 \
    --decode http://127.0.0.4:30004 \
    --prefill-policy cache_aware --decode-policy power_of_two

"#)]
struct CliArgs {
    /// Host address to bind the router server
    #[arg(long, default_value = "127.0.0.1")]
    host: String,

    /// Port number to bind the router server
    #[arg(long, default_value_t = 30000)]
    port: u16,

    /// List of worker URLs (e.g., http://worker1:8000 http://worker2:8000)
    #[arg(long, num_args = 0..)]
    worker_urls: Vec<String>,

    /// Load balancing policy to use
    #[arg(long, default_value = "cache_aware", value_parser = ["random", "round_robin", "cache_aware", "power_of_two"])]
    policy: String,

    /// Enable PD (Prefill-Decode) disaggregated mode
    #[arg(long, default_value_t = false)]
    pd_disaggregation: bool,

    /// Decode server URL (can be specified multiple times)
    #[arg(long, action = ArgAction::Append)]
    decode: Vec<String>,

    /// Specific policy for prefill nodes in PD mode
    #[arg(long, value_parser = ["random", "round_robin", "cache_aware", "power_of_two"])]
    prefill_policy: Option<String>,

    /// Specific policy for decode nodes in PD mode
    #[arg(long, value_parser = ["random", "round_robin", "cache_aware", "power_of_two"])]
    decode_policy: Option<String>,

    /// Timeout in seconds for worker startup
    #[arg(long, default_value_t = 600)]
    worker_startup_timeout_secs: u64,

    /// Interval in seconds between checks for worker startup
    #[arg(long, default_value_t = 30)]
    worker_startup_check_interval: u64,

    /// Cache threshold (0.0-1.0) for cache-aware routing
    #[arg(long, default_value_t = 0.3)]
    cache_threshold: f32,

    /// Absolute threshold for load balancing
    #[arg(long, default_value_t = 64)]
    balance_abs_threshold: usize,

    /// Relative threshold for load balancing
    #[arg(long, default_value_t = 1.5)]
    balance_rel_threshold: f32,

    /// Interval in seconds between cache eviction operations
    #[arg(long, default_value_t = 120)]
    eviction_interval: u64,

    /// Maximum size of the approximation tree for cache-aware routing
    #[arg(long, default_value_t = 67108864)] // 2^26
    max_tree_size: usize,

    /// Maximum payload size in bytes
    #[arg(long, default_value_t = 536870912)] // 512MB
    max_payload_size: usize,

    /// Enable data-parallelism-aware scheduling
    #[arg(long, default_value_t = false)]
    dp_aware: bool,

    /// API key for worker authorization
    #[arg(long)]
    api_key: Option<String>,

    /// Backend to route requests to (sglang, vllm, trtllm, openai, anthropic)
    #[arg(long, value_enum, default_value_t = Backend::Sglang, alias = "runtime")]
    backend: Backend,

    /// Directory to store log files
    #[arg(long)]
    log_dir: Option<String>,

    /// Set the logging level
    #[arg(long, default_value = "info", value_parser = ["debug", "info", "warn", "error"])]
    log_level: String,

    /// Enable Kubernetes service discovery
    #[arg(long, default_value_t = false)]
    service_discovery: bool,

    /// Label selector for Kubernetes service discovery (format: key1=value1 key2=value2)
    #[arg(long, num_args = 0..)]
    selector: Vec<String>,

    /// Port to use for discovered worker pods
    #[arg(long, default_value_t = 80)]
    service_discovery_port: u16,

    /// Kubernetes namespace to watch for pods
    #[arg(long)]
    service_discovery_namespace: Option<String>,

    /// Label selector for prefill server pods in PD mode
    #[arg(long, num_args = 0..)]
    prefill_selector: Vec<String>,

    /// Label selector for decode server pods in PD mode
    #[arg(long, num_args = 0..)]
    decode_selector: Vec<String>,

    /// Port to expose Prometheus metrics
    #[arg(long, default_value_t = 29000)]
    prometheus_port: u16,

    /// Host address to bind the Prometheus metrics server
    #[arg(long, default_value = "127.0.0.1")]
    prometheus_host: String,

    /// Custom HTTP headers to check for request IDs
    #[arg(long, num_args = 0..)]
    request_id_headers: Vec<String>,

    /// Request timeout in seconds
    #[arg(long, default_value_t = 1800)]
    request_timeout_secs: u64,

    /// Maximum number of concurrent requests allowed
    #[arg(long, default_value_t = 256)]
    max_concurrent_requests: usize,

    /// CORS allowed origins
    #[arg(long, num_args = 0..)]
    cors_allowed_origins: Vec<String>,

    // Retry configuration
    /// Maximum number of retries
    #[arg(long, default_value_t = 5)]
    retry_max_retries: u32,

    /// Initial backoff in milliseconds for retries
    #[arg(long, default_value_t = 50)]
    retry_initial_backoff_ms: u64,

    /// Maximum backoff in milliseconds for retries
    #[arg(long, default_value_t = 30000)]
    retry_max_backoff_ms: u64,

    /// Backoff multiplier for exponential backoff
    #[arg(long, default_value_t = 1.5)]
    retry_backoff_multiplier: f32,

    /// Jitter factor for retry backoff
    #[arg(long, default_value_t = 0.2)]
    retry_jitter_factor: f32,

    /// Disable retries
    #[arg(long, default_value_t = false)]
    disable_retries: bool,

    // Circuit breaker configuration
    /// Number of failures before circuit breaker opens
    #[arg(long, default_value_t = 10)]
    cb_failure_threshold: u32,

    /// Number of successes before circuit breaker closes
    #[arg(long, default_value_t = 3)]
    cb_success_threshold: u32,

    /// Timeout duration in seconds for circuit breaker
    #[arg(long, default_value_t = 60)]
    cb_timeout_duration_secs: u64,

    /// Window duration in seconds for circuit breaker
    #[arg(long, default_value_t = 120)]
    cb_window_duration_secs: u64,

    /// Disable circuit breaker
    #[arg(long, default_value_t = false)]
    disable_circuit_breaker: bool,

    // Health check configuration
    /// Number of consecutive health check failures before marking a worker unhealthy
    #[arg(long, default_value_t = 3)]
    health_failure_threshold: u32,

    /// Number of consecutive health check successes before marking a worker healthy
    #[arg(long, default_value_t = 2)]
    health_success_threshold: u32,

    /// Timeout in seconds for health check requests
    #[arg(long, default_value_t = 5)]
    health_check_timeout_secs: u64,

    /// Interval in seconds between runtime health checks
    #[arg(long, default_value_t = 60)]
    health_check_interval_secs: u64,

    /// Health check endpoint path
    #[arg(long, default_value = "/health")]
    health_check_endpoint: String,

    // IGW (Inference Gateway) configuration
    /// Enable Inference Gateway mode
    #[arg(long, default_value_t = false)]
    enable_igw: bool,

    // Tokenizer configuration
    /// Model path for loading tokenizer (HuggingFace model ID or local path)
    #[arg(long)]
    model_path: Option<String>,

    /// Explicit tokenizer path (overrides model_path tokenizer if provided)
    #[arg(long)]
    tokenizer_path: Option<String>,

    /// History backend configuration (memory or none)
    #[arg(long, default_value = "memory", value_parser = ["memory", "none"])]
    history_backend: String,
}

impl CliArgs {
    /// Determine connection mode from worker URLs
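    ///
    /// If any worker URL carries an explicit `grpc://` or `grpcs://` scheme the
    /// router runs in gRPC mode; otherwise (http://, https://, or no scheme) it
    /// defaults to HTTP.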
    fn determine_connection_mode(worker_urls: &[String]) -> ConnectionMode {
        // Only consider it gRPC if explicitly specified with grpc:// or grpcs:// scheme
        for url in worker_urls {
            if url.starts_with("grpc://") || url.starts_with("grpcs://") {
                return ConnectionMode::Grpc;
            }
        }
        // Default to HTTP for all other cases (including http://, https://, or no scheme)
        ConnectionMode::Http
    }

    /// Parse selector strings into HashMap
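    ///
    /// For example, `["app=sglang", "role=prefill"]` becomes
    /// `{"app": "sglang", "role": "prefill"}`; entries without `=` are ignored.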
    fn parse_selector(selector_list: &[String]) -> HashMap<String, String> {
        let mut map = HashMap::new();
        for item in selector_list {
            if let Some(eq_pos) = item.find('=') {
                let key = item[..eq_pos].to_string();
                let value = item[eq_pos + 1..].to_string();
                map.insert(key, value);
            }
        }
        map
    }

    /// Convert policy string to PolicyConfig
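    ///
    /// `cache_aware` pulls its tuning parameters from the corresponding CLI flags,
    /// `power_of_two` uses a fixed 5-second load-check interval, and any other
    /// string (already constrained by clap's value_parser) falls back to `RoundRobin`.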
    fn parse_policy(&self, policy_str: &str) -> PolicyConfig {
        match policy_str {
            "random" => PolicyConfig::Random,
            "round_robin" => PolicyConfig::RoundRobin,
            "cache_aware" => PolicyConfig::CacheAware {
                cache_threshold: self.cache_threshold,
                balance_abs_threshold: self.balance_abs_threshold,
                balance_rel_threshold: self.balance_rel_threshold,
                eviction_interval_secs: self.eviction_interval,
                max_tree_size: self.max_tree_size,
            },
            "power_of_two" => PolicyConfig::PowerOfTwo {
                load_check_interval_secs: 5, // Default value
            },
            _ => PolicyConfig::RoundRobin, // Fallback
        }
    }

    /// Convert CLI arguments to RouterConfig
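    ///
    /// Routing mode is chosen in this order: IGW placeholder when --enable-igw is set,
    /// OpenAI mode for --backend openai, PD mode for --pd-disaggregation, otherwise
    /// regular mode. Missing worker/prefill/decode URLs are rejected unless service
    /// discovery is enabled.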
    fn to_router_config(
        &self,
        prefill_urls: Vec<(String, Option<u16>)>,
    ) -> ConfigResult<RouterConfig> {
        // Determine routing mode
        let mode = if self.enable_igw {
            // IGW mode - routing mode is not used in IGW, but we need to provide a placeholder
            RoutingMode::Regular {
                worker_urls: vec![],
            }
        } else if matches!(self.backend, Backend::Openai) {
            // OpenAI backend mode - worker_urls are treated as the upstream API base URL(s)
            RoutingMode::OpenAI {
                worker_urls: self.worker_urls.clone(),
            }
        } else if self.pd_disaggregation {
            let decode_urls = self.decode.clone();

            // Validate PD configuration if not using service discovery
            if !self.service_discovery && (prefill_urls.is_empty() || decode_urls.is_empty()) {
                return Err(ConfigError::ValidationFailed {
                    reason: "PD disaggregation mode requires --prefill and --decode URLs when not using service discovery".to_string(),
                });
            }

            RoutingMode::PrefillDecode {
                prefill_urls,
                decode_urls,
                prefill_policy: self.prefill_policy.as_ref().map(|p| self.parse_policy(p)),
                decode_policy: self.decode_policy.as_ref().map(|p| self.parse_policy(p)),
            }
        } else {
            // Regular mode
            if !self.service_discovery && self.worker_urls.is_empty() {
                return Err(ConfigError::ValidationFailed {
                    reason: "Regular mode requires --worker-urls when not using service discovery"
                        .to_string(),
                });
            }
            RoutingMode::Regular {
                worker_urls: self.worker_urls.clone(),
            }
        };

        // Main policy
        let policy = self.parse_policy(&self.policy);

        // Service discovery configuration
        let discovery = if self.service_discovery {
            Some(DiscoveryConfig {
                enabled: true,
                namespace: self.service_discovery_namespace.clone(),
                port: self.service_discovery_port,
                check_interval_secs: 60,
                selector: Self::parse_selector(&self.selector),
                prefill_selector: Self::parse_selector(&self.prefill_selector),
                decode_selector: Self::parse_selector(&self.decode_selector),
                bootstrap_port_annotation: "sglang.ai/bootstrap-port".to_string(),
            })
        } else {
            None
        };

        // Metrics configuration
        let metrics = Some(MetricsConfig {
            port: self.prometheus_port,
            host: self.prometheus_host.clone(),
        });

        // Determine connection mode from all worker URLs
        let mut all_urls = Vec::new();
        match &mode {
            RoutingMode::Regular { worker_urls } => {
                all_urls.extend(worker_urls.clone());
            }
            RoutingMode::PrefillDecode {
                prefill_urls,
                decode_urls,
                ..
            } => {
                for (url, _) in prefill_urls {
                    all_urls.push(url.clone());
                }
                all_urls.extend(decode_urls.clone());
            }
            RoutingMode::OpenAI { .. } => {
                // For connection-mode detection, skip URLs; OpenAI forces HTTP below.
            }
        }

        let connection_mode = match &mode {
            RoutingMode::OpenAI { .. } => ConnectionMode::Http,
            _ => Self::determine_connection_mode(&all_urls),
        };

        // Build RouterConfig
        Ok(RouterConfig {
            mode,
            policy,
            connection_mode,
            host: self.host.clone(),
            port: self.port,
            max_payload_size: self.max_payload_size,
            request_timeout_secs: self.request_timeout_secs,
            worker_startup_timeout_secs: self.worker_startup_timeout_secs,
            worker_startup_check_interval_secs: self.worker_startup_check_interval,
            dp_aware: self.dp_aware,
            api_key: self.api_key.clone(),
            discovery,
            metrics,
            log_dir: self.log_dir.clone(),
            log_level: Some(self.log_level.clone()),
            request_id_headers: if self.request_id_headers.is_empty() {
                None
            } else {
                Some(self.request_id_headers.clone())
            },
            max_concurrent_requests: self.max_concurrent_requests,
            queue_size: 100,        // Default queue size
            queue_timeout_secs: 60, // Default timeout
            cors_allowed_origins: self.cors_allowed_origins.clone(),
            retry: RetryConfig {
                max_retries: self.retry_max_retries,
                initial_backoff_ms: self.retry_initial_backoff_ms,
                max_backoff_ms: self.retry_max_backoff_ms,
                backoff_multiplier: self.retry_backoff_multiplier,
                jitter_factor: self.retry_jitter_factor,
            },
            circuit_breaker: CircuitBreakerConfig {
                failure_threshold: self.cb_failure_threshold,
                success_threshold: self.cb_success_threshold,
                timeout_duration_secs: self.cb_timeout_duration_secs,
                window_duration_secs: self.cb_window_duration_secs,
            },
            disable_retries: self.disable_retries,
            disable_circuit_breaker: self.disable_circuit_breaker,
            health_check: HealthCheckConfig {
                failure_threshold: self.health_failure_threshold,
                success_threshold: self.health_success_threshold,
                timeout_secs: self.health_check_timeout_secs,
                check_interval_secs: self.health_check_interval_secs,
                endpoint: self.health_check_endpoint.clone(),
            },
            enable_igw: self.enable_igw,
            rate_limit_tokens_per_second: None,
            model_path: self.model_path.clone(),
            tokenizer_path: self.tokenizer_path.clone(),
            history_backend: match self.history_backend.as_str() {
                "none" => HistoryBackend::None,
                _ => HistoryBackend::Memory,
            },
        })
    }

    /// Create ServerConfig from CLI args and RouterConfig
    fn to_server_config(&self, router_config: RouterConfig) -> ServerConfig {
        // Create service discovery config if enabled
        let service_discovery_config = if self.service_discovery {
            Some(ServiceDiscoveryConfig {
                enabled: true,
                selector: Self::parse_selector(&self.selector),
                check_interval: std::time::Duration::from_secs(60),
                port: self.service_discovery_port,
                namespace: self.service_discovery_namespace.clone(),
                pd_mode: self.pd_disaggregation,
                prefill_selector: Self::parse_selector(&self.prefill_selector),
                decode_selector: Self::parse_selector(&self.decode_selector),
                bootstrap_port_annotation: "sglang.ai/bootstrap-port".to_string(),
            })
        } else {
            None
        };

        // Create Prometheus config
        let prometheus_config = Some(PrometheusConfig {
            port: self.prometheus_port,
            host: self.prometheus_host.clone(),
        });

        ServerConfig {
            host: self.host.clone(),
            port: self.port,
            router_config,
            max_payload_size: self.max_payload_size,
            log_dir: self.log_dir.clone(),
            log_level: Some(self.log_level.clone()),
            service_discovery_config,
            prometheus_config,
            request_timeout_secs: self.request_timeout_secs,
            request_id_headers: if self.request_id_headers.is_empty() {
                None
            } else {
                Some(self.request_id_headers.clone())
            },
        }
    }
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
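    // Overall flow: pull out `--prefill URL [PORT|none]` pairs by hand (clap never
    // sees them), strip them from argv, parse the remaining flags with clap, build
    // the RouterConfig/ServerConfig, then start the server on a fresh Tokio runtime.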
    // Parse prefill arguments manually before clap parsing
    let prefill_urls = parse_prefill_args();

    // Filter out prefill arguments and their values before passing to clap
    let mut filtered_args: Vec<String> = Vec::new();
    let raw_args: Vec<String> = std::env::args().collect();
    let mut i = 0;

    while i < raw_args.len() {
        if raw_args[i] == "--prefill" && i + 1 < raw_args.len() {
            // Skip --prefill and its URL
            i += 2;
            // Also skip bootstrap port if present
            if i < raw_args.len()
                && !raw_args[i].starts_with("--")
                && (raw_args[i].parse::<u16>().is_ok() || raw_args[i].to_lowercase() == "none")
            {
                i += 1;
            }
        } else {
            filtered_args.push(raw_args[i].clone());
            i += 1;
        }
    }

    // Parse CLI arguments with clap using filtered args
    let cli_args = CliArgs::parse_from(filtered_args);

    // Print startup info
    println!("SGLang Router starting...");
    println!("Host: {}:{}", cli_args.host, cli_args.port);
    let mode_str = if cli_args.enable_igw {
        "IGW (Inference Gateway)".to_string()
    } else if matches!(cli_args.backend, Backend::Openai) {
        "OpenAI Backend".to_string()
    } else if cli_args.pd_disaggregation {
        "PD Disaggregated".to_string()
    } else {
        format!("Regular ({})", cli_args.backend)
    };
    println!("Mode: {}", mode_str);

    // Warn for runtimes that are parsed but not yet implemented
    match cli_args.backend {
        Backend::Vllm | Backend::Trtllm | Backend::Anthropic => {
            println!(
                "WARNING: runtime '{}' not implemented yet; falling back to regular routing. \
Provide --worker-urls or PD flags as usual.",
                cli_args.backend
            );
        }
        Backend::Sglang | Backend::Openai => {}
    }

    if !cli_args.enable_igw {
        println!("Policy: {}", cli_args.policy);

        if cli_args.pd_disaggregation && !prefill_urls.is_empty() {
            println!("Prefill nodes: {:?}", prefill_urls);
            println!("Decode nodes: {:?}", cli_args.decode);
        }
    }

    // Convert to RouterConfig
    let router_config = cli_args.to_router_config(prefill_urls)?;

    // Validate configuration
    router_config.validate()?;

    // Create ServerConfig
    let server_config = cli_args.to_server_config(router_config);

    // Create a new runtime for the server (like the Python binding does)
    let runtime = tokio::runtime::Runtime::new()?;

    // Block on the async startup function
    runtime.block_on(async move { server::startup(server_config).await })?;

    Ok(())
}
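
// A minimal sketch of unit tests for the pure helpers above; not part of the
// original file, and it only exercises behaviour visible in this module.
#[cfg(test)]
mod cli_tests {
    use super::*;

    #[test]
    fn grpc_scheme_selects_grpc_connection_mode() {
        let urls = vec!["http://a:8000".to_string(), "grpcs://b:9000".to_string()];
        // Any grpc:// or grpcs:// URL switches the router to gRPC mode.
        assert!(matches!(
            CliArgs::determine_connection_mode(&urls),
            ConnectionMode::Grpc
        ));
        // Plain HTTP URLs keep the default HTTP connection mode.
        assert!(matches!(
            CliArgs::determine_connection_mode(&["http://a:8000".to_string()]),
            ConnectionMode::Http
        ));
    }

    #[test]
    fn selector_entries_without_equals_are_ignored() {
        let map = CliArgs::parse_selector(&["app=sglang".to_string(), "bad-entry".to_string()]);
        assert_eq!(map.get("app").map(String::as_str), Some("sglang"));
        assert!(!map.contains_key("bad-entry"));
    }
}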