use crate::tree::Tree;
use actix_web::http::header::{HeaderValue, CONTENT_TYPE};
use actix_web::{HttpRequest, HttpResponse};
use bytes::Bytes;
use futures_util::{StreamExt, TryStreamExt};
use log::{debug, info, warn};
use std::collections::HashMap;
use std::fmt::Debug;
use std::sync::atomic::AtomicUsize;
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
use std::time::Duration;
use tokio;

#[derive(Debug)]
pub enum Router {
    RoundRobin {
        worker_urls: Arc<RwLock<Vec<String>>>,
        current_index: AtomicUsize,
    },
    Random {
        worker_urls: Arc<RwLock<Vec<String>>>,
    },
    CacheAware {
        /*
            Cache-Aware Load Balancing Router

            This router combines two strategies to optimize both cache utilization and request distribution:

            1. Cache-Aware Routing (Approximate Tree)
            2. Load Balancing (Shortest Queue with Balance Thresholds)

            The router dynamically switches between these strategies based on load conditions:
            - Uses load balancing when the system is imbalanced
            - Uses cache-aware routing when the system is balanced

            A system is considered imbalanced if both conditions are met:
            1. (max - min) > abs_threshold
            2. max > rel_threshold * min
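
            Example (hypothetical numbers): with abs_threshold = 8 and
            rel_threshold = 1.5, loads of {12, 3} are imbalanced because
            12 - 3 = 9 > 8 and 12 > 1.5 * 3 = 4.5, so the router falls back
            to shortest-queue routing; loads of {12, 10} are balanced,
            since 12 - 10 = 2 ≤ 8.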

            Strategy Details:

            1. Cache-Aware Routing (Approximate Tree)
            -------------------------------------------
            This strategy maintains an approximate radix tree for each worker based on request history,
            eliminating the need for direct cache state queries. The tree stores raw text characters
            instead of token IDs to avoid tokenization overhead.

            Process:
            a. For each request, find the worker with the highest prefix match
            b. If match rate > cache_threshold:
               Route to the worker with highest match (likely has relevant data cached)
            c. If match rate ≤ cache_threshold:
               Route to the worker with smallest tree size (most available cache capacity)
            d. Background maintenance:
               Periodically evict least recently used leaf nodes to prevent memory overflow
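
            Example: two chat requests sharing the same system prompt match on
            that prefix, so the second is routed to the worker that served the
            first and can likely reuse its cached prefix.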

            2. Load Balancing (Shortest Queue)
            -------------------------------------------
            This strategy tracks pending request counts per worker and routes new requests
            to the least busy worker when the system is detected to be imbalanced.

            Configuration Parameters:
            ------------------------
            1. cache_threshold: (float, 0.0 to 1.0)
               Minimum prefix match ratio to use highest-match routing.
               Below this threshold, routes to the worker with the most available cache space.

            2. balance_abs_threshold: (integer)
               Absolute difference threshold for load imbalance detection.
               The system is potentially imbalanced if (max_load - min_load) > abs_threshold.

            3. balance_rel_threshold: (float)
               Relative ratio threshold for load imbalance detection.
               The system is potentially imbalanced if max_load > min_load * rel_threshold.
               Used in conjunction with abs_threshold to determine the final imbalance state.

            4. eviction_interval_secs: (integer)
               Interval between LRU eviction cycles for the approximate trees.

            5. max_tree_size: (integer)
               Maximum nodes per tree. When exceeded, LRU leaf nodes are evicted
               during the next eviction cycle.
        */
        worker_urls: Arc<RwLock<Vec<String>>>,
        tree: Arc<Mutex<Tree>>,
        running_queue: Arc<Mutex<HashMap<String, usize>>>,
        processed_queue: Arc<Mutex<HashMap<String, usize>>>,
        cache_threshold: f32,
        balance_abs_threshold: usize,
        balance_rel_threshold: f32,
        _eviction_thread: Option<thread::JoinHandle<()>>,
    },
}

#[derive(Debug, Clone)]
pub enum PolicyConfig {
    RandomConfig,
    RoundRobinConfig,
    CacheAwareConfig {
        cache_threshold: f32,
        balance_abs_threshold: usize,
        balance_rel_threshold: f32,
        eviction_interval_secs: u64,
        max_tree_size: usize,
    },
}
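
// Illustrative construction (the URL and tuning values below are hypothetical
// examples, not recommended defaults):
//
//     let router = Router::new(
//         vec!["http://127.0.0.1:8000".to_string()],
//         PolicyConfig::CacheAwareConfig {
//             cache_threshold: 0.5,
//             balance_abs_threshold: 32,
//             balance_rel_threshold: 1.5,
//             eviction_interval_secs: 60,
//             max_tree_size: 1 << 24,
//         },
//     )?;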

impl Router {
    pub fn new(worker_urls: Vec<String>, policy_config: PolicyConfig) -> Result<Self, String> {
        // Wait until all workers are healthy
        Self::wait_for_healthy_workers(&worker_urls, 300, 10)?;

        // Create router based on policy...
        Ok(match policy_config {
            PolicyConfig::RandomConfig => Router::Random {
                worker_urls: Arc::new(RwLock::new(worker_urls)),
            },
            PolicyConfig::RoundRobinConfig => Router::RoundRobin {
                worker_urls: Arc::new(RwLock::new(worker_urls)),
                current_index: std::sync::atomic::AtomicUsize::new(0),
            },
            PolicyConfig::CacheAwareConfig {
                cache_threshold,
                balance_abs_threshold,
                balance_rel_threshold,
                eviction_interval_secs,
                max_tree_size,
            } => {
                let mut running_queue = HashMap::new();
                for url in &worker_urls {
                    running_queue.insert(url.clone(), 0);
                }

                let mut processed_queue = HashMap::new();
                for url in &worker_urls {
                    processed_queue.insert(url.clone(), 0);
                }

                let tree = Arc::new(Mutex::new(Tree::new()));
                let running_queue = Arc::new(Mutex::new(running_queue));
                let processed_queue = Arc::new(Mutex::new(processed_queue));

                // Create background eviction thread
                let tree_clone = Arc::clone(&tree);
                let processed_queue_clone = Arc::clone(&processed_queue);
                let running_queue_clone = Arc::clone(&running_queue);
                let eviction_thread = thread::spawn(move || {
                    loop {
                        // Sleep for the specified interval
                        thread::sleep(Duration::from_secs(eviction_interval_secs));

                        let locked_tree_clone = tree_clone.lock().unwrap();
                        // Run eviction
                        locked_tree_clone.evict_tenant_by_size(max_tree_size);

                        // Print the processed queue
                        let locked_processed_queue = processed_queue_clone.lock().unwrap();
                        info!("Processed Queue: {:?}", locked_processed_queue);

                        // Print the running queue
                        let locked_running_queue = running_queue_clone.lock().unwrap();
                        info!("Running Queue: {:?}", locked_running_queue);
                    }
                });

                for url in &worker_urls {
                    tree.lock().unwrap().insert(&"".to_string(), url);
                }

                Router::CacheAware {
                    worker_urls: Arc::new(RwLock::new(worker_urls)),
                    tree,
                    running_queue,
                    processed_queue,
                    cache_threshold,
                    balance_abs_threshold,
                    balance_rel_threshold,
                    _eviction_thread: Some(eviction_thread),
                }
            }
        })
    }

    fn wait_for_healthy_workers(
        worker_urls: &[String],
        timeout_secs: u64,
        interval_secs: u64,
    ) -> Result<(), String> {
        let start_time = std::time::Instant::now();
        let sync_client = reqwest::blocking::Client::new();

        loop {
            if start_time.elapsed() > Duration::from_secs(timeout_secs) {
                return Err(format!(
                    "Timeout {}s waiting for workers to become healthy",
                    timeout_secs
                ));
            }

            let mut all_healthy = true;
            let mut unhealthy_workers = Vec::new();

            for url in worker_urls {
                match sync_client.get(&format!("{}/health", url)).send() {
                    Ok(res) => {
                        if !res.status().is_success() {
                            info!(
                                "Worker {} health check is pending with status: {}.",
                                url,
                                res.status()
                            );
                            all_healthy = false;
                            unhealthy_workers.push((url, format!("Status: {}", res.status())));
                        }
                    }
                    Err(e) => {
                        info!("Worker {} health check is pending with error: {}", url, e);
                        all_healthy = false;
                        unhealthy_workers.push((url, format!("Error: {}", e)));
                    }
                }
            }

            if all_healthy {
                info!("All workers are healthy");
                return Ok(());
            } else {
                info!("Unhealthy workers:");
                for (url, reason) in &unhealthy_workers {
                    info!("  {} - {}", url, reason);
                }
                thread::sleep(Duration::from_secs(interval_secs));
            }
        }
    }

    fn select_first_worker(&self) -> Result<String, String> {
        match self {
            Router::RoundRobin { worker_urls, .. }
            | Router::Random { worker_urls }
            | Router::CacheAware { worker_urls, .. } => {
                if worker_urls.read().unwrap().is_empty() {
                    Err("No workers are available".to_string())
                } else {
                    Ok(worker_urls.read().unwrap()[0].clone())
                }
            }
        }
    }

    async fn send_request(
        &self,
        client: &reqwest::Client,
        worker_url: String,
        route: &str,
    ) -> HttpResponse {
        match client.get(format!("{}{}", worker_url, route)).send().await {
            Ok(res) => {
                let status = actix_web::http::StatusCode::from_u16(res.status().as_u16())
                    .unwrap_or(actix_web::http::StatusCode::INTERNAL_SERVER_ERROR);

                match res.bytes().await {
                    Ok(body) => HttpResponse::build(status).body(body.to_vec()),
                    Err(e) => HttpResponse::InternalServerError()
                        .body(format!("Failed to read response body: {}", e)),
                }
            }
            Err(e) => HttpResponse::InternalServerError().body(format!(
                "Failed to send request to worker {}: {}",
                worker_url, e
            )),
        }
    }

    pub async fn route_to_first(&self, client: &reqwest::Client, route: &str) -> HttpResponse {
        match self.select_first_worker() {
            Ok(worker_url) => self.send_request(client, worker_url, route).await,
            Err(e) => HttpResponse::InternalServerError().body(e),
        }
    }

    fn get_text_from_request(&self, body: &Bytes, route: &str) -> String {
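        // Extract the text used for routing decisions: the "text" field for the
        // generate route, the serialized "messages" array for chat completions,
        // and the "prompt" field for completions; unknown routes yield "".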
        // Convert the body to JSON; fall back to Null on malformed input instead of panicking
        let json = serde_json::from_slice::<serde_json::Value>(body).unwrap_or_default();

        if route == "generate" {
            // get the "text" field
            let text = json.get("text").and_then(|t| t.as_str()).unwrap_or("");
            return text.to_string();
        } else if route == "v1/chat/completions" {
            // get the messages field as raw text
            if let Some(messages) = json.get("messages") {
                // Convert messages back to a string, preserving all JSON formatting
                return serde_json::to_string(messages).unwrap_or_default();
            }
        } else if route == "v1/completions" {
            let prompt = json.get("prompt").and_then(|t| t.as_str()).unwrap_or("");
            return prompt.to_string();
        }

        return "".to_string();
    }

    // TODO: return Result<String, String> instead of panicking
    fn select_generate_worker(&self, body: &Bytes, route: &str) -> String {
        let text = self.get_text_from_request(&body, route);

        let worker_url = match self {
            Router::RoundRobin {
                worker_urls,
                current_index,
            } => {
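                // Atomically advance the shared index so concurrent requests
                // each claim a distinct slot, wrapping modulo the worker count.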
                let idx = current_index
                    .fetch_update(
                        std::sync::atomic::Ordering::SeqCst,
                        std::sync::atomic::Ordering::SeqCst,
                        |x| Some((x + 1) % worker_urls.read().unwrap().len()),
                    )
                    .unwrap();
                worker_urls.read().unwrap()[idx].clone()
            }

            Router::Random { worker_urls } => worker_urls.read().unwrap()
                [rand::random::<usize>() % worker_urls.read().unwrap().len()]
            .clone(),

            Router::CacheAware {
                worker_urls,
                tree,
                running_queue,
                processed_queue,
                cache_threshold,
                balance_abs_threshold,
                balance_rel_threshold,
                ..
            } => {
                // TODO: delay scheduling if cache hit rate is high because it may cause imbalance. prioritize low hit rate ones

                let tree = tree.lock().unwrap();
                let mut running_queue = running_queue.lock().unwrap();

                // Get current load statistics
                let max_load = *running_queue.values().max().unwrap_or(&0);
                let min_load = *running_queue.values().min().unwrap_or(&0);

                // Load is considered imbalanced if:
                // 1. (max - min) > abs_threshold AND
                // 2. max > rel_threshold * min
                let is_imbalanced = max_load.saturating_sub(min_load) > *balance_abs_threshold
                    && (max_load as f32) > (min_load as f32 * balance_rel_threshold);

                let selected_url = if is_imbalanced {
                    // Log load balancing trigger and current queue state
                    info!(
                        "Load balancing triggered due to workload imbalance:\n\
                        Max load: {}, Min load: {}\n\
                        Current running queue: {:?}",
                        max_load, min_load, running_queue
                    );

                    // Use shortest queue routing when load is imbalanced
                    running_queue
                        .iter()
                        .min_by_key(|(_url, &count)| count)
                        .map(|(url, _)| url.clone())
                        .unwrap_or_else(|| worker_urls.read().unwrap()[0].clone())
                } else {
                    // Use cache-aware routing when load is balanced
                    let (matched_text, matched_worker) = tree.prefix_match(&text);
                    let matched_rate =
                        matched_text.chars().count() as f32 / text.chars().count() as f32;
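                    // Illustrative: text "hello world" (11 chars) with matched
                    // prefix "hello " (6 chars) gives matched_rate ≈ 0.55. An
                    // empty text yields 0.0 / 0.0 = NaN, which fails the
                    // `> cache_threshold` test and falls through to the
                    // smallest tree.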

                    if matched_rate > *cache_threshold {
                        matched_worker.to_string()
                    } else {
                        tree.get_smallest_tenant()
                    }
                };

                // Update queues and tree
                *running_queue.get_mut(&selected_url).unwrap() += 1;

                *processed_queue
                    .lock()
                    .unwrap()
                    .get_mut(&selected_url)
                    .unwrap() += 1;
                tree.insert(&text, &selected_url);

                selected_url
            }
        };

        worker_url
    }

    async fn send_generate_request(
        &self,
        client: &reqwest::Client,
        req: HttpRequest,
        body: Bytes,
        route: &str,
        worker_url: &str,
    ) -> HttpResponse {
        let is_stream = serde_json::from_slice::<serde_json::Value>(&body)
            .map(|v| v.get("stream").and_then(|s| s.as_bool()).unwrap_or(false))
            .unwrap_or(false);

        let res = match client
            .post(format!("{}{}", worker_url, route))
            .header(
                "Content-Type",
                req.headers()
                    .get("Content-Type")
                    .and_then(|h| h.to_str().ok())
                    .unwrap_or("application/json"),
            )
            .body(body.to_vec())
            .send()
            .await
        {
            Ok(res) => res,
            Err(e) => {
                return HttpResponse::InternalServerError().body(format!(
                    "Failed to send request to worker {}: {}",
                    worker_url, e
                ))
            }
        };

        let status = actix_web::http::StatusCode::from_u16(res.status().as_u16())
            .unwrap_or(actix_web::http::StatusCode::INTERNAL_SERVER_ERROR);

        if !is_stream {
            // For non-streaming requests, get response first
            let response = match res.bytes().await {
                Ok(body) => HttpResponse::build(status).body(body.to_vec()),
                Err(e) => {
                    let error_msg = format!("Failed to get response body: {}", e);
                    HttpResponse::InternalServerError().body(error_msg)
                }
            };

            // Then decrement running queue counter if using CacheAware
            if let Router::CacheAware { running_queue, .. } = self {
                if let Ok(mut queue) = running_queue.lock() {
                    if let Some(count) = queue.get_mut(worker_url) {
                        *count = count.saturating_sub(1);
                    }
                }
            }

            response
        } else if let Router::CacheAware { running_queue, .. } = self {
            let running_queue = Arc::clone(running_queue);
            let worker_url = worker_url.to_string();

            HttpResponse::build(status)
                .insert_header((CONTENT_TYPE, HeaderValue::from_static("text/event-stream")))
                .streaming(
                    res.bytes_stream()
                        .map_err(|_| {
                            actix_web::error::ErrorInternalServerError("Failed to read stream")
                        })
                        .inspect(move |bytes| {
                            // Skip errored chunks instead of panicking; only
                            // successful chunks are scanned for the SSE terminator.
                            if let Ok(bytes) = bytes {
                                if bytes
                                    .as_ref()
                                    .windows(12)
                                    .any(|window| window == b"data: [DONE]")
                                {
                                    let mut locked_queue = running_queue.lock().unwrap();
                                    let count = locked_queue.get_mut(&worker_url).unwrap();
                                    *count = count.saturating_sub(1);
                                    debug!("Streaming is done!!")
                                }
                            }
                        }),
                )
        } else {
            HttpResponse::build(status)
                .insert_header((CONTENT_TYPE, HeaderValue::from_static("text/event-stream")))
                .streaming(res.bytes_stream().map_err(|_| {
                    actix_web::error::ErrorInternalServerError("Failed to read stream")
                }))
        }
    }

    pub async fn route_generate_request(
        &self,
        client: &reqwest::Client,
        req: HttpRequest,
        body: Bytes,
        route: &str,
    ) -> HttpResponse {
        let worker_url = self.select_generate_worker(&body, route);
        self.send_generate_request(client, req, body, route, &worker_url)
            .await
    }

    pub async fn add_worker(&self, worker_url: String) -> Result<String, String> {
        let interval_secs = 10; // check every 10 seconds
        let timeout_secs = 300; // 5 minutes

        let start_time = std::time::Instant::now();
        let client = reqwest::Client::new();

        loop {
            if start_time.elapsed() > Duration::from_secs(timeout_secs) {
                return Err(format!(
                    "Timeout {}s waiting for worker {} to become healthy",
                    timeout_secs, worker_url
                ));
            }

            match client.get(&format!("{}/health", worker_url)).send().await {
                Ok(res) => {
                    if res.status().is_success() {
                        match self {
                            Router::RoundRobin { worker_urls, .. }
                            | Router::Random { worker_urls }
                            | Router::CacheAware { worker_urls, .. } => {
                                info!("Worker {} health check passed", worker_url);
                                let mut urls = worker_urls.write().unwrap();
                                if urls.contains(&worker_url) {
                                    return Err(format!("Worker {} already exists", worker_url));
                                }
                                info!("Added worker: {}", worker_url);
                                urls.push(worker_url.clone());
                            }
                        }

                        // If cache aware, initialize the queues for the new worker
                        if let Router::CacheAware {
                            running_queue,
                            processed_queue,
                            tree,
                            ..
                        } = self
                        {
                            // Add worker to running queue with initial count of 0
                            running_queue.lock().unwrap().insert(worker_url.clone(), 0);

                            // Add worker to processed queue with initial count of 0
                            processed_queue
                                .lock()
                                .unwrap()
                                .insert(worker_url.clone(), 0);

                            // Add worker to tree
                            tree.lock().unwrap().insert(&"".to_string(), &worker_url);
                        }

                        return Ok(format!("Successfully added worker: {}", worker_url));
                    } else {
                        info!(
                            "Worker {} health check is pending with status: {}.",
                            worker_url,
                            res.status()
                        );
                        // if the url does not have http or https prefix, warn users
                        if !worker_url.starts_with("http://") && !worker_url.starts_with("https://")
                        {
                            warn!("The worker url {} does not have an http or https prefix. Please add the prefix to the url.", worker_url);
                        }

                        tokio::time::sleep(Duration::from_secs(interval_secs)).await;
                        continue;
                    }
                }
                Err(e) => {
                    info!(
                        "Worker {} health check is pending with error: {}",
                        worker_url, e
                    );

                    // if the url does not have http or https prefix, warn users
                    if !worker_url.starts_with("http://") && !worker_url.starts_with("https://") {
                        warn!("The worker url {} does not have an http or https prefix. Please add the prefix to the url.", worker_url);
                    }

                    tokio::time::sleep(Duration::from_secs(interval_secs)).await;
                    continue;
                }
            }
        }
    }

    pub fn remove_worker(&self, worker_url: String) {
        match self {
            Router::RoundRobin { worker_urls, .. }
            | Router::Random { worker_urls }
            | Router::CacheAware { worker_urls, .. } => {
                let mut urls = worker_urls.write().unwrap();
                // Avoid panicking if the worker is not registered
                if let Some(index) = urls.iter().position(|url| url == &worker_url) {
                    urls.remove(index);
                    info!("Removed worker: {}", worker_url);
                } else {
                    warn!("Worker {} not found, skipping removal", worker_url);
                    return;
                }
            }
        }

        // if cache aware, remove the worker from the tree
        if let Router::CacheAware {
            tree,
            running_queue,
            processed_queue,
            ..
        } = self
        {
            tree.lock().unwrap().remove_tenant(&worker_url);
            running_queue.lock().unwrap().remove(&worker_url);
            processed_queue.lock().unwrap().remove(&worker_url);
            info!(
                "Removed worker from tree and cleaned up queues: {}",
                worker_url
            );
        }
    }
}