/*
    Cache-Aware Load Balancing Router

    This router combines two strategies to optimize both cache utilization and request distribution:

    1. Cache-Aware Routing (Approximate Tree)
    2. Load Balancing (Shortest Queue with Balance Thresholds)

    The router dynamically switches between these strategies based on load conditions:
    - Uses load balancing when the system is imbalanced
    - Uses cache-aware routing when the system is balanced

    A system is considered imbalanced if both conditions are met:
    1. (max - min) > abs_threshold
    2. max > rel_threshold * min
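
    For example, with abs_threshold = 8 and rel_threshold = 1.5 (illustrative
    values, not defaults), loads of [4, 20] are imbalanced: 20 - 4 = 16 > 8 and
    20 > 1.5 * 4 = 6. Loads of [2, 9] remain balanced: 9 > 1.5 * 2 = 3 satisfies
    the relative condition, but 9 - 2 = 7 ≤ 8 fails the absolute one.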

    Strategy Details:

    1. Cache-Aware Routing (Approximate Tree)
    -------------------------------------------
    This strategy maintains an approximate radix tree for each worker based on request history,
    eliminating the need for direct cache state queries. The tree stores raw text characters
    instead of token IDs to avoid tokenization overhead.

    Process:
    a. For each request, find the worker with the highest prefix match
    b. If match rate > cache_threshold:
       Route to the worker with the highest match (likely has the relevant data cached)
    c. If match rate ≤ cache_threshold:
       Route to the worker with the smallest tree (most available cache capacity)
    d. Background maintenance:
       Periodically evict least recently used leaf nodes to prevent unbounded memory growth
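
    Example (illustrative numbers): with cache_threshold = 0.5, a request
    "hello world" whose best cached prefix on any worker is "hello " has a
    match rate of 6/11 ≈ 0.55 > 0.5 and routes to that worker; if the best
    match were only "he" (2/11 ≈ 0.18), the request would instead go to the
    worker with the smallest tree.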

    2. Load Balancing (Shortest Queue)
    -------------------------------------------
    This strategy tracks pending request counts per worker and routes new requests
    to the least busy worker when the system is detected to be imbalanced.

    Configuration Parameters:
    ------------------------
    1. cache_threshold: (float, 0.0 to 1.0)
    Minimum prefix match ratio to use highest-match routing.
    Below this threshold, routes to worker with most available cache space.

    2. balance_abs_threshold: (integer)
    Absolute difference threshold for load imbalance detection.
    System is potentially imbalanced if (max_load - min_load) > abs_threshold

    3. balance_rel_threshold: (float)
    Relative ratio threshold for load imbalance detection.
    System is potentially imbalanced if max_load > min_load * rel_threshold
    Used in conjunction with abs_threshold to determine final imbalance state.

    4. eviction_interval_secs: (integer)
    Interval between LRU eviction cycles for the approximate trees.

    5. max_tree_size: (integer)
    Maximum nodes per tree. When exceeded, LRU leaf nodes are evicted
    during the next eviction cycle.
*/

use std::{sync::Arc, thread, time::Duration};

use dashmap::DashMap;
use rand::Rng;
use tracing::debug;

use super::{get_healthy_worker_indices, CacheAwareConfig, LoadBalancingPolicy};
use crate::{core::Worker, metrics::RouterMetrics, tree::Tree};

/// Cache-aware routing policy
///
/// Routes requests based on cache affinity when load is balanced,
/// switches to shortest-queue routing when load is imbalanced.
/// Maintains separate trees per model for multi-model support.
#[derive(Debug)]
pub struct CacheAwarePolicy {
    config: CacheAwareConfig,
    trees: Arc<DashMap<String, Arc<Tree>>>,
    eviction_handle: Option<thread::JoinHandle<()>>,
}

impl CacheAwarePolicy {
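    /// Creates a policy with the default configuration.
    ///
    /// For custom thresholds, a minimal construction sketch (field values are
    /// illustrative, not the defaults; marked `ignore` because it needs the
    /// surrounding crate to compile):
    ///
    /// ```ignore
    /// let policy = CacheAwarePolicy::with_config(CacheAwareConfig {
    ///     cache_threshold: 0.5,        // min prefix-match ratio for affinity routing
    ///     balance_abs_threshold: 8,    // absolute load-gap trigger
    ///     balance_rel_threshold: 1.5,  // relative load-ratio trigger
    ///     eviction_interval_secs: 60,  // background LRU eviction period
    ///     max_tree_size: 10_000,       // node budget per model tree
    /// });
    /// ```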
    pub fn new() -> Self {
        Self::with_config(CacheAwareConfig::default())
    }

    pub fn with_config(config: CacheAwareConfig) -> Self {
        let trees = Arc::new(DashMap::<String, Arc<Tree>>::new());

        // Start background eviction thread if configured
        let eviction_handle = if config.eviction_interval_secs > 0 {
            let trees_clone = Arc::clone(&trees);
            let max_tree_size = config.max_tree_size;
            let interval = config.eviction_interval_secs;

            Some(thread::spawn(move || loop {
                thread::sleep(Duration::from_secs(interval));

                // Evict for all model trees
                for tree_ref in trees_clone.iter() {
                    let model_id = tree_ref.key();
                    let tree = tree_ref.value();
                    tree.evict_tenant_by_size(max_tree_size);
                    debug!(
                        "Cache eviction completed for model {}, max_size: {}",
                        model_id, max_tree_size
                    );
                }
            }))
        } else {
            None
        };

        Self {
            config,
            trees,
            eviction_handle,
        }
    }

    /// Initialize the tree with worker URLs (used only during initial setup)
    pub fn init_workers(&self, workers: &[Arc<dyn Worker>]) {
        // Group workers by model
        let mut model_workers: std::collections::HashMap<String, Vec<&Arc<dyn Worker>>> =
            std::collections::HashMap::new();
        for worker in workers {
            // Use "default" for unknown/empty model_ids for backward compatibility
            let model_id = worker.model_id();
            let tree_key = if model_id.is_empty() || model_id == "unknown" {
                "default"
            } else {
                model_id
            };
            model_workers
                .entry(tree_key.to_string())
                .or_default()
                .push(worker);
        }

        // Initialize tree for each model
        for (tree_key, model_workers) in model_workers {
            let tree = self
                .trees
                .entry(tree_key)
                .or_insert_with(|| Arc::new(Tree::new()));
            for worker in model_workers {
                tree.insert("", worker.url());
            }
        }
    }

    /// Add a single worker to the tree (incremental update)
    pub fn add_worker(&self, worker: &dyn Worker) {
        // For backward compatibility: if model_id is "unknown" or empty,
        // use a default tree. This preserves existing behavior for single-model routers.
        let model_id = worker.model_id();
        let tree_key = if model_id.is_empty() || model_id == "unknown" {
            "default"
        } else {
            model_id
        };
        let tree = self
            .trees
            .entry(tree_key.to_string())
            .or_insert_with(|| Arc::new(Tree::new()));
        tree.insert("", worker.url());
    }

    /// Add a worker by URL and model (for backward compatibility)
    pub fn add_worker_by_url(&self, url: &str, model_id: &str) {
        let tree = self
            .trees
            .entry(model_id.to_string())
            .or_insert_with(|| Arc::new(Tree::new()));
        tree.insert("", url);
    }

    /// Remove a worker from the tree
    pub fn remove_worker(&self, worker: &dyn Worker) {
        // Use same logic as add_worker for consistency
        let model_id = worker.model_id();
        let tree_key = if model_id.is_empty() || model_id == "unknown" {
            "default"
        } else {
            model_id
        };
        if let Some(tree) = self.trees.get(tree_key) {
            tree.remove_tenant(worker.url());
        }
    }

    /// Remove a worker by URL (removes from all model trees for backward compatibility)
    pub fn remove_worker_by_url(&self, url: &str) {
        // Remove from all trees since we don't know which model it belongs to
        for tree_ref in self.trees.iter() {
            tree_ref.value().remove_tenant(url);
        }
    }

    /// Run cache eviction to prevent unbounded growth
    pub fn evict_cache(&self, max_size: usize) {
        for tree_ref in self.trees.iter() {
            let model_id = tree_ref.key();
            let tree = tree_ref.value();
            tree.evict_tenant_by_size(max_size);
            debug!(
                "Cache eviction for model {}, max_size: {}",
                model_id, max_size
            );
        }
    }
}

impl LoadBalancingPolicy for CacheAwarePolicy {
    fn select_worker(
        &self,
        workers: &[Arc<dyn Worker>],
        request_text: Option<&str>,
    ) -> Option<usize> {
        let healthy_indices = get_healthy_worker_indices(workers);

        if healthy_indices.is_empty() {
            return None;
        }

        // Determine the model for this set of workers (the router pre-filters
        // by model, so all workers here should serve the same model)
        let first_model = workers[healthy_indices[0]].model_id();
        let model_id = if first_model.is_empty() || first_model == "unknown" {
            "default"
        } else {
            first_model
        };

        // Get current load statistics
        let loads: Vec<usize> = workers.iter().map(|w| w.load()).collect();
        let max_load = *loads.iter().max().unwrap_or(&0);
        let min_load = *loads.iter().min().unwrap_or(&0);

        // Check if load is imbalanced
        let is_imbalanced = max_load.saturating_sub(min_load) > self.config.balance_abs_threshold
            && (max_load as f32) > (min_load as f32 * self.config.balance_rel_threshold);

        if is_imbalanced {
            // Log load balancing trigger
            let worker_loads: Vec<(String, usize)> = workers
                .iter()
                .map(|w| (w.url().to_string(), w.load()))
                .collect();

            debug!(
                "Load balancing triggered | max: {} | min: {} | workers: {:?}",
                max_load, min_load, worker_loads
            );

            RouterMetrics::record_load_balancing_event();
            RouterMetrics::set_load_range(max_load, min_load);

            // Use shortest queue when imbalanced
            let min_load_idx = healthy_indices
                .iter()
                .min_by_key(|&&idx| workers[idx].load())
                .copied()?;

            // Even in imbalanced mode, update the tree to maintain cache state
            if let Some(text) = request_text {
                // Get the tree reference without locking the entire map:
                // DashMap only locks the specific shard containing this key
                let tree = self.trees.get(model_id).map(|entry| entry.value().clone());

                if let Some(tree) = tree {
                    // Now we can work with the tree without holding any DashMap lock
                    tree.insert(text, workers[min_load_idx].url());
                } else {
                    debug!(
                        "Warning: No tree found for model '{}', skipping cache update",
                        model_id
                    );
                }
            }

            // Increment processed counter
            workers[min_load_idx].increment_processed();
            RouterMetrics::record_processed_request(workers[min_load_idx].url());
            RouterMetrics::record_policy_decision(self.name(), workers[min_load_idx].url());

            return Some(min_load_idx);
        }

        // Use cache-aware routing when balanced
        let text = request_text.unwrap_or("");

        // Get the tree reference without locking the entire map:
        // DashMap only locks the specific shard containing this key
        let tree = self.trees.get(model_id).map(|entry| entry.value().clone());

        if let Some(tree) = tree {
            // Now we work with the tree without holding any DashMap lock
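            // The match rate below is matched-prefix length over request length,
            // both counted in Unicode scalar values via `chars()` (not bytes),
            // so multi-byte UTF-8 text is weighted per character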
            let (matched_text, matched_worker) = tree.prefix_match(text);
            let match_rate = if text.is_empty() {
                0.0
            } else {
                matched_text.chars().count() as f32 / text.chars().count() as f32
            };

            let selected_url = if match_rate > self.config.cache_threshold {
                RouterMetrics::record_cache_hit();
                matched_worker.to_string()
            } else {
                RouterMetrics::record_cache_miss();
                tree.get_smallest_tenant()
            };

            // Find the index of the selected worker
            if let Some(selected_idx) = workers.iter().position(|w| w.url() == selected_url) {
                // Only proceed if the worker is healthy
                if workers[selected_idx].is_healthy() {
                    // Update the tree with this request
                    tree.insert(text, &selected_url);

                    // Increment processed counter
                    workers[selected_idx].increment_processed();
                    RouterMetrics::record_processed_request(&selected_url);

                    return Some(selected_idx);
                }
            } else {
                // Selected worker no longer exists, remove it from tree
                tree.remove_tenant(&selected_url);
                debug!("Removed stale worker {} from cache tree", selected_url);
            }

            // Fall back to the first healthy worker
            healthy_indices.first().copied()
        } else {
            // No tree for this model, log warning and use random selection
            debug!(
                "Warning: No tree found for model '{}', using random worker selection",
                model_id
            );
            // Return a random healthy worker
            let mut rng = rand::rng();
            let random_idx = rng.random_range(0..healthy_indices.len());
            Some(healthy_indices[random_idx])
        }
    }

    fn name(&self) -> &'static str {
        "cache_aware"
    }

    fn needs_request_text(&self) -> bool {
        true // Cache-aware policy needs request text for cache affinity
    }

    fn on_request_complete(&self, worker_url: &str, success: bool) {
        // Could track success rates per worker for more intelligent routing
        if !success {
            // Optionally reduce affinity for failed requests
            tracing::debug!(
                "Request to {} completed with success={}",
                worker_url,
                success
            );
        }
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    fn select_worker_pair(
        &self,
        prefill_workers: &[Arc<dyn Worker>],
        decode_workers: &[Arc<dyn Worker>],
        request_text: Option<&str>,
    ) -> Option<(usize, usize)> {
        // DEPRECATED: This method is no longer used when separate policies are configured.
        // The PD router now uses separate policies for prefill and decode selection.
        // This implementation remains for backward compatibility when a single policy is used.

        // In PD mode with a single policy:
        // - Prefill: Use cache-aware routing for better cache utilization
        // - Decode: Use least-load routing for better load distribution

        // Select prefill worker using cache-aware logic
        let prefill_idx = self.select_worker(prefill_workers, request_text)?;

        // Select decode worker using least-load logic
        let healthy_decode = get_healthy_worker_indices(decode_workers);
        if healthy_decode.is_empty() {
            return None;
        }

        let decode_idx = healthy_decode
            .iter()
            .min_by_key(|&&idx| decode_workers[idx].load())
            .copied()?;

        Some((prefill_idx, decode_idx))
    }
}

impl Default for CacheAwarePolicy {
    fn default() -> Self {
        Self::new()
    }
}

impl Drop for CacheAwarePolicy {
    fn drop(&mut self) {
        // Note: We can't properly stop the eviction thread since it's in an infinite loop
        // In a production system, we'd use a channel or atomic flag to signal
        // shutdown (see the sketch after this impl)
        if let Some(handle) = self.eviction_handle.take() {
            // The thread will continue running until the program exits
            // This is acceptable for now since the router typically runs for the lifetime of the program
            drop(handle);
        }
    }
}
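
// A minimal sketch of the signalled shutdown mentioned in `Drop` above, using a
// hypothetical `shutdown: Arc<AtomicBool>` field (not part of this struct):
//
//     use std::sync::atomic::{AtomicBool, Ordering};
//
//     // Eviction thread: re-check the flag each cycle instead of looping forever.
//     while !shutdown.load(Ordering::Relaxed) {
//         thread::sleep(Duration::from_secs(interval));
//         // ... evict ...
//     }
//
//     // Drop::drop: raise the flag, then join so the thread exits cleanly.
//     // (join may still block up to one sleep interval; a channel with
//     // recv_timeout would avoid that wait.)
//     self.shutdown.store(true, Ordering::Relaxed);
//     if let Some(handle) = self.eviction_handle.take() {
//         let _ = handle.join();
//     }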

#[cfg(test)]
mod tests {
    use super::*;
    use crate::core::{BasicWorkerBuilder, WorkerType};

    #[test]
    fn test_cache_aware_with_balanced_load() {
        // Create policy without eviction thread for testing
        let config = CacheAwareConfig {
            eviction_interval_secs: 0, // Disable eviction thread
            ..Default::default()
        };
        let policy = CacheAwarePolicy::with_config(config);
        let workers: Vec<Arc<dyn Worker>> = vec![
            Arc::new(
                BasicWorkerBuilder::new("http://w1:8000")
                    .worker_type(WorkerType::Regular)
                    .api_key("test_api_key")
                    .build(),
            ),
            Arc::new(
                BasicWorkerBuilder::new("http://w2:8000")
                    .worker_type(WorkerType::Regular)
                    .api_key("test_api_key")
                    .build(),
            ),
        ];

        // Initialize the policy with workers
        policy.init_workers(&workers);

        // First request should be distributed
        let idx1 = policy.select_worker(&workers, Some("hello world")).unwrap();

        // Same request should go to same worker (cache hit)
        let idx2 = policy.select_worker(&workers, Some("hello world")).unwrap();
        assert_eq!(idx1, idx2);

        // Similar request should also go to same worker
        let idx3 = policy.select_worker(&workers, Some("hello")).unwrap();
        assert_eq!(idx1, idx3);
    }

    #[test]
    fn test_cache_aware_with_imbalanced_load() {
        let policy = CacheAwarePolicy::with_config(CacheAwareConfig {
            cache_threshold: 0.5,
            balance_abs_threshold: 5,
            balance_rel_threshold: 2.0,
            eviction_interval_secs: 0, // Disable eviction thread
            max_tree_size: 10000,
        });

        let worker1 = BasicWorkerBuilder::new("http://w1:8000")
            .worker_type(WorkerType::Regular)
            .build();
        let worker2 = BasicWorkerBuilder::new("http://w2:8000")
            .worker_type(WorkerType::Regular)
            .build();

        // Create significant load imbalance
        for _ in 0..20 {
            worker1.increment_load();
        }
        // worker2 has load 0

        let workers: Vec<Arc<dyn Worker>> = vec![Arc::new(worker1), Arc::new(worker2)];
        policy.init_workers(&workers);

        // Should select worker2 (lower load) despite cache affinity
        for _ in 0..5 {
            let idx = policy.select_worker(&workers, Some("test")).unwrap();
            assert_eq!(idx, 1); // Should always pick worker2
        }
    }

    #[test]
    fn test_cache_aware_worker_removal() {
        let config = CacheAwareConfig {
            eviction_interval_secs: 0, // Disable eviction thread
            ..Default::default()
        };
        let policy = CacheAwarePolicy::with_config(config);
        let workers: Vec<Arc<dyn Worker>> = vec![
            Arc::new(
                BasicWorkerBuilder::new("http://w1:8000")
                    .worker_type(WorkerType::Regular)
                    .build(),
            ),
            Arc::new(
                BasicWorkerBuilder::new("http://w2:8000")
                    .worker_type(WorkerType::Regular)
                    .build(),
            ),
        ];

        policy.init_workers(&workers);

        // Route some requests
        policy.select_worker(&workers, Some("test1"));
        policy.select_worker(&workers, Some("test2"));

        // Remove a worker
        policy.remove_worker_by_url("http://w1:8000");
        workers[0].set_healthy(false);

        // All requests should now go to worker2
        let idx = policy.select_worker(&workers, Some("test1")).unwrap();
        assert_eq!(idx, 1);
    }
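
    // Extra coverage for the URL-based add/remove paths. This assumes, as the
    // implementation above shows, that `remove_worker_by_url` clears the tenant
    // from every model tree but leaves the per-model tree entry in place.
    #[test]
    fn test_add_and_remove_worker_by_url() {
        let policy = CacheAwarePolicy::with_config(CacheAwareConfig {
            eviction_interval_secs: 0, // Disable eviction thread
            ..Default::default()
        });

        policy.add_worker_by_url("http://w1:8000", "model-a");
        policy.add_worker_by_url("http://w1:8000", "model-b");
        assert!(policy.trees.contains_key("model-a"));
        assert!(policy.trees.contains_key("model-b"));

        // Removal by URL walks all model trees, since the model is unknown here
        policy.remove_worker_by_url("http://w1:8000");
        assert!(policy.trees.contains_key("model-a")); // entry kept, tenant removed
    }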
}