// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use super::*;
use llm_rs::kv_router::indexer::KvIndexerInterface;
use rs::traits::events::EventSubscriber;
use tracing;

use llm_rs::kv_router::{indexer::compute_block_hash_for_seq, protocols::*};

/// Python-visible handle to the Rust KV-aware router, which picks a worker
/// for a token sequence (see `schedule`).
#[pyclass]
pub(crate) struct KvRouter {
    // Shared ownership so async futures returned to Python can outlive `&self`.
    inner: Arc<llm_rs::kv_router::KvRouter>,
}

#[pymethods]
impl KvRouter {
    #[new]
GuanLuo's avatar
GuanLuo committed
33
    // [FXIME] 'drt' can be obtained from 'component'
34
    fn new(drt: DistributedRuntime, component: Component, kv_block_size: usize) -> PyResult<Self> {
35
36
37
38
39
        let runtime = pyo3_async_runtimes::tokio::get_runtime();
        runtime.block_on(async {
            let inner = llm_rs::kv_router::KvRouter::from_runtime(
                drt.inner.clone(),
                component.inner.clone(),
40
                kv_block_size,
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
            )
            .await
            .map_err(to_pyerr)?;
            Ok(Self { inner })
        })
    }

    fn schedule<'p>(
        &self,
        py: Python<'p>,
        token_ids: Vec<u32>,
        lora_id: u64,
    ) -> PyResult<Bound<'p, PyAny>> {
        let router = self.inner.clone();
        pyo3_async_runtimes::tokio::future_into_py(py, async move {
GuanLuo's avatar
GuanLuo committed
56
            let worker_id = router
57
58
59
                .schedule(&token_ids, lora_id)
                .await
                .map_err(to_pyerr)?;
GuanLuo's avatar
GuanLuo committed
60
            Ok(worker_id)
61
62
63
        })
    }
}

/// Python-visible publisher for per-worker KV/scheduling metrics
/// (see `publish` / `create_endpoint`).
#[pyclass]
pub(crate) struct KvMetricsPublisher {
    // Shared so the endpoint future returned to Python can hold a handle.
    inner: Arc<llm_rs::kv_router::publisher::KvMetricsPublisher>,
}

#[pymethods]
impl KvMetricsPublisher {
    #[new]
    fn new() -> PyResult<Self> {
        let inner = llm_rs::kv_router::publisher::KvMetricsPublisher::new().map_err(to_pyerr)?;
        Ok(Self {
            inner: inner.into(),
        })
    }

Alec's avatar
Alec committed
80
    fn create_endpoint<'p>(
GuanLuo's avatar
GuanLuo committed
81
82
83
84
85
86
87
        &self,
        py: Python<'p>,
        component: Component,
    ) -> PyResult<Bound<'p, PyAny>> {
        let rs_publisher = self.inner.clone();
        let rs_component = component.inner.clone();
        pyo3_async_runtimes::tokio::future_into_py(py, async move {
88
            rs_publisher
89
                .create_endpoint(rs_component)
GuanLuo's avatar
GuanLuo committed
90
91
92
93
94
95
                .await
                .map_err(to_pyerr)?;
            Ok(())
        })
    }

96
    #[allow(clippy::too_many_arguments)]
97
    fn publish(
GuanLuo's avatar
GuanLuo committed
98
        &self,
99
        _py: Python,
GuanLuo's avatar
GuanLuo committed
100
101
102
103
        request_active_slots: u64,
        request_total_slots: u64,
        kv_active_blocks: u64,
        kv_total_blocks: u64,
104
105
106
        num_requests_waiting: u64,
        gpu_cache_usage_perc: f32,
        gpu_prefix_cache_hit_rate: f32,
GuanLuo's avatar
GuanLuo committed
107
108
109
110
111
112
113
114
    ) -> PyResult<()> {
        self.inner
            .publish(
                llm_rs::kv_router::protocols::ForwardPassMetrics {
                    request_active_slots,
                    request_total_slots,
                    kv_active_blocks,
                    kv_total_blocks,
115
116
117
                    num_requests_waiting,
                    gpu_cache_usage_perc,
                    gpu_prefix_cache_hit_rate,
GuanLuo's avatar
GuanLuo committed
118
119
120
121
122
123
                }
                .into(),
            )
            .map_err(to_pyerr)
    }
}
/// Python-visible publisher for KV-cache store/remove events
/// (see `publish_stored` / `publish_removed`).
#[pyclass]
pub(crate) struct KvEventPublisher {
    inner: Arc<llm_rs::kv_router::publisher::KvEventPublisher>,
    // Rate-limits the "block not published" warning in `create_stored_blocks`.
    warning_count: u32,
}

#[pymethods]
impl KvEventPublisher {
    /// Construct a publisher that emits KV-cache events for `worker_id` on
    /// the given component, using `kv_block_size` tokens per block.
    #[new]
    fn new(component: Component, worker_id: i64, kv_block_size: usize) -> PyResult<Self> {
        let publisher = llm_rs::kv_router::publisher::KvEventPublisher::new(
            component.inner.clone(),
            worker_id,
            kv_block_size,
        )
        .map_err(to_pyerr)?;
        Ok(Self {
            inner: publisher.into(),
            warning_count: 0,
        })
    }

    /// Publish a "blocks stored" event described by the parallel
    /// `num_block_tokens` / `block_hashes` arrays over `token_ids`.
    #[allow(clippy::too_many_arguments)]
    #[pyo3(signature = (event_id, token_ids, num_block_tokens, block_hashes, lora_id, parent_hash=None))]
    fn publish_stored(
        &mut self,
        _py: Python,
        event_id: u64,
        token_ids: Vec<u32>,
        num_block_tokens: Vec<u64>,
        block_hashes: Vec<u64>,
        lora_id: u64,
        parent_hash: Option<u64>,
    ) -> PyResult<()> {
        let blocks =
            self.create_stored_blocks(&token_ids, &num_block_tokens, &block_hashes, lora_id);
        let data = KvCacheEventData::Stored(KvCacheStoreData {
            parent_hash: parent_hash.map(ExternalSequenceBlockHash),
            blocks,
        });
        self.inner
            .publish(KvCacheEvent { event_id, data })
            .map_err(to_pyerr)
    }

    /// Publish a "blocks removed" event for the given external block hashes.
    fn publish_removed(&self, _py: Python, event_id: u64, block_hashes: Vec<u64>) -> PyResult<()> {
        let data = KvCacheEventData::Removed(KvCacheRemoveData {
            block_hashes: block_hashes
                .into_iter()
                .map(ExternalSequenceBlockHash)
                .collect(),
        });
        self.inner
            .publish(KvCacheEvent { event_id, data })
            .map_err(to_pyerr)
    }
}

impl KvEventPublisher {
    /// Build one stored-block record: keep the externally supplied block
    /// hash and recompute the token-sequence hash locally.
    fn create_stored_block_from_parts(
        &self,
        block_hash: u64,
        token_ids: &[u32],
        _lora_id: u64,
    ) -> KvCacheStoredBlockData {
        // `token_ids` is exactly one full block here, so the hash-per-block
        // result has a single entry.
        let tokens_hash = compute_block_hash_for_seq(token_ids, self.inner.kv_block_size())[0];
        KvCacheStoredBlockData {
            block_hash: ExternalSequenceBlockHash(block_hash),
            tokens_hash,
        }
    }

    /// Convert the parallel `num_block_tokens` / `block_hashes` arrays into
    /// stored-block records, slicing `token_ids` block by block.
    ///
    /// Only full blocks (exactly `kv_block_size` tokens) are publishable;
    /// processing stops at the first partial block. The warning is emitted at
    /// most 3 times over the publisher's lifetime to avoid log spam.
    fn create_stored_blocks(
        &mut self,
        token_ids: &[u32],
        num_block_tokens: &[u64],
        block_hashes: &[u64],
        lora_id: u64,
    ) -> Vec<KvCacheStoredBlockData> {
        let mut blocks: Vec<KvCacheStoredBlockData> = Vec::new();

        let mut token_offset: usize = 0;
        for (num_tokens_it, block_hash_it) in num_block_tokens.iter().zip(block_hashes.iter()) {
            if *num_tokens_it != self.inner.kv_block_size() as u64 {
                // BUG FIX: the size check used to be gated on
                // `warning_count < 3`, so after three warnings partial blocks
                // were published instead of skipped. Always stop at a partial
                // block; only the warning itself is rate-limited.
                if self.warning_count < 3 {
                    tracing::warn!(
                        "Block not published. Block size must be {} tokens to be published. Block size is: {}",
                        self.inner.kv_block_size(),
                        *num_tokens_it
                    );
                    self.warning_count += 1;
                }
                break;
            }

            let tokens = &token_ids[token_offset..(token_offset + *num_tokens_it as usize)];
            blocks.push(self.create_stored_block_from_parts(*block_hash_it, tokens, lora_id));
            token_offset += *num_tokens_it as usize;
        }

        blocks
    }
}

/// Python-visible result of a KV-cache overlap query
/// (see `KvIndexer::find_matches_for_request`).
#[pyclass]
#[derive(Clone)]
pub(crate) struct OverlapScores {
    inner: llm_rs::kv_router::indexer::OverlapScores,
}

#[pymethods]
impl OverlapScores {
    /// Copy of the per-worker score map (worker id -> score).
    #[getter]
    fn scores(&self) -> HashMap<llm_rs::kv_router::indexer::WorkerId, u32> {
        self.inner.scores.to_owned()
    }

    /// Copy of the frequency list from the underlying result.
    #[getter]
    fn frequencies(&self) -> Vec<usize> {
        self.inner.frequencies.to_vec()
    }
}

/// Python-visible KV-cache indexer; a background task (spawned in `new`)
/// feeds it events from the component's KV event stream.
#[pyclass]
pub(crate) struct KvIndexer {
    inner: Arc<llm_rs::kv_router::indexer::KvIndexer>,
}

#[pymethods]
impl KvIndexer {
    #[new]
260
    fn new(component: Component, kv_block_size: usize) -> PyResult<Self> {
261
262
263
264
265
        let runtime = pyo3_async_runtimes::tokio::get_runtime();
        runtime.block_on(async {
            let inner: Arc<llm_rs::kv_router::indexer::KvIndexer> =
                llm_rs::kv_router::indexer::KvIndexer::new(
                    component.inner.drt().runtime().child_token(),
266
                    kv_block_size,
267
268
                )
                .into();
269
270
            // [gluo TODO] try subscribe_with_type::<RouterEvent>,
            // error checking below will be different.
271
272
            let mut kv_events_rx = component
                .inner
273
                .subscribe(llm_rs::kv_router::KV_EVENT_SUBJECT)
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
                .await
                .map_err(to_pyerr)?;
            let kv_events_tx = inner.event_sender();

            // [FIXME] this is the added functionality to the indexer to subscribe to kv events,
            // should have been made to a trait and implemented here? i.e. AsyncEngine style
            tokio::spawn(async move {
                while let Some(event) = kv_events_rx.next().await {
                    let event: llm_rs::kv_router::indexer::RouterEvent =
                        serde_json::from_slice(&event.payload).unwrap();
                    tracing::debug!("received kv event: {:?}", event);
                    if let Err(e) = kv_events_tx.send(event).await {
                        tracing::trace!(
                            "failed to send kv event to indexer; shutting down: {:?}",
                            e
                        );
                    }
                }
            });
            Ok(Self { inner })
        })
    }

297
298
299
300
    fn block_size(&self) -> usize {
        self.inner.block_size()
    }

301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
    fn find_matches_for_request<'p>(
        &self,
        py: Python<'p>,
        token_ids: Vec<u32>,
        _lora_id: u64,
    ) -> PyResult<Bound<'p, PyAny>> {
        let indexer = self.inner.clone();
        pyo3_async_runtimes::tokio::future_into_py(py, async move {
            let rs_overlap_scores = indexer
                .find_matches_for_request(token_ids.as_slice())
                .await
                .map_err(to_pyerr)?;
            Ok(OverlapScores {
                inner: rs_overlap_scores,
            })
        })
    }
}

/// One worker endpoint's KV/scheduling metrics snapshot, exposed to Python
/// with read/write attributes.
#[pyclass]
#[derive(Clone)]
pub(crate) struct EndpointKvMetrics {
    #[pyo3(get, set)]
    pub worker_id: i64,
    #[pyo3(get, set)]
    pub request_active_slots: u64,
    #[pyo3(get, set)]
    pub request_total_slots: u64,
    #[pyo3(get, set)]
    pub kv_active_blocks: u64,
    #[pyo3(get, set)]
    pub kv_total_blocks: u64,
    #[pyo3(get, set)]
    pub num_requests_waiting: u64,
    #[pyo3(get, set)]
    pub gpu_cache_usage_perc: f32,
    #[pyo3(get, set)]
    pub gpu_prefix_cache_hit_rate: f32,
}

/// Aggregated view over all endpoints' metrics, plus load statistics
/// (see `KvMetricsAggregator::get_metrics`).
#[pyclass]
#[derive(Clone)]
pub(crate) struct AggregatedMetrics {
    #[pyo3(get, set)]
    pub endpoints: Vec<EndpointKvMetrics>,
    #[pyo3(get, set)]
    pub load_avg: f64,
    #[pyo3(get, set)]
    pub load_std: f64,
}

/// Python-visible aggregator that collects metrics from a component's
/// endpoints (see `get_metrics`).
#[pyclass]
pub(crate) struct KvMetricsAggregator {
    inner: Arc<llm_rs::kv_router::metrics_aggregator::KvMetricsAggregator>,
}

#[pymethods]
impl KvMetricsAggregator {
    /// Create an aggregator watching `component`'s metrics endpoints.
    /// Blocks on the shared tokio runtime because construction is async.
    #[new]
    fn new(component: Component) -> PyResult<Self> {
        let runtime = pyo3_async_runtimes::tokio::get_runtime();
        runtime.block_on(async {
            let inner = llm_rs::kv_router::metrics_aggregator::KvMetricsAggregator::new(
                component.inner.clone(),
                component.inner.drt().runtime().child_token(),
            )
            .await;
            Ok(Self {
                inner: inner.into(),
            })
        })
    }

    /// Return an awaitable resolving to `AggregatedMetrics`: a per-endpoint
    /// snapshot plus the aggregate load average / standard deviation.
    fn get_metrics<'p>(&self, py: Python<'p>) -> PyResult<Bound<'p, PyAny>> {
        // Snapshot synchronously; the async part only hands the
        // already-computed values back to Python.
        let endpoints = self.inner.get_endpoints();
        let endpoint_kv_metrics = endpoints
            .endpoints
            .iter()
            .map(|x| EndpointKvMetrics {
                worker_id: x.worker_id(),
                request_active_slots: x.data.request_active_slots,
                request_total_slots: x.data.request_total_slots,
                kv_active_blocks: x.data.kv_active_blocks,
                kv_total_blocks: x.data.kv_total_blocks,
                num_requests_waiting: x.data.num_requests_waiting,
                gpu_cache_usage_perc: x.data.gpu_cache_usage_perc,
                gpu_prefix_cache_hit_rate: x.data.gpu_prefix_cache_hit_rate,
            })
            .collect();
        pyo3_async_runtimes::tokio::future_into_py(py, async move {
            Ok(AggregatedMetrics {
                endpoints: endpoint_kv_metrics,
                load_avg: endpoints.load_avg,
                load_std: endpoints.load_std,
            })
        })
    }
}