# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import argparse
18
import logging
19
20
import random
from argparse import Namespace
21
from typing import AsyncIterator, Tuple
22

23
from components.worker import VllmWorker
24
from utils.logging import check_required_workers
25
from utils.protocol import Tokens
26
from utils.vllm import RouterType
27
28

from dynamo.llm import AggregatedMetrics, KvIndexer, KvMetricsAggregator, OverlapScores
29
from dynamo.sdk import async_on_start, depends, dynamo_context, dynamo_endpoint, service
30
31
32
from dynamo.sdk.lib.config import ServiceConfig

WorkerId = str
33
fallback_msg = "Will fallback to random routing."
34

35
36
logger = logging.getLogger(__name__)

37
38
39
40
41
42
43
44
45
46

def parse_args(service_name, prefix) -> Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--min-workers",
        type=int,
        default=1,
        help="Minimum number of workers required before proceeding",
    )
    parser.add_argument(
47
        "--model",
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
        type=str,
        default="deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
        help="Model that is being served",
    )
    # TODO: Read block size
    parser.add_argument(
        "--block-size",
        type=int,
        default=64,
        help="KV block size",
    )
    parser.add_argument(
        "--custom-router",
        type=bool,
        default=False,
        help="Whether to use custom router or not",
    )
65
66
67
68
69
70
    parser.add_argument(
        "--router",
        type=str,
        default="kv",
        help="The router type",
    )
71
72
73
74
75
76
77
78
79
    config = ServiceConfig.get_instance()
    config_args = config.as_args(service_name, prefix=prefix)
    args = parser.parse_args(config_args)
    return args


@service(
    dynamo={
        "enabled": True,
80
        "namespace": "dynamo",
81
82
83
84
85
86
87
88
89
    },
    resources={"cpu": "10", "memory": "20Gi"},
    workers=1,
)
class Router:
    """
    Request handler for the generate endpoint
    """

90
91
    worker = depends(VllmWorker)

92
    def __init__(self):
93
        logger.info("Initializing Custom Router")
94
95
        self.args = parse_args(self.__class__.__name__, "")

96
97
98
99
100
101
        self.default_metrics = {
            "gpu_cache_usage_perc": 0.0,
            "num_requests_waiting": 0.0,
            "gpu_prefix_cache_hit_rate": 0.0,
        }

102
    @async_on_start
103
104
105
    async def async_init(self):
        self.runtime = dynamo_context["runtime"]
        self.workers_client = (
106
            await self.runtime.namespace("dynamo")
107
108
109
110
            .component("VllmWorker")
            .endpoint("generate")
            .client()
        )
111

112
113
        self.router_type = self.args.router

114
        await check_required_workers(self.workers_client, self.args.min_workers)
115

116
        kv_listener = self.runtime.namespace("dynamo").component("VllmWorker")
117
        await kv_listener.create_service()
118
119
        if self.router_type == RouterType.KV:
            self.indexer = KvIndexer(kv_listener, self.args.block_size)
120
        self.metrics_aggregator = KvMetricsAggregator(kv_listener)
121
        logger.info("KV Router initialized")
122
123
124
125
126
127
128

    def _cost_function(
        self,
        scores: OverlapScores | None,
        metrics: AggregatedMetrics | None,
        token_length: int,
    ):
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
        """The cost function for deciding the best worker to route a request to.
        If there are multiple workers sharing the same optimal cost, then
        one of them is randomly selected.

        Args:
            scores (OverlapScores | None): The number of matching blocks between
                the request and the prefix cache of each worker.
            metrics (AggregatedMetrics | None): Several worker metrics polled
                by the `KvMetricsAggregator`, currently including the
                GPU cache usage, number of waiting requests, and the
                GPU prefix cache hit rate.
            token_length (int): The number of tokens in the request.

        Returns:
            (str, float): The best worker id and the corresponding score.
        """

146
147
148
149
150
151
152
153
        worker_scores = {}
        if scores:
            for worker_id, score in scores.scores.items():
                # score is number of matching blocks we multiply by block_size to get tokens
                # and compare to token_length. The larger the cache hit the better
                worker_scores[worker_id] = (
                    score * self.indexer.block_size() / token_length
                )
154
155
        else:
            logger.warning("Cannot get KV scores")
156
157
158
159
160
161

        worker_metrics = {}
        max_waiting = 0.0
        if metrics:
            for endpoint in metrics.endpoints:
                worker_id = endpoint.worker_id
162
                worker_metrics[worker_id] = {
163
164
                    key: getattr(endpoint, key, self.default_metrics[key])
                    for key in self.default_metrics.keys()
165
166
167
168
                }
                max_waiting = max(
                    max_waiting, worker_metrics[worker_id]["num_requests_waiting"]
                )
169
170
        else:
            logger.warning("Cannot get metrics")
171
172
173
174
175
176
177
178
179

        # Get all worker IDs from the client. This is needed because scores / metrics may not have values for all workers
        # and we want all workers to be considered in the logit calculation
        worker_ids = self.workers_client.endpoint_ids()

        worker_logits = {}
        for worker_id in worker_ids:
            # Use default values if worker not in scores or metrics
            score = worker_scores.get(worker_id, 0.0)
180
181
            metrics_dict = worker_metrics.get(worker_id, self.default_metrics)
            gpu_cache_usage = metrics_dict["gpu_cache_usage_perc"]
182
183
184
185
186
187
188
189
190

            normalized_waiting = (
                metrics_dict["num_requests_waiting"] / max_waiting
                if max_waiting > 0
                else 0.0
            )

            # Have 1 metric that weights towards cache hit
            # 2 metrics that penalize overloaded worker and queuing
191
            worker_logits[worker_id] = 2 * score - gpu_cache_usage - normalized_waiting
192
            logger.info(
193
                f"Formula for {worker_id}: {worker_logits[worker_id]:.3f} = 2.0 * {score:.3f} - {gpu_cache_usage:.3f} - {normalized_waiting:.3f}"
194
195
            )

196
197
        if not worker_logits or not any(worker_logits.values()):
            logger.warning(f"All worker logits are zero. {fallback_msg}.")
198
            return "", 0.0
199
200

        # Select the worker with the highest logit
201
202
203
204
205
        max_logit = max(worker_logits.values())
        best_workers = [
            wid for wid, logit in worker_logits.items() if logit == max_logit
        ]
        best_worker_id = random.choice(best_workers)
206
207
208

        # Log the metrics for the selected worker
        if best_worker_id:
209
            metrics_dict = worker_metrics.get(best_worker_id, self.default_metrics)
210

211
212
213
214
215
216
217
218
219
220
221
            # Create log messages
            log_messages = [
                f"Selected worker: {best_worker_id}, logit: {worker_logits[best_worker_id]:.3f}",
                f"Score: {scores.scores.get(best_worker_id, 0.0) if scores else 0.0:.3f}",
                f"GPU Cache Hit Rate: {metrics_dict['gpu_prefix_cache_hit_rate']:.3f}",
                f"GPU Cache Usage: {metrics_dict['gpu_cache_usage_perc']:.3f}",
                f"Requests Waiting: {metrics_dict['num_requests_waiting']}",
            ]

            # Log to vllm_logger
            for message in log_messages:
222
                logger.info(message)
223
224
225

        return best_worker_id, worker_scores.get(best_worker_id, 0.0)

226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
    def _get_underloaded_worker(self, metrics: AggregatedMetrics | None):
        if not metrics:
            logger.warning(f"Cannot get metrics. {fallback_msg}")
            return "", 0.0

        kv_load = {
            endpoint.worker_id: getattr(endpoint, "gpu_cache_usage_perc", 0.0)
            for endpoint in metrics.endpoints
        }

        if not kv_load or not any(kv_load.values()):
            logger.warning(f"All KV loads are zero. {fallback_msg}")
            return "", 0.0

        min_load = min(kv_load.values())
        min_load_workers = [
            worker_id for worker_id, load in kv_load.items() if load == min_load
        ]
        best_worker_id = random.choice(min_load_workers)

        logger.info(
            f"Selected worker: {best_worker_id}, KV load: {kv_load[best_worker_id]:.3f}"
        )
        return best_worker_id, kv_load[best_worker_id]

251
    @dynamo_endpoint()
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
    async def generate(self, request: Tokens) -> AsyncIterator[Tuple[WorkerId, float]]:
        metrics = await self.metrics_aggregator.get_metrics()

        # Quick return for KV_LOAD mode
        if self.router_type == RouterType.KV_LOAD:
            try:
                yield self._get_underloaded_worker(metrics)
            except Exception as e:
                logger.exception(
                    f"Error finding underloaded worker: {e}. {fallback_msg}"
                )
                yield "", 0.0
            return

        # Existing KV routing logic
267
268
269
270
271
272
273
        lora_id = 0
        try:
            scores = await self.indexer.find_matches_for_request(
                request.tokens, lora_id
            )
        except Exception as e:
            scores = {}
274
275
276
            logger.exception(f"Error finding matches: {e}. {fallback_msg}")
            yield "", 0.0
            return
277

278
279
280
        worker_id, prefix_hit_rate = self._cost_function(
            scores, metrics, len(request.tokens)
        )
281

282
283
284
285
286
287
        if worker_id:
            logger.info(
                f"Scheduling to worker_id: {worker_id} with estimated prefix hit rate: {prefix_hit_rate}"
            )

        yield worker_id, prefix_hit_rate