"docs/guides/planner_benchmark/disagg_1p1d.yml" did not exist on "9f0181a8b38e3ff4ceff05f20a62875d896aeb3e"
kv_router.py 9.97 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import argparse
import logging
import random
from argparse import Namespace
from typing import AsyncIterator, Tuple

from components.worker import VllmWorker
from utils.check_worker import check_required_workers
from utils.protocol import Tokens
from utils.vllm import RouterType

from dynamo.llm import AggregatedMetrics, KvIndexer, KvMetricsAggregator, OverlapScores
from dynamo.sdk import async_on_start, depends, dynamo_context, dynamo_endpoint, service
from dynamo.sdk.lib.config import ServiceConfig

WorkerId = str
fallback_msg = "Will fallback to random routing."

logger = logging.getLogger(__name__)


def parse_args(service_name, prefix) -> Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--min-workers",
        type=int,
        default=1,
        help="Minimum number of workers required before proceeding",
    )
    parser.add_argument(
        "--model",
        type=str,
        default="deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
        help="Model that is being served",
    )
    # TODO: Read block size
    parser.add_argument(
        "--block-size",
        type=int,
        default=64,
        help="KV block size",
    )
    # NOTE: argparse's type=bool treats any non-empty string (even "false") as True
    parser.add_argument(
        "--custom-router",
        type=bool,
        default=False,
        help="Whether to use the custom router",
    )
    parser.add_argument(
        "--router",
        type=str,
        default="kv",
        help="The router type",
    )
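    # Illustrative note (assumption, not from the source): ServiceConfig.as_args is
    # expected to surface this service's config section as CLI-style tokens, so a
    # hypothetical entry like `Router: {min-workers: 2, router: kv}` would arrive
    # below as ["--min-workers", "2", "--router", "kv"] and be parsed by this parser.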
    config = ServiceConfig.get_instance()
    config_args = config.as_args(service_name, prefix=prefix)
    args = parser.parse_args(config_args)
    return args


@service(
    dynamo={
        "namespace": "dynamo",
    },
    resources={"cpu": "10", "memory": "20Gi"},
    workers=1,
)
class Router:
    """
    Request handler for the generate endpoint: selects the best VllmWorker for each
    request based on KV cache overlap and worker load
    """

    worker = depends(VllmWorker)

    def __init__(self):
        logger.info("Initializing Custom Router")
        self.args = parse_args(self.__class__.__name__, "")

        self.default_metrics = {
            "gpu_cache_usage_perc": 0.0,
            "num_requests_waiting": 0.0,
            "gpu_prefix_cache_hit_rate": 0.0,
        }

    @async_on_start
    async def async_init(self):
        self.runtime = dynamo_context["runtime"]
        self.workers_client = (
            await self.runtime.namespace("dynamo")
            .component("VllmWorker")
            .endpoint("generate")
            .client()
        )

        self.router_type = self.args.router

        await check_required_workers(self.workers_client, self.args.min_workers)

        kv_listener = self.runtime.namespace("dynamo").component("VllmWorker")
        await kv_listener.create_service()
        if self.router_type == RouterType.KV:
            self.indexer = KvIndexer(kv_listener, self.args.block_size)
        self.metrics_aggregator = KvMetricsAggregator(kv_listener)
        logger.info("KV Router initialized")

    def _cost_function(
        self,
        scores: OverlapScores | None,
        metrics: AggregatedMetrics | None,
        token_length: int,
    ):
        """The cost function for deciding the best worker to route a request to.
        If there are multiple workers sharing the same optimal cost, then
        one of them is randomly selected.

        Args:
            scores (OverlapScores | None): The number of matching blocks between
                the request and the prefix cache of each worker.
            metrics (AggregatedMetrics | None): Several worker metrics polled
                by the `KvMetricsAggregator`, currently including the
                GPU cache usage, number of waiting requests, and the
                GPU prefix cache hit rate.
            token_length (int): The number of tokens in the request.

        Returns:
            (str, float): The best worker id and the corresponding score.
        """

        worker_scores = {}
        if scores:
            for worker_id, score in scores.scores.items():
                # score is the number of matching KV blocks; multiply by block_size to
                # convert to tokens and normalize by token_length. A larger prefix-cache
                # overlap fraction is better.
                worker_scores[worker_id] = (
                    score * self.indexer.block_size() / token_length
                )
        else:
            logger.warning("Cannot get KV scores")

        worker_metrics = {}
        max_waiting = 0.0
        if metrics:
            for endpoint in metrics.endpoints:
                worker_id = endpoint.worker_id
                worker_metrics[worker_id] = {
                    key: getattr(endpoint, key, self.default_metrics[key])
                    for key in self.default_metrics.keys()
                }
                max_waiting = max(
                    max_waiting, worker_metrics[worker_id]["num_requests_waiting"]
                )
        else:
            logger.warning("Cannot get metrics")

        # Get all worker IDs from the client, since scores / metrics may not have values
        # for every worker and we want every worker considered in the logit calculation.
        worker_ids = self.workers_client.endpoint_ids()

        worker_logits = {}
        for worker_id in worker_ids:
            # Use default values if worker not in scores or metrics
            score = worker_scores.get(worker_id, 0.0)
            metrics_dict = worker_metrics.get(worker_id, self.default_metrics)
            gpu_cache_usage = metrics_dict["gpu_cache_usage_perc"]

            normalized_waiting = (
                metrics_dict["num_requests_waiting"] / max_waiting
                if max_waiting > 0
                else 0.0
            )

            # One term rewards prefix-cache overlap; two terms penalize an
            # overloaded KV cache and a long request queue.
            worker_logits[worker_id] = 2 * score - gpu_cache_usage - normalized_waiting
            logger.info(
                f"Formula for {worker_id}: {worker_logits[worker_id]:.3f} = 2.0 * {score:.3f} - {gpu_cache_usage:.3f} - {normalized_waiting:.3f}"
            )

        if not worker_logits or not any(worker_logits.values()):
            logger.warning(f"All worker logits are zero. {fallback_msg}.")
            return "", 0.0

        # Select the worker with the highest logit
        max_logit = max(worker_logits.values())
        best_workers = [
            wid for wid, logit in worker_logits.items() if logit == max_logit
        ]
        best_worker_id = random.choice(best_workers)

        # Log the metrics for the selected worker
        if best_worker_id:
            metrics_dict = worker_metrics.get(best_worker_id, self.default_metrics)

            # Create log messages
            log_messages = [
                f"Selected worker: {best_worker_id}, logit: {worker_logits[best_worker_id]:.3f}",
                f"Score: {scores.scores.get(best_worker_id, 0.0) if scores else 0.0:.3f}",
                f"GPU Cache Hit Rate: {metrics_dict['gpu_prefix_cache_hit_rate']:.3f}",
                f"GPU Cache Usage: {metrics_dict['gpu_cache_usage_perc']:.3f}",
                f"Requests Waiting: {metrics_dict['num_requests_waiting']}",
            ]

            # Log the selection details
            for message in log_messages:
                logger.info(message)

        return best_worker_id, worker_scores.get(best_worker_id, 0.0)

    def _get_underloaded_worker(self, metrics: AggregatedMetrics | None):
        if not metrics:
            logger.warning(f"Cannot get metrics. {fallback_msg}")
            return "", 0.0

        kv_load = {
            endpoint.worker_id: getattr(endpoint, "gpu_cache_usage_perc", 0.0)
            for endpoint in metrics.endpoints
        }
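        # Illustrative example: three workers reporting loads 0.30, 0.10, 0.10 ->
        # the two workers at 0.10 tie for the minimum, and one of them is chosen
        # at random below.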

        if not kv_load or not any(kv_load.values()):
            logger.warning(f"All KV loads are zero. {fallback_msg}")
            return "", 0.0

        min_load = min(kv_load.values())
        min_load_workers = [
            worker_id for worker_id, load in kv_load.items() if load == min_load
        ]
        best_worker_id = random.choice(min_load_workers)

        logger.info(
            f"Selected worker: {best_worker_id}, KV load: {kv_load[best_worker_id]:.3f}"
        )
        return best_worker_id, kv_load[best_worker_id]

    @dynamo_endpoint()
    async def generate(self, request: Tokens) -> AsyncIterator[Tuple[WorkerId, float]]:
        metrics = await self.metrics_aggregator.get_metrics()

        # Quick return for KV_LOAD mode
        if self.router_type == RouterType.KV_LOAD:
            try:
                yield self._get_underloaded_worker(metrics)
            except Exception as e:
                logger.exception(
                    f"Error finding underloaded worker: {e}. {fallback_msg}"
                )
                yield "", 0.0
            return

        # Existing KV routing logic
        lora_id = 0
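        # The indexer returns per-worker overlap scores: scores.scores maps each
        # worker id to the number of KV blocks it already has cached for this
        # request's tokens (e.g., illustratively, 4 blocks on one worker, 0 on another).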
        try:
            scores = await self.indexer.find_matches_for_request(
                request.tokens, lora_id
            )
        except Exception as e:
            scores = {}
            logger.exception(f"Error finding matches: {e}. {fallback_msg}")
            yield "", 0.0
            return

        worker_id, prefix_hit_rate = self._cost_function(
            scores, metrics, len(request.tokens)
        )

        if worker_id:
            logger.info(
                f"Scheduling to worker_id: {worker_id} with estimated prefix hit rate: {prefix_hit_rate}"
            )

        yield worker_id, prefix_hit_rate