test_kv_bindings.py 9.14 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import asyncio
import ctypes
import os
import subprocess
from ctypes import c_char_p, c_int64, c_uint32
from time import sleep
from typing import List

import pytest

27
28
29
30
31
32
33
from dynamo.llm import (
    KvEventPublisher,
    KvIndexer,
    KvMetricsAggregator,
    KvMetricsPublisher,
)
from dynamo.runtime import Component, DistributedRuntime
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55

# Apply the pre_merge mark to every test collected from this module.
pytestmark = pytest.mark.pre_merge


@pytest.fixture(scope="module", autouse=True)
def setup_and_teardown():
    """Start nats-server and etcd for the whole module; stop them afterwards.

    The processes are always reaped: if etcd fails to launch the already
    started nats-server is terminated, and a hung process is killed rather
    than blocking teardown forever.
    """
    # Setup code
    nats_server = subprocess.Popen(["nats-server", "-js"])
    try:
        etcd = subprocess.Popen(["etcd"])
    except Exception:
        # Don't leak the nats-server process if etcd cannot be spawned.
        nats_server.terminate()
        nats_server.wait()
        raise
    print("Setting up resources")

    sleep(5)  # wait for nats-server and etcd to start
    try:
        yield
    finally:
        # Teardown code — runs even if a test in the module errors out.
        print("Tearing down resources")
        for proc in (nats_server, etcd):
            proc.terminate()
            try:
                proc.wait(timeout=10)
            except subprocess.TimeoutExpired:
                # SIGTERM was ignored; force-kill so pytest can exit.
                proc.kill()
                proc.wait()


56
57
58
@pytest.fixture(scope="module")
async def distributed_runtime():
    """Module-scoped DistributedRuntime bound to the currently running loop."""
    event_loop = asyncio.get_running_loop()
    return DistributedRuntime(event_loop, False)
60

61

62
63
64
65
66
67
# TODO Figure out how to test with different kv_block_size
# Right now I get an error in EventPublisher init when I run this test
# back to back. It occurs when calling dynamo_llm_init and I think is related to the
# OnceCell initializations not being reset.
# The test works individually if I run it with 32, then 11, then 64.
# @pytest.mark.parametrize("kv_block_size", [11, 32, 64])
68
async def test_event_handler(distributed_runtime):
    """End-to-end check that stored/removed KV events reach the indexer.

    A publisher stores one block, the indexer must report a match for that
    worker; after the remove event the match must disappear.
    """
    kv_block_size = 32

    namespace = "kv_test"
    component = "event"

    kv_listener = distributed_runtime.namespace(namespace).component(component)
    await kv_listener.create_service()

    # publisher
    worker_id = 233
    event_publisher = EventPublisher(kv_listener, worker_id, kv_block_size)

    # indexer
    indexer = KvIndexer(kv_listener, kv_block_size)

    # one full block of identical tokens
    test_token = [3] * kv_block_size

    lora_id = 0  # lora_id is not used in the indexer
    scores = await indexer.find_matches_for_request(test_token, lora_id)
    assert not scores.scores

    event_publisher.store_event(test_token, lora_id)
    # the event is sent asynchronously; poll with a bounded timeout instead of
    # a single fixed sleep so the test is both faster and less flaky
    for _ in range(10):
        await asyncio.sleep(0.5)
        scores = await indexer.find_matches_for_request(test_token, lora_id)
        if scores.scores:
            break
    assert scores.scores
    assert worker_id in scores.scores
    assert scores.scores[worker_id] == 1

    # remove event — the match must eventually disappear
    event_publisher.remove_event()
    for _ in range(10):
        await asyncio.sleep(0.5)
        scores = await indexer.find_matches_for_request(test_token, lora_id)
        if not scores.scores:
            break
    assert not scores.scores

101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132

class EventPublisher:
    """Thin wrapper over KvEventPublisher that tracks event ids / block hashes.

    The running event-id counter doubles as the block hash for each stored
    block, so blocks can be removed again by their hash.
    """

    def __init__(self, component: Component, worker_id: int, kv_block_size: int):
        self.publisher = KvEventPublisher(component, worker_id, kv_block_size)
        # Monotonic counter; used both as the event id and as the block hash.
        self.event_id_counter = 0
        self.block_hashes: List[int] = []

    def store_event(self, tokens, lora_id):
        # Chain each new block to the previously stored one (None for the
        # first block). The previous code passed the *current* counter, which
        # made every block after the first its own parent.
        parent_hash = self.block_hashes[-1] if self.block_hashes else None
        self.publisher.publish_stored(
            self.event_id_counter,  # event_id
            tokens,  # token_ids
            [
                len(tokens),
            ],  # num_block_tokens
            [
                self.event_id_counter,
            ],  # block_hashes
            lora_id,  # lora_id
            parent_hash,  # parent_hash
        )
        self.block_hashes.append(self.event_id_counter)
        self.event_id_counter += 1

    def remove_event(self):
        # Removes the most recently stored block.
        self.publisher.publish_removed(
            self.event_id_counter,  # event_id
            [
                self.block_hashes[-1],
            ],  # block_hashes
        )
        self.event_id_counter += 1
133

134

135
# [TODO] The ctypes-based KV event publishing path below is to be deprecated
# in favor of the Python KvEventPublisher bindings above.
Neelay Shah's avatar
Neelay Shah committed
137
class DynamoResult:
    """Return codes of the dynamo_llm C API (dynamo_llm_result_t)."""

    OK = 0  # call succeeded
    ERR = 1  # call failed


142
class CtypesEventPublisher:
    """Publishes KV events through the C API loaded from VLLM_KV_CAPI_PATH.

    Mirrors EventPublisher but goes through the ctypes FFI layer; the event-id
    counter doubles as the block id for stored blocks.
    """

    def __init__(
        self, namespace: str, component: str, worker_id: int, kv_block_size: int
    ):
        self.event_id_counter = 0
        self.block_ids: List[int] = []

        # load event publisher library
        self.lib = ctypes.CDLL(os.environ["VLLM_KV_CAPI_PATH"])

        self.lib.dynamo_llm_init.argtypes = [c_char_p, c_char_p, c_int64, c_uint32]
        self.lib.dynamo_llm_init.restype = c_uint32  # dynamo_llm_result_t
        result = self.lib.dynamo_llm_init(
            namespace.encode(), component.encode(), worker_id, kv_block_size
        )
        assert result == DynamoResult.OK

        self.lib.dynamo_kv_event_publish_stored.argtypes = [
            ctypes.c_uint64,  # event_id
            ctypes.POINTER(ctypes.c_uint32),  # token_ids
            ctypes.POINTER(ctypes.c_size_t),  # num_block_tokens
            ctypes.POINTER(ctypes.c_uint64),  # block_ids
            ctypes.c_size_t,  # num_blocks
            ctypes.POINTER(ctypes.c_uint64),  # parent_hash
            ctypes.c_uint64,  # lora_id
        ]
        self.lib.dynamo_kv_event_publish_stored.restype = (
            ctypes.c_uint32
        )  # dynamo_llm_result_t

        self.lib.dynamo_kv_event_publish_removed.argtypes = [
            ctypes.c_uint64,  # event_id
            ctypes.POINTER(ctypes.c_uint64),  # block_ids
            ctypes.c_size_t,  # num_blocks
        ]
        self.lib.dynamo_kv_event_publish_removed.restype = (
            ctypes.c_uint32
        )  # dynamo_llm_result_t

        # Declare shutdown too, for consistency with the other entry points
        # (previously it was called with ctypes' default int return type).
        self.lib.dynamo_llm_shutdown.argtypes = []
        self.lib.dynamo_llm_shutdown.restype = ctypes.c_uint32  # dynamo_llm_result_t

    def store_event(self, tokens, lora_id):
        # Parent is the previously stored block (NULL for the first block).
        # The previous code passed the *current* counter, which made every
        # block after the first its own parent.
        parent_hash = (
            (ctypes.c_uint64 * 1)(self.block_ids[-1]) if self.block_ids else None
        )
        result = self.lib.dynamo_kv_event_publish_stored(
            self.event_id_counter,  # uint64_t event_id
            (ctypes.c_uint32 * len(tokens))(*tokens),  # const uint32_t *token_ids
            (ctypes.c_size_t * 1)(len(tokens)),  # const uintptr_t *num_block_tokens
            (ctypes.c_uint64 * 1)(self.event_id_counter),  # const uint64_t *block_ids
            1,  # uintptr_t num_blocks
            parent_hash,  # const uint64_t *parent_hash
            lora_id,  # uint64_t lora_id
        )
        self.block_ids.append(self.event_id_counter)
        self.event_id_counter += 1

        assert result == DynamoResult.OK

    def remove_event(self):
        # Removes the most recently stored block.
        result = self.lib.dynamo_kv_event_publish_removed(
            self.event_id_counter,  # uint64_t event_id
            (ctypes.c_uint64 * 1)(self.block_ids[-1]),  # const uint64_t *block_ids
            1,  # uintptr_t num_blocks
        )
        self.event_id_counter += 1

        assert result == DynamoResult.OK

    def shutdown(self):
        result = self.lib.dynamo_llm_shutdown()
        assert result == DynamoResult.OK

214

215
async def test_metrics_aggregator(distributed_runtime):
    """Verify KvMetricsAggregator reports the metrics a publisher sends."""
    namespace = "kv_test"
    component = "metrics"
    kv_listener = distributed_runtime.namespace(namespace).component(component)
    await kv_listener.create_service()

    # aggregator
    metrics_aggregator = KvMetricsAggregator(kv_listener)

    # has nothing to aggregate as worker has not started
    metrics = await metrics_aggregator.get_metrics()
    assert not metrics.endpoints

    expected_metrics = {
        "request_active_slots": 0,
        "request_total_slots": 1024,
        "kv_active_blocks": 523,
        "kv_total_blocks": 777,
        "num_requests_waiting": 10,
        "gpu_cache_usage_perc": 0.5,
        "gpu_prefix_cache_hit_rate": 0.75,
    }

    # 'create_task' puts the publisher task in the background; keep a
    # reference so it is not garbage-collected mid-test (the event loop only
    # holds weak references to tasks)
    publisher_task = asyncio.create_task(
        metrics_publisher_task(kv_listener, expected_metrics)
    )
    try:
        # needs time for publisher to spawn up
        for _ in range(10):
            await asyncio.sleep(1)
            metrics = await metrics_aggregator.get_metrics()
            if metrics.endpoints:
                break
        assert metrics.endpoints
        for endpoint in metrics.endpoints:
            # [TODO] not really checking id for now, can't get it as create_endpoint()
            # create and serve the endpoint internally
            assert endpoint.worker_id != 0
            assert (
                endpoint.request_active_slots
                == expected_metrics["request_active_slots"]
            )
            assert (
                endpoint.request_total_slots == expected_metrics["request_total_slots"]
            )
            assert endpoint.kv_active_blocks == expected_metrics["kv_active_blocks"]
            assert endpoint.kv_total_blocks == expected_metrics["kv_total_blocks"]
    finally:
        # stop the background publisher so it does not outlive the test
        publisher_task.cancel()


258
async def metrics_publisher_task(kv_listener, expected_metrics):
    """Publish the expected metrics once, then serve the metrics endpoint."""
    publisher = KvMetricsPublisher()
    # publish() takes the metric values positionally, in exactly this order
    field_order = (
        "request_active_slots",
        "request_total_slots",
        "kv_active_blocks",
        "kv_total_blocks",
        "num_requests_waiting",
        "gpu_cache_usage_perc",
        "gpu_prefix_cache_hit_rate",
    )
    publisher.publish(*(expected_metrics[name] for name in field_order))
    await publisher.create_endpoint(kv_listener)