import asyncio
import enum
import os
import socket
import uuid
from collections import OrderedDict
from functools import partial
from platform import uname
from typing import (Any, Awaitable, Callable, Hashable, List, Optional,
                    TypeVar)

import psutil
import torch

T = TypeVar("T")


class Device(enum.Enum):
    GPU = enum.auto()
    CPU = enum.auto()


class Counter:

    def __init__(self, start: int = 0) -> None:
        self.counter = start

    def __next__(self) -> int:
        i = self.counter
        self.counter += 1
        return i

    def reset(self) -> None:
        self.counter = 0
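

# Illustrative usage of Counter (a sketch, not part of the original module):
#
#     seq_counter = Counter()
#     first_id = next(seq_counter)   # -> 0
#     second_id = next(seq_counter)  # -> 1
#     seq_counter.reset()            # counting restarts at 0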


class LRUCache:

    def __init__(self, capacity: int):
        self.cache = OrderedDict()
        self.capacity = capacity

    def __contains__(self, key: Hashable) -> bool:
        return key in self.cache

    def __len__(self) -> int:
        return len(self.cache)

    def __getitem__(self, key: Hashable) -> Any:
        return self.get(key)

    def __setitem__(self, key: Hashable, value: Any) -> None:
        self.put(key, value)

    def __delitem__(self, key: Hashable) -> None:
        self.pop(key)

    def touch(self, key: Hashable) -> None:
        self.cache.move_to_end(key)

    def get(self,
            key: Hashable,
            default_value: Optional[Any] = None) -> Optional[Any]:
        if key in self.cache:
            value = self.cache[key]
            self.cache.move_to_end(key)
        else:
            value = default_value
        return value

    def put(self, key: Hashable, value: Any) -> None:
        self.cache[key] = value
        self.cache.move_to_end(key)
        self._remove_old_if_needed()

    def _on_remove(self, key: Hashable, value: Any):
        pass

    def remove_oldest(self):
        if not self.cache:
            return
        key, value = self.cache.popitem(last=False)
        self._on_remove(key, value)

    def _remove_old_if_needed(self) -> None:
        while len(self.cache) > self.capacity:
            self.remove_oldest()

    def pop(self, key: Hashable, default_value: Optional[Any] = None) -> Any:
        run_on_remove = key in self.cache
        value = self.cache.pop(key, default_value)
        if run_on_remove:
            self._on_remove(key, value)
        return value

    def clear(self):
        while len(self.cache) > 0:
            self.remove_oldest()
        self.cache.clear()
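

# Illustrative usage of LRUCache (a sketch, not part of the original module).
# Subclasses may override _on_remove() to run cleanup when an entry is
# evicted or popped:
#
#     cache = LRUCache(capacity=2)
#     cache.put("a", 1)
#     cache["b"] = 2       # __setitem__ delegates to put()
#     cache.get("a")       # -> 1; "a" becomes the most recently used entry
#     cache.put("c", 3)    # exceeds capacity, so the oldest entry "b" is evicted
#     "b" in cache         # -> False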


def is_hip() -> bool:
    """Returns True when running on an AMD ROCm (HIP) build of PyTorch."""
    return torch.version.hip is not None


def get_max_shared_memory_bytes(gpu: int = 0) -> int:
    """Returns the maximum shared memory per thread block in bytes."""
    # NOTE: This import statement should be executed lazily since
    # the Neuron-X backend does not have the `cuda_utils` module.
    from vllm._C import cuda_utils

    max_shared_mem = cuda_utils.get_max_shared_memory_per_block_device_attribute(
        gpu)
    # A value of 0 would make MAX_SEQ_LEN negative and break test_attention.py.
    assert max_shared_mem > 0, "max_shared_mem cannot be zero"
    return int(max_shared_mem)


def get_cpu_memory() -> int:
    """Returns the total CPU memory of the node in bytes."""
    return psutil.virtual_memory().total


def random_uuid() -> str:
    return uuid.uuid4().hex


def in_wsl() -> bool:
    # Reference: https://github.com/microsoft/WSL/issues/4071
    return "microsoft" in " ".join(uname()).lower()


def make_async(func: Callable[..., T]) -> Callable[..., Awaitable[T]]:
    """Take a blocking function, and run it on in an executor thread.

    This function prevents the blocking function from blocking the
    asyncio event loop.
    The code in this function needs to be thread safe.
    """

    def _async_wrapper(*args, **kwargs) -> asyncio.Future:
        loop = asyncio.get_event_loop()
        p_func = partial(func, *args, **kwargs)
        return loop.run_in_executor(executor=None, func=p_func)

    return _async_wrapper
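

# Illustrative usage of make_async (a sketch, not part of the original module);
# `load_file` is a hypothetical blocking helper:
#
#     def load_file(path: str) -> str:
#         with open(path) as f:
#             return f.read()
#
#     load_file_async = make_async(load_file)
#
#     async def handler(path: str) -> str:
#         # Runs load_file in the default executor, keeping the loop responsive.
#         return await load_file_async(path)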


def get_ip() -> str:
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect(("8.8.8.8", 80))  # Doesn't need to be reachable
        return s.getsockname()[0]


def get_distributed_init_method(ip: str, port: int) -> str:
    return f"tcp://{ip}:{port}"


def get_open_port() -> int:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(("", 0))
        return s.getsockname()[1]
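

# Illustrative usage (a sketch, not part of the original module): these helpers
# are typically combined to build an init URL for torch.distributed, e.g.
#
#     init_method = get_distributed_init_method(get_ip(), get_open_port())
#     # -> something like "tcp://10.0.0.5:52341"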


def set_cuda_visible_devices(device_ids: List[int]) -> None:
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, device_ids))