globals.py
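# Process-wide globals for text_generation_server. Values are read from
# environment variables at import time (normally set by the launcher/CLI).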
import torch
import os
from loguru import logger
from typing import Dict, Optional

from text_generation_server.utils.log import log_master

PREFIX_CACHING = os.getenv("USE_PREFIX_CACHING", "0").lower() in {"1", "true"}
log_master(logger.info, f"Using prefix caching = {PREFIX_CACHING}")
ATTENTION = os.getenv("ATTENTION")
_expected = {"paged", "flashdecoding", "flashinfer"}
assert (
    ATTENTION in _expected
), f"Attention is not valid {ATTENTION}, expected {_expected}"
log_master(logger.info, f"Using Attention = {ATTENTION}")

if PREFIX_CACHING and ATTENTION not in {"flashinfer", "flashdecoding"}:
    raise RuntimeError(
        "Prefix caching is only supported with flashinfer and flashdecoding"
    )

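# Shared memory pool handle for CUDA graph capture; None when CUDA is unavailable.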
MEM_POOL = torch.cuda.graph_pool_handle() if torch.cuda.is_available() else None
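# Safety factor, strictly between 0 and 1; presumably leaves headroom when
# sizing memory budgets elsewhere in the server.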
TGI_WIGGLE_ROOM = float(os.getenv("TGI_WIGGLE_ROOM", "0.95"))
assert TGI_WIGGLE_ROOM > 0
assert TGI_WIGGLE_ROOM < 1

# This is overridden by the CLI.
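# Number of tokens per cache block; the value depends on the attention backend.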
BLOCK_SIZE: int
if ATTENTION == "flashdecoding":
    BLOCK_SIZE = 256
elif ATTENTION == "flashinfer":
    BLOCK_SIZE = 1
else:
    BLOCK_SIZE = 16

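# Comma-separated batch sizes to capture CUDA graphs for; left as None when unset.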
cuda_graphs = os.getenv("CUDA_GRAPHS")
if cuda_graphs is not None:
    try:
        cuda_graphs = [int(item) for item in cuda_graphs.split(",")]
    except Exception as e:
        raise RuntimeError(
            f"Could not parse cuda graphs {cuda_graphs}, expected comma separated list for batch sizes to run on: {e}"
        )
    # Sorting the cuda graphs in descending order helps reduce the
    # memory impact and results in less memory usage.
    cuda_graphs.sort(reverse=True)

CUDA_GRAPHS = cuda_graphs

# NOTE: eventually we should move this into the router and pass back the
# index in all cases.
ADAPTER_TO_INDEX: Optional[Dict[str, int]] = None


def set_adapter_to_index(adapter_to_index: Dict[str, int]):
    """Store the adapter-name-to-index mapping for later lookups."""
    global ADAPTER_TO_INDEX
    ADAPTER_TO_INDEX = adapter_to_index


def get_adapter_to_index():
    """Return the adapter-name-to-index mapping set by `set_adapter_to_index`."""
    global ADAPTER_TO_INDEX
    return ADAPTER_TO_INDEX