"server/testdata/tools/llama3-groq-tool-use.out" did not exist on "b0135f4b9b176eab9155b660d04c9ca2a1ec2341"
llm.py 9.48 KB
Newer Older
1
from typing import List, Optional, Union

from tqdm import tqdm
from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast

from vllm.engine.arg_utils import EngineArgs
from vllm.engine.llm_engine import LLMEngine
from vllm.outputs import RequestOutput
from vllm.sampling_params import SamplingParams
from vllm.utils import Counter


class LLM:
    """An LLM for generating texts from given prompts and sampling parameters.

    This class includes a tokenizer, a language model (possibly distributed
    across multiple GPUs), and GPU memory space allocated for intermediate
    states (aka KV cache). Given a batch of prompts and sampling parameters,
    this class generates texts from the model, using an intelligent batching
    mechanism and efficient memory management.

    NOTE: This class is intended to be used for offline inference. For online
    serving, use the `AsyncLLMEngine` class instead.
    NOTE: For the comprehensive list of arguments, see `EngineArgs`.

    Args:
        model: The name or path of a HuggingFace Transformers model.
        tokenizer: The name or path of a HuggingFace Transformers tokenizer.
        tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer
            if available, and "slow" will always use the slow tokenizer.
        trust_remote_code: Trust remote code (e.g., from HuggingFace) when
            downloading the model and tokenizer.
        tensor_parallel_size: The number of GPUs to use for distributed
            execution with tensor parallelism.
        dtype: The data type for the model weights and activations. Currently,
            we support `float32`, `float16`, and `bfloat16`. If `auto`, we use
            the `torch_dtype` attribute specified in the model config file.
            However, if the `torch_dtype` in the config is `float32`, we will
            use `float16` instead.
        quantization: The method used to quantize the model weights. Currently,
            we support "awq", "gptq" and "squeezellm". If None, we first check
            the `quantization_config` attribute in the model config file. If
            that is None, we assume the model weights are not quantized and use
            `dtype` to determine the data type of the weights.
        revision: The specific model version to use. It can be a branch name,
            a tag name, or a commit id.
        tokenizer_revision: The specific tokenizer version to use. It can be a
            branch name, a tag name, or a commit id.
        seed: The seed to initialize the random number generator for sampling.
        gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to
            reserve for the model weights, activations, and KV cache. Higher
            values will increase the KV cache size and thus improve the model's
            throughput. However, if the value is too high, it may cause out-of-
            memory (OOM) errors.
        swap_space: The size (GiB) of CPU memory per GPU to use as swap space.
            This can be used for temporarily storing the states of the requests
            when their `best_of` sampling parameters are larger than 1. If all
            requests will have `best_of=1`, you can safely set this to 0.
            Otherwise, too small values may cause out-of-memory (OOM) errors.
        enforce_eager: Whether to enforce eager execution. If True, we will
            disable CUDA graph and always execute the model in eager mode.
            If False, we will use CUDA graph and eager execution in hybrid
            mode.
        max_context_len_to_capture: Maximum context length covered by CUDA
            graphs. When a sequence has context length larger than this, we
            fall back to eager mode.
    """

    def __init__(
        self,
        model: str,
        tokenizer: Optional[str] = None,
        tokenizer_mode: str = "auto",
        trust_remote_code: bool = False,
        tensor_parallel_size: int = 1,
        dtype: str = "auto",
        quantization: Optional[str] = None,
        revision: Optional[str] = None,
        tokenizer_revision: Optional[str] = None,
        seed: int = 0,
        gpu_memory_utilization: float = 0.9,
        swap_space: int = 4,
        enforce_eager: bool = False,
        max_context_len_to_capture: int = 8192,
        **kwargs,
    ) -> None:
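        # Offline inference does not need the engine's periodic stat logging,
        # so disable it by default unless the caller explicitly opted in.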
        if "disable_log_stats" not in kwargs:
            kwargs["disable_log_stats"] = True
        engine_args = EngineArgs(
            model=model,
            tokenizer=tokenizer,
            tokenizer_mode=tokenizer_mode,
            trust_remote_code=trust_remote_code,
            tensor_parallel_size=tensor_parallel_size,
            dtype=dtype,
            quantization=quantization,
            revision=revision,
            tokenizer_revision=tokenizer_revision,
            seed=seed,
            gpu_memory_utilization=gpu_memory_utilization,
            swap_space=swap_space,
            enforce_eager=enforce_eager,
            max_context_len_to_capture=max_context_len_to_capture,
            **kwargs,
        )
        self.llm_engine = LLMEngine.from_engine_args(engine_args)
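        # Monotonically increasing counter used to assign unique request IDs.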
        self.request_counter = Counter()

    def get_tokenizer(
            self) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]:
        return self.llm_engine.tokenizer

    def set_tokenizer(
        self,
        tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
    ) -> None:
        self.llm_engine.tokenizer = tokenizer

    def generate(
        self,
        prompts: Optional[Union[str, List[str]]] = None,
        sampling_params: Optional[SamplingParams] = None,
        prompt_token_ids: Optional[List[List[int]]] = None,
        prefix_pos: Optional[Union[int, List[int]]] = None,
        use_tqdm: bool = True,
    ) -> List[RequestOutput]:
        """Generates the completions for the input prompts.

        NOTE: This class automatically batches the given prompts, considering
        the memory constraint. For the best performance, put all of your prompts
        into a single list and pass it to this method.

        Args:
            prompts: A list of prompts to generate completions for.
            sampling_params: The sampling parameters for text generation. If
                None, we use the default sampling parameters.
            prompt_token_ids: A list of token IDs for the prompts. If None, we
                use the tokenizer to convert the prompts to token IDs.
            prefix_pos: If not None, we use the given position as the prefix
                position for each prompt. The prefix's KV cache is computed
                once and reused by subsequent requests that share the same
                prefix. This is an experimental feature and may be replaced
                by automatic prefix caching in the future.
            use_tqdm: Whether to use tqdm to display the progress bar.

        Returns:
            A list of `RequestOutput` objects containing the generated
            completions in the same order as the input prompts.
        """
        if prompts is None and prompt_token_ids is None:
            raise ValueError("Either prompts or prompt_token_ids must be "
                             "provided.")
        if isinstance(prompts, str):
            # Convert a single prompt to a list.
            prompts = [prompts]
        if (prompts is not None and prompt_token_ids is not None
                and len(prompts) != len(prompt_token_ids)):
            raise ValueError("The lengths of prompts and prompt_token_ids "
                             "must be the same.")
        if sampling_params is None:
            # Use default sampling params.
            sampling_params = SamplingParams()

        # Add requests to the engine.
        num_requests = len(prompts) if prompts is not None else len(
            prompt_token_ids)
        for i in range(num_requests):
            prompt = prompts[i] if prompts is not None else None
            prefix_pos_i = prefix_pos[i] if prefix_pos is not None else None
            token_ids = (prompt_token_ids[i]
                         if prompt_token_ids is not None else None)
            self._add_request(prompt, sampling_params, token_ids, prefix_pos_i)
        return self._run_engine(use_tqdm)

    def _add_request(
        self,
        prompt: Optional[str],
        sampling_params: SamplingParams,
        prompt_token_ids: Optional[List[int]],
        prefix_pos: Optional[int] = None,
    ) -> None:
        request_id = str(next(self.request_counter))
        self.llm_engine.add_request(request_id,
                                    prompt,
                                    sampling_params,
                                    prompt_token_ids,
                                    prefix_pos=prefix_pos)

    def _run_engine(self, use_tqdm: bool) -> List[RequestOutput]:
        # Initialize tqdm.
        if use_tqdm:
            num_requests = self.llm_engine.get_num_unfinished_requests()
            pbar = tqdm(total=num_requests, desc="Processed prompts")
        # Run the engine.
        outputs: List[RequestOutput] = []
        while self.llm_engine.has_unfinished_requests():
            step_outputs = self.llm_engine.step()
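            # step() runs one scheduling/decoding iteration and returns the
            # outputs of the requests it processed; a request is collected
            # once it reports finished=True.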
            for output in step_outputs:
                if output.finished:
                    outputs.append(output)
                    if use_tqdm:
                        pbar.update(1)
        if use_tqdm:
            pbar.close()
        # Sort the outputs by request ID.
        # This is necessary because some requests may finish earlier than
        # requests that were submitted before them.
        outputs = sorted(outputs, key=lambda x: int(x.request_id))
        return outputs