"docs/source/vscode:/vscode.git/clone" did not exist on "a945ec635f7e7d6d21f9b774f667ce9935827f51"
llm.py 2.18 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
from typing import List, Optional

from tqdm import tqdm

from cacheflow.outputs import RequestOutput
from cacheflow.sampling_params import SamplingParams
from cacheflow.server.arg_utils import ServerArgs
from cacheflow.server.llm_server import LLMServer
from cacheflow.utils import Counter


class LLM:
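    """An LLM for generating texts from given prompts and sampling
    parameters.

    This is a blocking wrapper around LLMServer intended for offline batch
    generation.

    Args:
        model: The name or path of the model to load (forwarded to
            ServerArgs).
        tensor_parallel_size: The number of GPUs to use for distributed
            execution with tensor parallelism.
        dtype: The data type for the model weights and activations.
        seed: The seed for the random number generators used in sampling.
        **kwargs: Additional keyword arguments forwarded to ServerArgs.
    """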

    def __init__(
        self,
        model: str,
        tensor_parallel_size: int = 1,
        dtype: str = "default",
        seed: int = 0,
        **kwargs,
    ) -> None:
        if "disable_log_stats" not in kwargs:
            kwargs["disable_log_stats"] = True
        server_args = ServerArgs(
            model=model,
            tensor_parallel_size=tensor_parallel_size,
            dtype=dtype,
            seed=seed,
            **kwargs,
        )
        self.llm_server = LLMServer.from_server_args(server_args)
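        # Counter used to assign a unique id to each request.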
        self.request_counter = Counter()

    def generate(
        self,
        prompts: List[str],
        sampling_params: Optional[SamplingParams] = None,
        prompt_token_ids: Optional[List[List[int]]] = None,
        use_tqdm: bool = True,
    ) -> List[RequestOutput]:
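        """Generates the completions for the input prompts.

        This call blocks until all requests are finished: it adds every
        prompt to the server's waiting queue and then steps the server
        until no unfinished requests remain.

        Args:
            prompts: A list of prompts to generate completions for.
            sampling_params: The sampling parameters for text generation.
                If None, the default sampling parameters are used.
            prompt_token_ids: Optional pre-tokenized prompts; the i-th
                entry is used as the token ids of the i-th prompt.
            use_tqdm: Whether to display a tqdm progress bar.

        Returns:
            A list of RequestOutput objects, in the order the requests
            finish.
        """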
        if sampling_params is None:
            # Use default sampling params.
            sampling_params = SamplingParams()
        # Initialize tqdm.
        if use_tqdm:
            pbar = tqdm(total=len(prompts), desc="Processed prompts")

        # Add requests to the server.
        for i, prompt in enumerate(prompts):
            if prompt_token_ids is None:
                token_ids = None
            else:
                token_ids = prompt_token_ids[i]
            request_id = str(next(self.request_counter))
            self.llm_server.add_request(request_id, prompt, sampling_params,
                                        token_ids)

        # Run the server.
        outputs: List[RequestOutput] = []
        while self.llm_server.has_unfinished_requests():
            step_outputs = self.llm_server.step()
            for output in step_outputs:
                if output.done:
                    outputs.append(output)
                    if use_tqdm:
                        pbar.update(1)
        if use_tqdm:
            pbar.close()
        return outputs
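

if __name__ == "__main__":
    # Minimal usage sketch. The model name below is illustrative only; any
    # model accepted by ServerArgs should work here.
    llm = LLM(model="facebook/opt-125m")
    outputs = llm.generate(["Hello, my name is"])
    for output in outputs:
        print(output)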