import time
from typing import List, Optional

from vllm.lora.request import LoRARequest
from vllm.sequence import (PromptLogprobs, RequestMetrics, SampleLogprobs,
                           SequenceGroup, SequenceStatus)

class CompletionOutput:
    """The output data of one completion output of a request.

    Args:
        index: The index of the output in the request.
        text: The generated output text.
        token_ids: The token IDs of the generated output text.
        cumulative_logprob: The cumulative log probability of the generated
            output text.
        logprobs: The log probabilities of the top probability words at each
            position if the logprobs are requested.
        finish_reason: The reason why the sequence is finished.
        lora_request: The LoRA request that was used to generate the output.
    """

    def __init__(
        self,
        index: int,
        text: str,
        token_ids: List[int],
        cumulative_logprob: float,
        logprobs: Optional[SampleLogprobs],
        finish_reason: Optional[str] = None,
        lora_request: Optional[LoRARequest] = None,
    ) -> None:
        # Simple value holder: stash every constructor argument as-is.
        self.index = index
        self.text = text
        self.token_ids = token_ids
        self.cumulative_logprob = cumulative_logprob
        self.logprobs = logprobs
        self.finish_reason = finish_reason
        self.lora_request = lora_request

    def finished(self) -> bool:
        """Return True once a finish reason has been recorded."""
        return self.finish_reason is not None

    def __repr__(self) -> str:
        # Assemble the field list once, then join — same text as a single
        # f-string, just built piecewise. (lora_request is deliberately
        # omitted, matching the original representation.)
        fields = (
            f"index={self.index}",
            f"text={self.text!r}",
            f"token_ids={self.token_ids}",
            f"cumulative_logprob={self.cumulative_logprob}",
            f"logprobs={self.logprobs}",
            f"finish_reason={self.finish_reason}",
        )
        return "CompletionOutput(" + ", ".join(fields) + ")"
52
53
54


class RequestOutput:
    """The output data of a request to the LLM.

    Args:
        request_id: The unique ID of the request.
        prompt: The prompt string of the request.
        prompt_token_ids: The token IDs of the prompt.
        prompt_logprobs: The log probabilities to return per prompt token.
        outputs: The output sequences of the request.
        finished: Whether the whole request is finished.
        metrics: Metrics associated with the request.
        lora_request: The LoRA request that was used to generate the output.
    """

    def __init__(
        self,
        request_id: str,
        prompt: str,
        prompt_token_ids: List[int],
        prompt_logprobs: Optional[PromptLogprobs],
        outputs: List[CompletionOutput],
        finished: bool,
        metrics: Optional[RequestMetrics] = None,
        lora_request: Optional[LoRARequest] = None,
    ) -> None:
        self.request_id = request_id
        self.prompt = prompt
        self.prompt_token_ids = prompt_token_ids
        self.prompt_logprobs = prompt_logprobs
        self.outputs = outputs
        self.finished = finished
        self.metrics = metrics
        self.lora_request = lora_request

    @classmethod
    def from_seq_group(cls, seq_group: SequenceGroup) -> "RequestOutput":
        """Build a RequestOutput from the top-n sequences of a SequenceGroup.

        Sequences are ranked by beam-search score when beam search is in use,
        otherwise by cumulative log probability; the top ``n`` become the
        request's CompletionOutputs.
        """
        # Get the top-n sequences.
        n = seq_group.sampling_params.n
        seqs = seq_group.get_seqs()
        if seq_group.sampling_params.use_beam_search:
            sorting_key = lambda seq: seq.get_beam_search_score(
                seq_group.sampling_params.length_penalty)
        else:
            sorting_key = lambda seq: seq.get_cumulative_logprob()
        sorted_seqs = sorted(seqs, key=sorting_key, reverse=True)
        top_n_seqs = sorted_seqs[:n]

        # Map each sequence (by identity) back to its position in the
        # original `seqs` list once, so the loop below is O(n) instead of
        # the O(n^2) repeated `seqs.index(seq)` calls.
        original_index = {id(seq): i for i, seq in enumerate(seqs)}

        # Create the outputs.
        outputs: List[CompletionOutput] = []
        for seq in top_n_seqs:
            logprobs = seq.output_logprobs
            if seq_group.sampling_params.logprobs is None:
                # NOTE: We need to take care of this case because the sequence
                # always has the logprobs of the sampled tokens even if the
                # logprobs are not requested.
                logprobs = None
            # (Fixed local-variable typo: was `finshed_reason`.)
            finished_reason = SequenceStatus.get_finished_reason(seq.status)
            output = CompletionOutput(original_index[id(seq)],
                                      seq.output_text,
                                      seq.get_output_token_ids(),
                                      seq.get_cumulative_logprob(), logprobs,
                                      finished_reason)
            outputs.append(output)

        # Every sequence in the sequence group should have the same prompt.
        prompt = seq_group.prompt
        prompt_token_ids = seq_group.prompt_token_ids
        prompt_logprobs = seq_group.prompt_logprobs
        finished = seq_group.is_finished()
        # Record (or clear) the finish timestamp on the group, matching the
        # original behavior of setting None while still in progress.
        finished_time = time.time() if finished else None
        seq_group.set_finished_time(finished_time)
        return cls(seq_group.request_id,
                   prompt,
                   prompt_token_ids,
                   prompt_logprobs,
                   outputs,
                   finished,
                   seq_group.metrics,
                   lora_request=seq_group.lora_request)

    def __repr__(self) -> str:
        return (f"RequestOutput(request_id={self.request_id}, "
                f"prompt={self.prompt!r}, "
                f"prompt_token_ids={self.prompt_token_ids}, "
                f"prompt_logprobs={self.prompt_logprobs}, "
                f"outputs={self.outputs}, "
                f"finished={self.finished}, "
                f"metrics={self.metrics}, "
                f"lora_request={self.lora_request})")