"include/ck/library/utility/host_gemm.hpp" did not exist on "d3051d75175268ee8d6beb64b0177d4c08733291"
sampling_params.py 11.8 KB
Newer Older
1
"""Sampling parameters for text generation."""
from enum import IntEnum
from functools import cached_property
from typing import Callable, List, Optional, Union

import torch

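# Tolerance used when comparing float-valued sampling parameters to their
# exact "disabled" values (e.g. a temperature within this epsilon of zero
# is treated as greedy sampling).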
_SAMPLING_EPS = 1e-5


class SamplingType(IntEnum):
    GREEDY = 0
    RANDOM = 1
    BEAM = 2


LogitsProcessor = Callable[[List[int], torch.Tensor], torch.Tensor]
"""LogitsProcessor is a function that takes a list of previously generated
tokens and a tensor of the logits for the next token, and returns a modified
tensor of logits to sample from."""
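

# A minimal, illustrative sketch (not part of the original module): a
# LogitsProcessor that suppresses a hypothetical set of banned token ids by
# driving their logits to -inf so they can never be sampled.
def _example_ban_tokens(token_ids: List[int],
                        logits: torch.Tensor) -> torch.Tensor:
    # `token_ids` holds the previously generated tokens; this stateless
    # example ignores them, but a processor may condition on them.
    banned_token_ids = [42, 43]  # hypothetical ids to suppress
    logits[banned_token_ids] = -float("inf")
    return logits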


class SamplingParams:
    """Sampling parameters for text generation.

    Overall, we follow the sampling parameters from the OpenAI text completion
    API (https://platform.openai.com/docs/api-reference/completions/create).
    In addition, we support beam search, which is not supported by OpenAI.

    Args:
        n: Number of output sequences to return for the given prompt.
        best_of: Number of output sequences that are generated from the prompt.
            From these `best_of` sequences, the top `n` sequences are returned.
            `best_of` must be greater than or equal to `n`. This is treated as
            the beam width when `use_beam_search` is True. By default, `best_of`
            is set to `n`.
        presence_penalty: Float that penalizes new tokens based on whether they
            appear in the generated text so far. Values > 0 encourage the model
            to use new tokens, while values < 0 encourage the model to repeat
            tokens.
        frequency_penalty: Float that penalizes new tokens based on their
            frequency in the generated text so far. Values > 0 encourage the
            model to use new tokens, while values < 0 encourage the model to
            repeat tokens.
        repetition_penalty: Float that penalizes new tokens based on whether
            they appear in the prompt and the generated text so far. Values > 1
            encourage the model to use new tokens, while values < 1 encourage
            the model to repeat tokens.
        temperature: Float that controls the randomness of the sampling. Lower
            values make the model more deterministic, while higher values make
            the model more random. Zero means greedy sampling.
        top_p: Float that controls the cumulative probability of the top tokens
            to consider. Must be in (0, 1]. Set to 1 to consider all tokens.
        top_k: Integer that controls the number of top tokens to consider. Set
            to -1 to consider all tokens.
        min_p: Float that represents the minimum probability for a token to be
            considered, relative to the probability of the most likely token.
            Must be in [0, 1]. Set to 0 to disable this.
        use_beam_search: Whether to use beam search instead of sampling.
        length_penalty: Float that penalizes sequences based on their length.
            Used in beam search.
        early_stopping: Controls the stopping condition for beam search. It
            accepts the following values: `True`, where the generation stops as
            soon as there are `best_of` complete candidates; `False`, where a
            heuristic is applied and the generation stops when it is very
            unlikely to find better candidates; `"never"`, where the beam search
            procedure only stops when there cannot be better candidates
            (canonical beam search algorithm).
        stop: List of strings that stop the generation when they are generated.
            The returned output will not contain the stop strings.
        stop_token_ids: List of tokens that stop the generation when they are
            generated. The returned output will contain the stop tokens unless
            the stop tokens are special tokens.
        ignore_eos: Whether to ignore the EOS token and continue generating
            tokens after the EOS token is generated.
        max_tokens: Maximum number of tokens to generate per output sequence.
        logprobs: Number of log probabilities to return per output token.
            Note that the implementation follows the OpenAI API: The return
            result includes the log probabilities on the `logprobs` most likely
            tokens, as well as the chosen tokens. The API will always return the
            log probability of the sampled token, so there may be up to
            `logprobs+1` elements in the response.
        prompt_logprobs: Number of log probabilities to return per prompt token.
        skip_special_tokens: Whether to skip special tokens in the output.
        spaces_between_special_tokens: Whether to add spaces between special
            tokens in the output. Defaults to True.
        logits_processors: List of functions that modify logits based on
            previously generated tokens.
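
    Example:
        >>> # Illustrative values, not recommendations.
        >>> params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=64)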
    """

    def __init__(
        self,
        n: int = 1,
        best_of: Optional[int] = None,
        presence_penalty: float = 0.0,
        frequency_penalty: float = 0.0,
        repetition_penalty: float = 1.0,
        temperature: float = 1.0,
        top_p: float = 1.0,
        top_k: int = -1,
        min_p: float = 0.0,
        use_beam_search: bool = False,
        length_penalty: float = 1.0,
        early_stopping: Union[bool, str] = False,
        stop: Optional[Union[str, List[str]]] = None,
        stop_token_ids: Optional[List[int]] = None,
        ignore_eos: bool = False,
        max_tokens: int = 16,
        logprobs: Optional[int] = None,
        prompt_logprobs: Optional[int] = None,
        skip_special_tokens: bool = True,
        spaces_between_special_tokens: bool = True,
        logits_processors: Optional[List[LogitsProcessor]] = None,
    ) -> None:
        self.n = n
        self.best_of = best_of if best_of is not None else n
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.repetition_penalty = repetition_penalty
        self.temperature = temperature
        self.top_p = top_p
        self.top_k = top_k
        self.min_p = min_p
        self.use_beam_search = use_beam_search
        self.length_penalty = length_penalty
        self.early_stopping = early_stopping
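        # Normalize `stop` into a list of strings so downstream code can
        # iterate over it uniformly.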
        if stop is None:
            self.stop = []
        elif isinstance(stop, str):
            self.stop = [stop]
        else:
            self.stop = list(stop)
        if stop_token_ids is None:
            self.stop_token_ids = []
        else:
            self.stop_token_ids = list(stop_token_ids)
        self.ignore_eos = ignore_eos
        self.max_tokens = max_tokens
        self.logprobs = logprobs
        self.prompt_logprobs = prompt_logprobs
        self.skip_special_tokens = skip_special_tokens
        self.spaces_between_special_tokens = spaces_between_special_tokens
        self.logits_processors = logits_processors

        self._verify_args()
        if self.use_beam_search:
            self._verify_beam_search()
        else:
            self._verify_non_beam_search()
            if self.temperature < _SAMPLING_EPS:
                # Zero temperature means greedy sampling.
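                # top_p and top_k have no effect under argmax decoding, so
                # they are reset to their neutral values.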
                self.top_p = 1.0
                self.top_k = -1
                self._verify_greedy_sampling()

    def _verify_args(self) -> None:
        if self.n < 1:
            raise ValueError(f"n must be at least 1, got {self.n}.")
        if self.best_of < self.n:
            raise ValueError(f"best_of must be greater than or equal to n, "
                             f"got n={self.n} and best_of={self.best_of}.")
        if not -2.0 <= self.presence_penalty <= 2.0:
            raise ValueError("presence_penalty must be in [-2, 2], got "
                             f"{self.presence_penalty}.")
        if not -2.0 <= self.frequency_penalty <= 2.0:
            raise ValueError("frequency_penalty must be in [-2, 2], got "
                             f"{self.frequency_penalty}.")
        if not 0.0 < self.repetition_penalty <= 2.0:
            raise ValueError("repetition_penalty must be in (0, 2], got "
                             f"{self.repetition_penalty}.")
        if self.temperature < 0.0:
            raise ValueError(
                f"temperature must be non-negative, got {self.temperature}.")
        if not 0.0 < self.top_p <= 1.0:
            raise ValueError(f"top_p must be in (0, 1], got {self.top_p}.")
        if self.top_k < -1 or self.top_k == 0:
            raise ValueError(f"top_k must be -1 (disable), or at least 1, "
                             f"got {self.top_k}.")
        if not 0.0 <= self.min_p <= 1.0:
            raise ValueError("min_p must be in [0, 1], got "
                             f"{self.min_p}.")
        if self.max_tokens < 1:
            raise ValueError(
                f"max_tokens must be at least 1, got {self.max_tokens}.")
        if self.logprobs is not None and self.logprobs < 0:
            raise ValueError(
                f"logprobs must be non-negative, got {self.logprobs}.")
        if self.prompt_logprobs is not None and self.prompt_logprobs < 0:
            raise ValueError(f"prompt_logprobs must be non-negative, got "
                             f"{self.prompt_logprobs}.")

    def _verify_beam_search(self) -> None:
        if self.best_of == 1:
            raise ValueError("best_of must be greater than 1 when using beam "
                             f"search. Got {self.best_of}.")
        if self.temperature > _SAMPLING_EPS:
            raise ValueError("temperature must be 0 when using beam search.")
        if self.top_p < 1.0 - _SAMPLING_EPS:
            raise ValueError("top_p must be 1 when using beam search.")
        if self.top_k != -1:
            raise ValueError("top_k must be -1 when using beam search.")
        if self.early_stopping not in [True, False, "never"]:
            raise ValueError(
                f"early_stopping must be True, False, or 'never', "
                f"got {self.early_stopping}.")

    def _verify_non_beam_search(self) -> None:
        if self.early_stopping is not False:
            raise ValueError("early_stopping is not effective and must be "
                             "False when not using beam search.")
        if (self.length_penalty < 1.0 - _SAMPLING_EPS
                or self.length_penalty > 1.0 + _SAMPLING_EPS):
            raise ValueError(
                "length_penalty is not effective and must be the "
                "default value of 1.0 when not using beam search.")

    def _verify_greedy_sampling(self) -> None:
        if self.best_of > 1:
            raise ValueError("best_of must be 1 when using greedy sampling. "
                             f"Got {self.best_of}.")

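    # `cached_property` is safe here on the assumption that sampling
    # parameters are not mutated after construction.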
    @cached_property
    def sampling_type(self) -> SamplingType:
        if self.use_beam_search:
            return SamplingType.BEAM
        if self.temperature < _SAMPLING_EPS:
            return SamplingType.GREEDY
        return SamplingType.RANDOM

    def __repr__(self) -> str:
        return (f"SamplingParams(n={self.n}, "
                f"best_of={self.best_of}, "
                f"presence_penalty={self.presence_penalty}, "
                f"frequency_penalty={self.frequency_penalty}, "
                f"repetition_penalty={self.repetition_penalty}, "
                f"temperature={self.temperature}, "
                f"top_p={self.top_p}, "
                f"top_k={self.top_k}, "
                f"min_p={self.min_p}, "
                f"use_beam_search={self.use_beam_search}, "
                f"length_penalty={self.length_penalty}, "
                f"early_stopping={self.early_stopping}, "
                f"stop={self.stop}, "
                f"stop_token_ids={self.stop_token_ids}, "
                f"ignore_eos={self.ignore_eos}, "
                f"max_tokens={self.max_tokens}, "
                f"logprobs={self.logprobs}, "
                f"prompt_logprobs={self.prompt_logprobs}, "
                f"skip_special_tokens={self.skip_special_tokens}, "
                "spaces_between_special_tokens="
                f"{self.spaces_between_special_tokens})")