# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import asdict, dataclass, field
from typing import Any

from transformers import GenerationConfig


@dataclass
class GeneratingArguments:
    r"""Arguments pertaining to specify the decoding parameters."""

    do_sample: bool = field(
        default=True,
        metadata={"help": "Whether or not to use sampling, use greedy decoding otherwise."},
    )
    temperature: float = field(
        default=0.95,
        metadata={"help": "The value used to modulate the next token probabilities."},
    )
    top_p: float = field(
        default=0.7,
        metadata={
            "help": (
                "The smallest set of most probable tokens with probabilities that add up to top_p or higher are kept."
            )
        },
    )
    top_k: int = field(
        default=50,
        metadata={"help": "The number of highest probability vocabulary tokens to keep for top-k filtering."},
    )
    num_beams: int = field(
        default=1,
        metadata={"help": "Number of beams for beam search. 1 means no beam search."},
    )
    max_length: int = field(
        default=1024,
        metadata={"help": "The maximum length the generated tokens can have. It can be overridden by max_new_tokens."},
    )
    max_new_tokens: int = field(
        default=1024,
        metadata={"help": "The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt."},
    )
    repetition_penalty: float = field(
        default=1.0,
        metadata={"help": "The parameter for repetition penalty. 1.0 means no penalty."},
    )
    length_penalty: float = field(
        default=1.0,
        metadata={"help": "Exponential penalty to the length that is used with beam-based generation."},
    )
    skip_special_tokens: bool = field(
        default=True,
        metadata={"help": "Whether or not to remove special tokens in the decoding."},
    )

    def to_dict(self, obey_generation_config: bool = False) -> dict[str, Any]:
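        r"""Convert the arguments to a dict of keyword arguments for generation.

        Only one of `max_length` / `max_new_tokens` is kept; when
        `obey_generation_config` is True, any key that a default
        `transformers.GenerationConfig` does not define is dropped.
        """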
        args = asdict(self)
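        # `max_length` and `max_new_tokens` are two mutually exclusive ways to
        # bound generation length; keep only the one in effect so `model.generate`
        # does not warn about receiving both.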
        if args.get("max_new_tokens", -1) > 0:
            args.pop("max_length", None)
        else:
            args.pop("max_new_tokens", None)

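        # Keep only the keys that a default `GenerationConfig` defines; fields such
        # as `skip_special_tokens` belong to tokenizer decoding, not generation.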
        if obey_generation_config:
            generation_config = GenerationConfig()
            for key in list(args.keys()):
                if not hasattr(generation_config, key):
                    args.pop(key)

        return args
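

# A minimal usage sketch (an editor's illustration, not part of the original
# module): build the kwargs and hand them to `transformers.GenerationConfig`.
if __name__ == "__main__":
    gen_args = GeneratingArguments(max_new_tokens=256, temperature=0.8)
    # With `obey_generation_config=True`, decoding-only fields such as
    # `skip_special_tokens` are filtered out before constructing the config.
    config = GenerationConfig(**gen_args.to_dict(obey_generation_config=True))
    print(config)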