# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple

from ...extras import logging
from ...extras.constants import IGNORE_INDEX
from .processor_utils import greedy_knapsack, infer_seqlen


if TYPE_CHECKING:
    from transformers import PreTrainedTokenizer, ProcessorMixin

    from ...hparams import DataArguments
    from ..mm_plugin import ImageInput, VideoInput
    from ..template import Template


logger = logging.get_logger(__name__)


def _encode_supervised_example(
    prompt: Sequence[Dict[str, str]],
    response: Sequence[Dict[str, str]],
    system: Optional[str],
    tools: Optional[str],
    images: Sequence["ImageInput"],
    videos: Sequence["VideoInput"],
    template: "Template",
    tokenizer: "PreTrainedTokenizer",
    processor: Optional["ProcessorMixin"],
    cutoff_len: int,
    train_on_prompt: bool,
    mask_history: bool,
) -> Tuple[List[int], List[int]]:
    r"""Encode one conversation into an ``(input_ids, labels)`` pair of token id lists.

    Each prompt/response turn is tokenized via the template. Source (prompt)
    tokens are masked with ``IGNORE_INDEX`` in ``labels`` unless
    ``train_on_prompt`` is set. When ``mask_history`` is set, turns are
    consumed from the last one backwards (so the most recent turns survive
    truncation) and only the final turn keeps its target labels.

    Args:
        prompt: alternating user/assistant messages (odd length; validated by callers).
        response: the assistant message(s) appended after ``prompt``.
        system: optional system prompt text.
        tools: optional tool-definition text.
        images: image inputs forwarded to the template's mm_plugin.
        videos: video inputs forwarded to the template's mm_plugin.
        template: chat template used to tokenize the turns.
        tokenizer: tokenizer providing ids (eos, etc.).
        processor: optional multimodal processor passed to the mm_plugin.
        cutoff_len: hard cap on the number of tokens produced.
        train_on_prompt: if True, prompt tokens are used as labels too.
        mask_history: if True, train on the last turn only.

    Returns:
        ``(input_ids, labels)`` — two equal-length lists of token ids.
    """
    # mm_plugin preprocessing: the seed sequences start empty ([], []) and may be
    # populated with media-related tokens by the plugin (plugin-specific behavior).
    messages = template.mm_plugin.process_messages(prompt + response, images, videos, processor)
    input_ids, labels = template.mm_plugin.process_token_ids([], [], images, videos, tokenizer, processor)
    encoded_pairs = template.encode_multiturn(tokenizer, messages, system, tools)
    # reserve one position for the trailing eos appended at the end when efficient_eos
    total_length = len(input_ids) + (1 if template.efficient_eos else 0)
    if mask_history:
        encoded_pairs = encoded_pairs[::-1]  # high priority for last turns

    for turn_idx, (source_ids, target_ids) in enumerate(encoded_pairs):
        if total_length >= cutoff_len:
            break

        # split the remaining token budget between this turn's source and target
        source_len, target_len = infer_seqlen(len(source_ids), len(target_ids), cutoff_len - total_length)
        source_ids = source_ids[:source_len]
        target_ids = target_ids[:target_len]
        total_length += source_len + target_len

        if train_on_prompt:
            source_label = source_ids
        elif template.efficient_eos:
            # supervise an eos at the first source position (the boundary after the
            # previous turn's reply); the rest of the prompt stays masked
            source_label = [tokenizer.eos_token_id] + [IGNORE_INDEX] * (source_len - 1)
        else:
            source_label = [IGNORE_INDEX] * source_len

        if mask_history and turn_idx != 0:  # train on the last turn only
            target_label = [IGNORE_INDEX] * target_len
        else:
            target_label = target_ids

        if mask_history:  # reversed sequences
            input_ids = source_ids + target_ids + input_ids
            labels = source_label + target_label + labels
        else:
            input_ids += source_ids + target_ids
            labels += source_label + target_label

    if template.efficient_eos:
        input_ids += [tokenizer.eos_token_id]
        labels += [tokenizer.eos_token_id]

    return input_ids, labels


def preprocess_supervised_dataset(
    examples: Dict[str, List[Any]],
    template: "Template",
    tokenizer: "PreTrainedTokenizer",
    processor: Optional["ProcessorMixin"],
    data_args: "DataArguments",
) -> Dict[str, List[Any]]:
    r"""Encode a batch of examples into ``input_ids``, ``attention_mask`` and ``labels``.

    Inputs follow the format `<bos> X Y <eos>` while labels follow
    `<ignore> ... <ignore> Y <eos>`: for multiturn examples, only the prompt
    part of each prompt-response pair is masked.
    """
    model_inputs = defaultdict(list)
    parallel_columns = zip(
        examples["_prompt"],
        examples["_response"],
        examples["_system"],
        examples["_tools"],
        examples["_images"],
        examples["_videos"],
    )
    for prompt, response, system, tools, images, videos in parallel_columns:
        # a valid example has an odd-length message history and exactly one response
        if len(prompt) % 2 != 1 or len(response) != 1:
            logger.warning_rank0("Dropped invalid example: {}".format(prompt + response))
            continue

        input_ids, labels = _encode_supervised_example(
            prompt=prompt,
            response=response,
            system=system,
            tools=tools,
            images=images or [],
            videos=videos or [],
            template=template,
            tokenizer=tokenizer,
            processor=processor,
            cutoff_len=data_args.cutoff_len,
            train_on_prompt=data_args.train_on_prompt,
            mask_history=data_args.mask_history,
        )
        model_inputs["input_ids"].append(input_ids)
        model_inputs["attention_mask"].append([1] * len(input_ids))
        model_inputs["labels"].append(labels)
        model_inputs["images"].append(images)
        model_inputs["videos"].append(videos)

    return model_inputs


def preprocess_packed_supervised_dataset(
    examples: Dict[str, List[Any]],
    template: "Template",
    tokenizer: "PreTrainedTokenizer",
    processor: Optional["ProcessorMixin"],
    data_args: "DataArguments",
) -> Dict[str, List[Any]]:
    r"""Pack multiple short examples into fixed-length sequences of ``cutoff_len``.

    Inputs follow `<bos> X1 Y1 <eos> <bos> X2 Y2 <eos>` and labels follow
    `<ignore> ... <ignore> Y1 <eos> <ignore> ... <ignore> Y2 <eos>`. With
    ``neat_packing``, the attention mask carries a 1-based segment id per
    packed example instead of all ones.
    """
    # TODO: use `position_ids` to achieve packing
    # phase 1: encode each valid example and bucket its index by token length
    valid_num = 0
    lengths = []
    length2indexes = defaultdict(list)
    batch_input_ids, batch_labels, batch_images, batch_videos = [], [], [], []
    parallel_columns = zip(
        examples["_prompt"],
        examples["_response"],
        examples["_system"],
        examples["_tools"],
        examples["_images"],
        examples["_videos"],
    )
    for prompt, response, system, tools, images, videos in parallel_columns:
        # a valid example has an odd-length message history and exactly one response
        if len(prompt) % 2 != 1 or len(response) != 1:
            logger.warning_rank0("Dropped invalid example: {}".format(prompt + response))
            continue

        input_ids, labels = _encode_supervised_example(
            prompt=prompt,
            response=response,
            system=system,
            tools=tools,
            images=images or [],
            videos=videos or [],
            template=template,
            tokenizer=tokenizer,
            processor=processor,
            cutoff_len=data_args.cutoff_len - 1,  # reserved for the padding token
            train_on_prompt=data_args.train_on_prompt,
            mask_history=data_args.mask_history,
        )
        length = len(input_ids)
        if length > data_args.cutoff_len:
            logger.warning_rank0(f"Dropped lengthy example with length {length} > {data_args.cutoff_len}.")
            continue

        lengths.append(length)
        length2indexes[length].append(valid_num)
        batch_input_ids.append(input_ids)
        batch_labels.append(labels)
        batch_images.append(images or [])
        batch_videos.append(videos or [])
        valid_num += 1

    # phase 2: greedily bin the lengths, then materialize each bin
    model_inputs = defaultdict(list)
    knapsacks = greedy_knapsack(lengths, data_args.cutoff_len - 1)  # reserved for the padding token
    for knapsack in knapsacks:
        packed_input_ids, packed_attention_masks, packed_labels = [], [], []
        packed_images, packed_videos = [], []
        for segment_idx, length in enumerate(knapsack):
            # pop any stored example of this length; the knapsack guarantees one exists
            index = length2indexes[length].pop()
            packed_input_ids.extend(batch_input_ids[index])
            packed_labels.extend(batch_labels[index])
            packed_images.extend(batch_images[index])
            packed_videos.extend(batch_videos[index])
            # neat packing marks each example with its own segment id, starting from 1
            mask_value = segment_idx + 1 if data_args.neat_packing else 1
            packed_attention_masks.extend([mask_value] * len(batch_input_ids[index]))

        # pad the pack up to exactly cutoff_len tokens
        pad_length = data_args.cutoff_len - len(packed_input_ids)
        if pad_length > 0:
            packed_input_ids.extend([tokenizer.pad_token_id] * pad_length)
            packed_labels.extend([IGNORE_INDEX] * pad_length)
            # without neat packing, an all-ones padding mask is more efficient for flash_attn
            packed_attention_masks.extend([0 if data_args.neat_packing else 1] * pad_length)

        if len(packed_input_ids) != data_args.cutoff_len:
            raise ValueError("The length of packed example should be identical to the cutoff length.")

        model_inputs["input_ids"].append(packed_input_ids)
        model_inputs["attention_mask"].append(packed_attention_masks)
        model_inputs["labels"].append(packed_labels)
        model_inputs["images"].append(packed_images or None)
        model_inputs["videos"].append(packed_videos or None)

    return model_inputs


def print_supervised_dataset_example(example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer") -> None:
    r"""Dump one encoded example for inspection: token ids, decoded text, and the labels with ``IGNORE_INDEX`` positions filtered out."""
    valid_labels = [token_id for token_id in example["labels"] if token_id != IGNORE_INDEX]
    print(f"input_ids:\n{example['input_ids']}")
    print(f"inputs:\n{tokenizer.decode(example['input_ids'], skip_special_tokens=False)}")
    print(f"label_ids:\n{example['labels']}")
    print(f"labels:\n{tokenizer.decode(valid_labels, skip_special_tokens=False)}")