# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import json
import time

import av
import fire
from datasets import load_dataset
from eval_bleu_rouge import compute_metrics
from tqdm import tqdm
from transformers import Seq2SeqTrainingArguments

from llamafactory.data import get_dataset, get_template_and_fix_tokenizer
from llamafactory.extras.constants import IGNORE_INDEX
from llamafactory.extras.misc import get_device_count
from llamafactory.extras.packages import is_vllm_available
from llamafactory.hparams import get_infer_args
from llamafactory.model import load_tokenizer


if is_vllm_available():
    from vllm import LLM, SamplingParams
    from vllm.lora.request import LoRARequest


def _need_video_kwargs(template):
    """Whether the template's processor needs explicit video metadata (fps, frame indices)."""
    NEEDED_TEMPLATE = ["qwen3_vl", "glm4v"]
    return any(t in template for t in NEEDED_TEMPLATE)


def vllm_infer(
    model_name_or_path: str,
    adapter_name_or_path: str | None = None,
    dataset: str = "alpaca_en_demo",
    dataset_dir: str = "data",
    template: str = "default",
    cutoff_len: int = 2048,
    max_samples: int | None = None,
    vllm_config: str = "{}",
    save_name: str = "generated_predictions.jsonl",
    matrix_save_name: str | None = None,  # optional output path for aggregate BLEU/ROUGE metrics
    temperature: float = 0.95,
    top_p: float = 0.7,
    top_k: int = 50,
    max_new_tokens: int = 1024,
    repetition_penalty: float = 1.0,
    skip_special_tokens: bool = True,
    default_system: str | None = None,
    enable_thinking: bool = True,
    seed: int | None = None,
    pipeline_parallel_size: int = 1,
    image_max_pixels: int = 768 * 768,
    image_min_pixels: int = 32 * 32,
    video_fps: float = 2.0,
    video_maxlen: int = 128,
    batch_size: int = 1024,
):
    r"""Perform batch generation using vLLM engine, which supports tensor parallelism.

    Usage: python vllm_infer.py --model_name_or_path meta-llama/Llama-2-7b-hf --template llama --dataset alpaca_en_demo
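
    To also compute BLEU/ROUGE metrics over the predictions, pass --matrix_save_name,
    e.g. --matrix_save_name predict_metrics.json (the file name here is only illustrative).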
    """
    if pipeline_parallel_size > get_device_count():
        raise ValueError("Pipeline parallel size should not exceed the number of GPUs.")

    model_args, data_args, _, generating_args = get_infer_args(
        dict(
            model_name_or_path=model_name_or_path,
            adapter_name_or_path=adapter_name_or_path,
            dataset=dataset,
            dataset_dir=dataset_dir,
            template=template,
            cutoff_len=cutoff_len,
            max_samples=max_samples,
            preprocessing_num_workers=16,
            default_system=default_system,
            enable_thinking=enable_thinking,
            vllm_config=vllm_config,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            max_new_tokens=max_new_tokens,
            repetition_penalty=repetition_penalty,
        )
    )

    training_args = Seq2SeqTrainingArguments(output_dir="dummy_dir")
    tokenizer_module = load_tokenizer(model_args)
    tokenizer = tokenizer_module["tokenizer"]
    template_obj = get_template_and_fix_tokenizer(tokenizer, data_args)
    template_obj.mm_plugin.expand_mm_tokens = False  # vLLM expands multimodal placeholder tokens itself

    engine_args = {
        "model": model_args.model_name_or_path,
        "trust_remote_code": True,
        "dtype": model_args.infer_dtype,
        "max_model_len": cutoff_len + max_new_tokens,
        "tensor_parallel_size": (get_device_count() // pipeline_parallel_size) or 1,
        "pipeline_parallel_size": pipeline_parallel_size,
        "disable_log_stats": True,
        "enable_lora": model_args.adapter_name_or_path is not None,
    }
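    # Multimodal templates ship a non-base mm plugin; cap how many images/videos/audios
    # a single prompt may carry so the engine can budget memory for them.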
    if template_obj.mm_plugin.__class__.__name__ != "BasePlugin":
        engine_args["limit_mm_per_prompt"] = {"image": 4, "video": 2, "audio": 2}

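    # User-provided vllm_config entries override the defaults assembled above.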
    if isinstance(model_args.vllm_config, dict):
        engine_args.update(model_args.vllm_config)

    model_preparation_start_time = time.time()
    llm = LLM(**engine_args)

    # load datasets
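    # Stage "ppo" produces prompt-only input_ids, keeping the reference response in labels.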
    dataset_module = get_dataset(template_obj, model_args, data_args, training_args, "ppo", **tokenizer_module)
    train_dataset = dataset_module["train_dataset"]

    sampling_params = SamplingParams(
        repetition_penalty=generating_args.repetition_penalty or 1.0,  # repetition_penalty must be > 0
        temperature=generating_args.temperature,
        top_p=generating_args.top_p or 1.0,  # top_p must be > 0
        top_k=generating_args.top_k or -1,  # top_k must be > 0, or -1 to disable
        stop_token_ids=template_obj.get_stop_token_ids(tokenizer),
        max_tokens=generating_args.max_new_tokens,
        skip_special_tokens=skip_special_tokens,
        seed=seed,
    )
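    # When an adapter is given, wrap it in a LoRARequest so vLLM applies the LoRA weights at generate() time.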
    if model_args.adapter_name_or_path is not None:
        lora_request = LoRARequest("default", 1, model_args.adapter_name_or_path[0])
    else:
        lora_request = None

    # Store all results in these lists
    all_prompts, all_preds, all_labels = [], [], []
    need_video_kwargs = _need_video_kwargs(template)

    model_predict_start_time = time.time()
    # Process in batches to avoid opening too many media files at once
    for i in tqdm(range(0, len(train_dataset), batch_size), desc="Processing batched inference"):
        vllm_inputs, prompts, labels = [], [], []
        batch = train_dataset[i : min(i + batch_size, len(train_dataset))]

        for j in range(len(batch["input_ids"])):
            video_metadata_kwargs = None  # reset per sample so stale video kwargs cannot leak into later samples
            if batch["images"][j] is not None:
                image = batch["images"][j]
                multi_modal_data = {
                    "image": template_obj.mm_plugin._regularize_images(
                        image, image_max_pixels=image_max_pixels, image_min_pixels=image_min_pixels
                    )["images"]
                }
            elif batch["videos"][j] is not None:
                video_metadata = None
                video = batch["videos"][j]
                multi_modal_data = {
                    "video": template_obj.mm_plugin._regularize_videos(
                        video,
                        image_max_pixels=image_max_pixels,
                        image_min_pixels=image_min_pixels,
                        video_fps=video_fps,
                        video_maxlen=video_maxlen,
                    )["videos"]
                }
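                # Templates like qwen3_vl/glm4v expect pre-sampled frames plus explicit metadata
                # (fps, frame indices), so we turn off the processor's own frame sampling below
                # and forward our sampling decisions instead.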
                if need_video_kwargs:
                    container = av.open(video[0], "r")
                    video_stream = next(stream for stream in container.streams if stream.type == "video")
                    sampling_indices = template_obj.mm_plugin._get_video_sample_indices(
                        video_stream, video_fps, video_maxlen
                    )
                    total_frames = video_stream.frames
                    container.close()  # close promptly; leaking containers exhausts the open-file limit
                    video_metadata_kwargs = {
                        "fps": getattr(tokenizer_module["processor"], "video_fps", 24.0),
                        "do_sample_frames": False,
                        "total_num_frames": total_frames,
                    }
                    video_metadata = dict(
                        fps=video_fps,
                        frames_indices=sampling_indices,
                        total_num_frames=total_frames,
                        video_backend="opencv",
                    )
                    multi_modal_data["video"] = (multi_modal_data["video"], video_metadata)
            elif batch["audios"][j] is not None:
                audio = batch["audios"][j]
                audio_data = template_obj.mm_plugin._regularize_audios(
                    audio,
                    sampling_rate=16000,
                )
                multi_modal_data = {"audio": zip(audio_data["audios"], audio_data["sampling_rates"])}
            else:
                multi_modal_data = None

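            # vLLM accepts pre-tokenized prompts through prompt_token_ids; media rides along in multi_modal_data.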
            vllm_input_data = {"prompt_token_ids": batch["input_ids"][j], "multi_modal_data": multi_modal_data}
            if video_metadata_kwargs is not None:
                vllm_input_data["mm_processor_kwargs"] = video_metadata_kwargs

            vllm_inputs.append(vllm_input_data)
            prompts.append(tokenizer.decode(batch["input_ids"][j], skip_special_tokens=skip_special_tokens))
            labels.append(
                tokenizer.decode(
                    list(filter(lambda x: x != IGNORE_INDEX, batch["labels"][j])),
                    skip_special_tokens=skip_special_tokens,
                )
            )

        results = llm.generate(vllm_inputs, sampling_params, lora_request=lora_request)
        preds = [result.outputs[0].text for result in results]

        # Accumulate results
        all_prompts.extend(prompts)
        all_preds.extend(preds)
        all_labels.extend(labels)
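        # Decoded video frames and other per-batch buffers can be large; free them between batches.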
        gc.collect()

    model_predict_end_time = time.time()
    # Write all results at once outside the loop
    with open(save_name, "w", encoding="utf-8") as f:
        for text, pred, label in zip(all_prompts, all_preds, all_labels):
            f.write(json.dumps({"prompt": text, "predict": pred, "label": label}, ensure_ascii=False) + "\n")

    print("*" * 70)
    print(f"{len(all_prompts)} total generated results have been saved at {save_name}.")
    print("*" * 70)

    # When matrix_save_name is not None, compute aggregate metrics and write them out.
    # The format mirrors src.llamafactory.train.sft.workflow.run_sft # 127~132,
    # i.e. trainer.save_metrics("predict", predict_results.metrics), e.g.:
    #
    #   {
    #        "predict_bleu-4": 4.349975,
    #        "predict_model_preparation_time": 0.0128,
    #        "predict_rouge-1": 21.873359375,
    #        "predict_rouge-2": 4.144340625,
    #        "predict_rouge-l": 10.83949375,
    #        "predict_runtime": 131.664,
    #        "predict_samples_per_second": 0.076,
    #        "predict_steps_per_second": 0.008
    #    }
    #
    if matrix_save_name is not None:
        predict_time = model_predict_end_time - model_predict_start_time
        preparation_time = model_predict_start_time - model_preparation_start_time

        start_time = time.time()
        predict_dataset = load_dataset("json", data_files=save_name, split="train")
        predict_dataset = predict_dataset.map(compute_metrics, num_proc=8, remove_columns=predict_dataset.column_names)
        score_dict = predict_dataset.to_dict()

        average_score = {}
        for task, scores in sorted(score_dict.items(), key=lambda x: x[0]):
            score = sum(scores) / len(scores) if scores else 0.0
            print(f"predict_{task}: {score:.4f}")
            average_score["predict_" + task] = score

        average_score["predict_model_preparation_time"] = preparation_time
        average_score["predict_runtime"] = predict_time
        num_steps = len(range(0, len(train_dataset), batch_size))
        average_score["predict_samples_per_second"] = len(predict_dataset) / predict_time if predict_time > 0 else 0.0
        average_score["predict_steps_per_second"] = num_steps / predict_time if predict_time > 0 else 0.0

        with open(matrix_save_name, "w", encoding="utf-8") as f:
            json.dump(average_score, f, indent=4)

        print("*" * 70)
        print(f"\nDone in {time.time() - start_time:.3f}s.\nScore file saved to {matrix_save_name}.")
        print("*" * 70)


if __name__ == "__main__":
    fire.Fire(vllm_infer)