# Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v4.40.0/examples/pytorch/summarization/run_summarization.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING, Optional

from ...data import SFTDataCollatorWith4DAttentionMask, get_dataset, get_template_and_fix_tokenizer
from ...extras.constants import IGNORE_INDEX
from ...extras.logging import get_logger
from ...extras.misc import calculate_tps, get_logits_processor
from ...extras.ploting import plot_loss
from ...model import load_model, load_tokenizer
from ..trainer_utils import create_modelcard_and_push
from .metric import ComputeAccuracy, ComputeSimilarity, eval_logit_processor
from .trainer import CustomSeq2SeqTrainer


if TYPE_CHECKING:
    from transformers import Seq2SeqTrainingArguments, TrainerCallback

    from ...hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments


logger = get_logger(__name__)


def run_sft(
    model_args: "ModelArguments",
    data_args: "DataArguments",
    training_args: "Seq2SeqTrainingArguments",
    finetuning_args: "FinetuningArguments",
    generating_args: "GeneratingArguments",
    callbacks: Optional[list["TrainerCallback"]] = None,
):
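    """Run supervised fine-tuning: load the tokenizer, dataset, and model, then
    train, evaluate, and/or predict with a `CustomSeq2SeqTrainer`.
    """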
    tokenizer_module = load_tokenizer(model_args)
    tokenizer = tokenizer_module["tokenizer"]
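    # Fix the tokenizer according to the chat template, then build the SFT
    # dataset (prompt tokens are masked with IGNORE_INDEX in the labels).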
    template = get_template_and_fix_tokenizer(tokenizer, data_args)
    dataset_module = get_dataset(template, model_args, data_args, training_args, stage="sft", **tokenizer_module)
    model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train)

    if getattr(model, "is_quantized", False) and not training_args.do_train:
        setattr(model, "_hf_peft_config_loaded", True)  # hack: mark the quantized model as adapter-loaded so it can run prediction

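    # The collator pads each batch; with `block_diag_attn` enabled (sequence
    # packing), it builds a 4D block-diagonal attention mask so that packed
    # samples cannot attend to one another.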
    data_collator = SFTDataCollatorWith4DAttentionMask(
        template=template,
        model=model if not training_args.predict_with_generate else None,
        pad_to_multiple_of=8 if training_args.do_train else None,  # for shift short attention
        label_pad_token_id=IGNORE_INDEX if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id,
        block_diag_attn=model_args.block_diag_attn,
        attn_implementation=getattr(model.config, "_attn_implementation", None),
        compute_dtype=model_args.compute_dtype,
        **tokenizer_module,
    )

    # Override the decoding parameters of Seq2SeqTrainer
    training_args.generation_max_length = training_args.generation_max_length or data_args.cutoff_len
    training_args.generation_num_beams = data_args.eval_num_beams or training_args.generation_num_beams
    training_args.remove_unused_columns = False  # important for multimodal datasets

    # Metric utils
    metric_module = {}
    if training_args.predict_with_generate:
        metric_module["compute_metrics"] = ComputeSimilarity(tokenizer=tokenizer)
    elif finetuning_args.compute_accuracy:
        metric_module["compute_metrics"] = ComputeAccuracy()
        metric_module["preprocess_logits_for_metrics"] = eval_logit_processor

    # Keyword arguments for `model.generate`
    gen_kwargs = generating_args.to_dict(obey_generation_config=True)
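    # Stop generation on the model's EOS token as well as any additional
    # special tokens (e.g., chat-template end-of-turn tokens).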
    gen_kwargs["eos_token_id"] = [tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids
    gen_kwargs["pad_token_id"] = tokenizer.pad_token_id
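    # Sanitize logits during generation (e.g., remove inf/nan values).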
    gen_kwargs["logits_processor"] = get_logits_processor()

    # Initialize our Trainer
    trainer = CustomSeq2SeqTrainer(
        model=model,
        args=training_args,
        finetuning_args=finetuning_args,
        data_collator=data_collator,
        callbacks=callbacks,
        gen_kwargs=gen_kwargs,
        **dataset_module,
        **tokenizer_module,
        **metric_module,
    )

    # Training
    if training_args.do_train:
        train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
        trainer.save_model()
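        # Optionally report training throughput as effective tokens per second.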
        if finetuning_args.include_effective_tokens_per_second:
            train_result.metrics["effective_tokens_per_sec"] = calculate_tps(
                dataset_module["train_dataset"], train_result.metrics, stage="sft"
            )

        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
        if trainer.is_world_process_zero() and finetuning_args.plot_loss:
            plot_loss(training_args.output_dir, keys=["loss", "eval_loss", "eval_accuracy"])

    if training_args.predict_with_generate:
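        # Decoder-only models must be padded on the left at inference time so
        # that generated tokens are appended directly after each prompt.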
        tokenizer.padding_side = "left"  # use left-padding in generation

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate(metric_key_prefix="eval", **gen_kwargs)
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Predict
    if training_args.do_predict:
        logger.warning_rank0_once("Batch generation can be very slow. Consider using `scripts/vllm_infer.py` instead.")
        predict_results = trainer.predict(dataset_module["eval_dataset"], metric_key_prefix="predict", **gen_kwargs)
        trainer.log_metrics("predict", predict_results.metrics)
        trainer.save_metrics("predict", predict_results.metrics)
        trainer.save_predictions(dataset_module["eval_dataset"], predict_results, generating_args.skip_special_tokens)

    # Create model card
    create_modelcard_and_push(trainer, model_args, data_args, training_args, finetuning_args)
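
# A minimal sketch of how `run_sft` is typically reached (assuming the standard
# LlamaFactory argument parser `get_train_args`, which returns these five
# hyper-parameter dataclasses; the exact entry point may differ across versions):
#
#     from llamafactory.hparams import get_train_args
#
#     model_args, data_args, training_args, finetuning_args, generating_args = get_train_args()
#     run_sft(model_args, data_args, training_args, finetuning_args, generating_args)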