# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import asdict, dataclass, field
from typing import Any, Literal


@dataclass
class FreezeArguments:
    r"""Arguments pertaining to the freeze (partial-parameter) training."""

    freeze_trainable_layers: int = field(
        default=2,
        metadata={
            "help": (
                "The number of trainable layers for freeze (partial-parameter) fine-tuning. "
                "Positive numbers mean the last n layers are set as trainable, "
                "negative numbers mean the first n layers are set as trainable."
            )
        },
    )
    freeze_trainable_modules: str = field(
        default="all",
        metadata={
            "help": (
                "Name(s) of trainable modules for freeze (partial-parameter) fine-tuning. "
                "Use commas to separate multiple modules. "
                "Use `all` to specify all the available modules."
            )
        },
    )
    freeze_extra_modules: str | None = field(
        default=None,
        metadata={
            "help": (
                "Name(s) of modules apart from hidden layers to be set as trainable "
                "for freeze (partial-parameter) fine-tuning. "
                "Use commas to separate multiple modules."
            )
        },
    )
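    # Illustrative note (hypothetical values, not part of the dataclass):
    # `freeze_trainable_layers=2` trains only the last two hidden layers,
    # `freeze_trainable_layers=-2` trains the first two instead, and
    # `freeze_extra_modules="embed_tokens,lm_head"` would additionally unfreeze
    # those non-layer modules if the model defines them.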


@dataclass
class LoraArguments:
    r"""Arguments pertaining to the LoRA training."""

    additional_target: str | None = field(
        default=None,
        metadata={
            "help": (
                "Name(s) of modules apart from LoRA layers to be set as trainable "
                "and saved in the final checkpoint. "
                "Use commas to separate multiple modules."
            )
        },
    )
    lora_alpha: int | None = field(
        default=None,
        metadata={"help": "The scale factor for LoRA fine-tuning (default: lora_rank * 2)."},
    )
    lora_dropout: float = field(
        default=0.0,
        metadata={"help": "Dropout rate for the LoRA fine-tuning."},
    )
    lora_rank: int = field(
        default=8,
        metadata={"help": "The intrinsic dimension for LoRA fine-tuning."},
    )
    lora_target: str = field(
        default="all",
        metadata={
            "help": (
                "Name(s) of target modules to apply LoRA. "
                "Use commas to separate multiple modules. "
                "Use `all` to specify all the linear modules."
            )
        },
    )
    loraplus_lr_ratio: float | None = field(
        default=None,
        metadata={"help": "LoRA plus learning rate ratio (lr_B / lr_A)."},
    )
    loraplus_lr_embedding: float = field(
        default=1e-6,
        metadata={"help": "LoRA plus learning rate for lora embedding layers."},
    )
    use_rslora: bool = field(
        default=False,
        metadata={"help": "Whether or not to use the rank stabilization scaling factor for LoRA layer."},
    )
    use_dora: bool = field(
        default=False,
        metadata={"help": "Whether or not to use the weight-decomposed lora method (DoRA)."},
    )
    pissa_init: bool = field(
        default=False,
        metadata={"help": "Whether or not to initialize a PiSSA adapter."},
    )
    pissa_iter: int = field(
        default=16,
        metadata={"help": "The number of iteration steps performed by FSVD in PiSSA. Use -1 to disable it."},
    )
    pissa_convert: bool = field(
        default=False,
        metadata={"help": "Whether or not to convert the PiSSA adapter to a normal LoRA adapter."},
    )
    create_new_adapter: bool = field(
        default=False,
        metadata={"help": "Whether or not to create a new adapter with randomly initialized weight."},
    )
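    # Usage sketch (hypothetical values): `lora_rank=8` with the default
    # `lora_alpha=None` resolves to `lora_alpha=16` in `__post_init__` below,
    # and a string such as `lora_target="q_proj,v_proj"` is split there into
    # ["q_proj", "v_proj"].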


@dataclass
class OFTArguments:
    r"""Arguments pertaining to the OFT training."""

    additional_target: str | None = field(
        default=None,
        metadata={
            "help": (
                "Name(s) of modules apart from LoRA layers to be set as trainable "
                "and saved in the final checkpoint. "
                "Use commas to separate multiple modules."
            )
        },
    )
    module_dropout: float = field(
        default=0.0,
        metadata={"help": "Dropout rate for the OFT fine-tuning."},
    )
    oft_rank: int = field(
        default=0,
        metadata={"help": "The intrinsic dimension for OFT fine-tuning."},
    )
    oft_block_size: int = field(
        default=32,
        metadata={"help": "The intrinsic dimension for OFT fine-tuning."},
    )
    oft_target: str = field(
        default="all",
        metadata={
            "help": (
                "Name(s) of target modules to apply OFT. "
                "Use commas to separate multiple modules. "
                "Use `all` to specify all the linear modules."
            )
        },
    )
    create_new_adapter: bool = field(
        default=False,
        metadata={"help": "Whether or not to create a new adapter with randomly initialized weight."},
    )
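    # Note (an assumption based on the usual OFT convention in PEFT): `oft_rank`
    # and `oft_block_size` are alternative ways to size the orthogonal blocks,
    # and typically exactly one of them is nonzero; the defaults above
    # (`oft_rank=0`, `oft_block_size=32`) size by block.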


@dataclass
class RLHFArguments:
    r"""Arguments pertaining to the PPO, DPO and KTO training."""

    pref_beta: float = field(
        default=0.1,
        metadata={"help": "The beta parameter in the preference loss."},
    )
    pref_ftx: float = field(
        default=0.0,
        metadata={"help": "The supervised fine-tuning loss coefficient in DPO training."},
    )
    pref_bco_weight: float = field(
        default=0.0,
        metadata={"help": "The Binary Classifier Optimization coefficient in DPO training."},
    )
    pref_loss: Literal["sigmoid", "hinge", "ipo", "kto_pair", "orpo", "simpo"] = field(
        default="sigmoid",
        metadata={"help": "The type of DPO loss to use."},
    )
    dpo_label_smoothing: float = field(
        default=0.0,
        metadata={"help": "The robust DPO label smoothing parameter in cDPO that should be between 0 and 0.5."},
    )
    kto_chosen_weight: float = field(
        default=1.0,
        metadata={"help": "The weight factor of the desirable losses in KTO training."},
    )
    kto_rejected_weight: float = field(
        default=1.0,
        metadata={"help": "The weight factor of the undesirable losses in KTO training."},
    )
    simpo_gamma: float = field(
        default=0.5,
        metadata={"help": "The target reward margin term in SimPO loss."},
    )
    ppo_buffer_size: int = field(
        default=1,
        metadata={"help": "The number of mini-batches to make experience buffer in a PPO optimization step."},
    )
    ppo_epochs: int = field(
        default=4,
        metadata={"help": "The number of epochs to perform in a PPO optimization step."},
    )
    ppo_score_norm: bool = field(
        default=False,
        metadata={"help": "Use score normalization in PPO training."},
    )
    ppo_target: float = field(
        default=6.0,
        metadata={"help": "Target KL value for adaptive KL control in PPO training."},
    )
    ppo_whiten_rewards: bool = field(
        default=False,
        metadata={"help": "Whiten the rewards before compute advantages in PPO training."},
    )
    ref_model: str | None = field(
        default=None,
        metadata={"help": "Path to the reference model used for the PPO or DPO training."},
    )
    ref_model_adapters: str | None = field(
        default=None,
        metadata={"help": "Path to the adapters of the reference model."},
    )
    ref_model_quantization_bit: int | None = field(
        default=None,
        metadata={"help": "The number of bits to quantize the reference model."},
    )
    reward_model: str | None = field(
        default=None,
        metadata={"help": "Path to the reward model used for the PPO training."},
    )
    reward_model_adapters: str | None = field(
        default=None,
        metadata={"help": "Path to the adapters of the reward model."},
    )
    reward_model_quantization_bit: int | None = field(
        default=None,
        metadata={"help": "The number of bits to quantize the reward model."},
    )
    reward_model_type: Literal["lora", "full", "api"] = field(
        default="lora",
        metadata={"help": "The type of the reward model in PPO training. Lora model only supports lora training."},
    )
    ld_alpha: float | None = field(
        default=None,
        metadata={
            "help": (
                "Alpha parameter from the LD-DPO paper, which controls the weighting of"
                " the verbose token log-probabilities in responses."
            )
        },
    )
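    # For orientation, the default `pref_loss="sigmoid"` corresponds to the
    # standard DPO objective (a sketch, not the exact trainer code):
    #   loss = -log(sigmoid(pref_beta * (log_ratio_chosen - log_ratio_rejected)))
    # where each log-ratio compares the policy to the frozen `ref_model`.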


@dataclass
class GaloreArguments:
    r"""Arguments pertaining to the GaLore algorithm."""

    use_galore: bool = field(
        default=False,
        metadata={"help": "Whether or not to use the gradient low-Rank projection (GaLore)."},
    )
    galore_target: str = field(
        default="all",
        metadata={
            "help": (
                "Name(s) of modules to apply GaLore. Use commas to separate multiple modules. "
                "Use `all` to specify all the linear modules."
            )
        },
    )
    galore_rank: int = field(
        default=16,
        metadata={"help": "The rank of GaLore gradients."},
    )
    galore_update_interval: int = field(
        default=200,
        metadata={"help": "Number of steps to update the GaLore projection."},
    )
    galore_scale: float = field(
        default=2.0,
        metadata={"help": "GaLore scaling coefficient."},
    )
    galore_proj_type: Literal["std", "reverse_std", "right", "left", "full"] = field(
        default="std",
        metadata={"help": "Type of GaLore projection."},
    )
    galore_layerwise: bool = field(
        default=False,
        metadata={"help": "Whether or not to enable layer-wise update to further save memory."},
    )
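    # Informal summary: GaLore projects the gradients of the targeted linear
    # modules onto a rank-`galore_rank` subspace, refreshes that projection
    # every `galore_update_interval` steps, and scales the projected update by
    # `galore_scale`.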


@dataclass
class ApolloArguments:
    r"""Arguments pertaining to the APOLLO algorithm."""

    use_apollo: bool = field(
        default=False,
        metadata={"help": "Whether or not to use the APOLLO optimizer."},
    )
    apollo_target: str = field(
        default="all",
        metadata={
            "help": (
                "Name(s) of modules to apply APOLLO. Use commas to separate multiple modules. "
                "Use `all` to specify all the linear modules."
            )
        },
    )
    apollo_rank: int = field(
        default=16,
        metadata={"help": "The rank of APOLLO gradients."},
    )
    apollo_update_interval: int = field(
        default=200,
        metadata={"help": "Number of steps to update the APOLLO projection."},
    )
    apollo_scale: float = field(
        default=32.0,
        metadata={"help": "APOLLO scaling coefficient."},
    )
    apollo_proj: Literal["svd", "random"] = field(
        default="random",
        metadata={"help": "Type of APOLLO low-rank projection algorithm (svd or random)."},
    )
    apollo_proj_type: Literal["std", "right", "left"] = field(
        default="std",
        metadata={"help": "Type of APOLLO projection."},
    )
    apollo_scale_type: Literal["channel", "tensor"] = field(
        default="channel",
        metadata={"help": "Type of APOLLO scaling (channel or tensor)."},
    )
    apollo_layerwise: bool = field(
        default=False,
        metadata={"help": "Whether or not to enable layer-wise update to further save memory."},
    )
    apollo_scale_front: bool = field(
        default=False,
        metadata={"help": "Whether or not to use the norm-growth limiter in front of gradient scaling."},
    )


@dataclass
class BAdamArgument:
    r"""Arguments pertaining to the BAdam optimizer."""

    use_badam: bool = field(
        default=False,
        metadata={"help": "Whether or not to use the BAdam optimizer."},
    )
    badam_mode: Literal["layer", "ratio"] = field(
        default="layer",
        metadata={"help": "Whether to use layer-wise or ratio-wise BAdam optimizer."},
    )
    badam_start_block: int | None = field(
        default=None,
        metadata={"help": "The starting block index for layer-wise BAdam."},
    )
    badam_switch_mode: Literal["ascending", "descending", "random", "fixed"] | None = field(
        default="ascending",
        metadata={"help": "the strategy of picking block to update for layer-wise BAdam."},
    )
    badam_switch_interval: int | None = field(
        default=50,
        metadata={
            "help": "Number of steps to update the block for layer-wise BAdam. Use -1 to disable the block update."
        },
    )
    badam_update_ratio: float = field(
        default=0.05,
        metadata={"help": "The ratio of the update for ratio-wise BAdam."},
    )
    badam_mask_mode: Literal["adjacent", "scatter"] = field(
        default="adjacent",
        metadata={
            "help": (
                "The mode of the mask for BAdam optimizer. "
                "`adjacent` means that the trainable parameters are adjacent to each other, "
                "`scatter` means that trainable parameters are randomly choosed from the weight."
            )
        },
    )
    badam_verbose: int = field(
        default=0,
        metadata={
            "help": (
                "The verbosity level of BAdam optimizer. "
                "0 for no print, 1 for print the block prefix, 2 for print trainable parameters."
            )
        },
    )
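    # Usage sketch: `badam_mode="layer"` updates one block at a time, switching
    # every `badam_switch_interval` steps in `badam_switch_mode` order, while
    # `badam_mode="ratio"` updates a `badam_update_ratio` fraction of the
    # parameters inside each block, selected according to `badam_mask_mode`.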


@dataclass
class SwanLabArguments:
    use_swanlab: bool = field(
        default=False,
        metadata={"help": "Whether or not to use the SwanLab (an experiment tracking and visualization tool)."},
    )
    swanlab_project: str | None = field(
        default="llamafactory",
        metadata={"help": "The project name in SwanLab."},
    )
    swanlab_workspace: str | None = field(
        default=None,
        metadata={"help": "The workspace name in SwanLab."},
    )
    swanlab_run_name: str | None = field(
        default=None,
        metadata={"help": "The experiment name in SwanLab."},
    )
    swanlab_mode: Literal["cloud", "local"] = field(
        default="cloud",
        metadata={"help": "The mode of SwanLab."},
    )
    swanlab_api_key: str | None = field(
        default=None,
        metadata={"help": "The API key for SwanLab."},
    )
    swanlab_logdir: str | None = field(
        default=None,
        metadata={"help": "The log directory for SwanLab."},
    )
    swanlab_lark_webhook_url: str | None = field(
        default=None,
        metadata={"help": "The Lark(飞书) webhook URL for SwanLab."},
    )
    swanlab_lark_secret: str | None = field(
        default=None,
        metadata={"help": "The Lark(飞书) secret for SwanLab."},
    )


@dataclass
class FinetuningArguments(
    SwanLabArguments,
    BAdamArgument,
    ApolloArguments,
    GaloreArguments,
    RLHFArguments,
    LoraArguments,
    OFTArguments,
    FreezeArguments,
):
    r"""Arguments pertaining to which techniques we are going to fine-tuning with."""

    pure_bf16: bool = field(
        default=False,
        metadata={"help": "Whether or not to train model in purely bf16 precision (without AMP)."},
    )
    stage: Literal["pt", "sft", "rm", "ppo", "dpo", "kto"] = field(
        default="sft",
        metadata={"help": "Which stage will be performed in training."},
    )
    finetuning_type: Literal["lora", "oft", "freeze", "full"] = field(
        default="lora",
        metadata={"help": "Which fine-tuning method to use."},
    )
    use_llama_pro: bool = field(
        default=False,
        metadata={"help": "Whether or not to make only the parameters in the expanded blocks trainable."},
    )
    use_adam_mini: bool = field(
        default=False,
        metadata={"help": "Whether or not to use the Adam-mini optimizer."},
    )
    use_mca: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether or not to use MCA (Megatron Core Adapter) training. "
                "Controlled by USE_MCA environment variable."
            )
        },
    )
    use_muon: bool = field(
        default=False,
        metadata={"help": "Whether or not to use the Muon optimizer."},
    )
    use_dft_loss: bool = field(
        default=False,
        metadata={"help": "Whether to use the DFT loss."},
    )
    freeze_vision_tower: bool = field(
        default=True,
        metadata={"help": "Whether ot not to freeze the vision tower in MLLM training."},
    )
    freeze_multi_modal_projector: bool = field(
        default=True,
        metadata={"help": "Whether or not to freeze the multi modal projector in MLLM training."},
    )
    freeze_language_model: bool = field(
        default=False,
        metadata={"help": "Whether or not to freeze the language model in MLLM training."},
    )
    compute_accuracy: bool = field(
        default=False,
        metadata={"help": "Whether or not to compute the token-level accuracy at evaluation."},
    )
    disable_shuffling: bool = field(
        default=False,
        metadata={"help": "Whether or not to disable the shuffling of the training set."},
    )
    early_stopping_steps: int | None = field(
        default=None,
        metadata={"help": "Number of steps to stop training if the `metric_for_best_model` does not improve."},
    )
    plot_loss: bool = field(
        default=False,
        metadata={"help": "Whether or not to save the training loss curves."},
    )
    include_effective_tokens_per_second: bool = field(
        default=False,
        metadata={"help": "Whether or not to compute effective tokens per second."},
    )

    def __post_init__(self):
        def split_arg(arg):
            if isinstance(arg, str):
                return [item.strip() for item in arg.split(",")]
            return arg
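        # e.g. split_arg("q_proj, v_proj") -> ["q_proj", "v_proj"] (hypothetical
        # module names); non-string arguments pass through unchanged.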

        self.freeze_trainable_modules: list[str] = split_arg(self.freeze_trainable_modules)
        self.freeze_extra_modules: list[str] | None = split_arg(self.freeze_extra_modules)
        self.lora_alpha: int = self.lora_alpha or self.lora_rank * 2
        self.lora_target: list[str] = split_arg(self.lora_target)
        self.oft_target: list[str] = split_arg(self.oft_target)
        self.additional_target: list[str] | None = split_arg(self.additional_target)
        self.galore_target: list[str] = split_arg(self.galore_target)
        self.apollo_target: list[str] = split_arg(self.apollo_target)
        self.use_ref_model = self.stage == "dpo" and self.pref_loss not in ["orpo", "simpo"]

        assert self.finetuning_type in ["lora", "oft", "freeze", "full"], "Invalid fine-tuning method."
        assert self.ref_model_quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization."
        assert self.reward_model_quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization."

        if self.stage == "ppo" and self.reward_model is None:
            raise ValueError("`reward_model` is necessary for PPO training.")

        if self.stage == "ppo" and self.reward_model_type == "lora" and self.finetuning_type != "lora":
            raise ValueError("`reward_model_type` cannot be lora for Freeze/Full PPO training.")

        if self.stage == "ppo" and self.reward_model_type == "oft" and self.finetuning_type != "oft":
            raise ValueError("`reward_model_type` cannot be oft for Freeze/Full PPO training.")

        if self.stage == "dpo" and self.pref_loss != "sigmoid" and self.dpo_label_smoothing > 1e-6:
            raise ValueError("`dpo_label_smoothing` is only valid for sigmoid loss function.")

        if self.use_llama_pro and self.finetuning_type == "full":
            raise ValueError("`use_llama_pro` is only valid for Freeze or LoRA training.")

        if self.finetuning_type == "lora" and (self.use_galore or self.use_apollo or self.use_badam):
            raise ValueError("Cannot use LoRA with GaLore, APOLLO or BAdam together.")

        if int(self.use_galore) + int(self.use_apollo) + int(self.use_badam) > 1:
            raise ValueError("Cannot use GaLore, APOLLO or BAdam together.")

        if self.pissa_init and (self.stage in ["ppo", "kto"] or self.use_ref_model):
            raise ValueError("Cannot use PiSSA for current training stage.")

        if self.finetuning_type != "lora":
            if self.loraplus_lr_ratio is not None:
                raise ValueError("`loraplus_lr_ratio` is only valid for LoRA training.")

            if self.use_rslora:
                raise ValueError("`use_rslora` is only valid for LoRA training.")

            if self.use_dora:
                raise ValueError("`use_dora` is only valid for LoRA training.")

            if self.pissa_init:
                raise ValueError("`pissa_init` is only valid for LoRA training.")

    def to_dict(self) -> dict[str, Any]:
        args = asdict(self)
        args = {k: f"<{k.upper()}>" if k.endswith("api_key") else v for k, v in args.items()}
        return args
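

# Minimal usage sketch (illustrative only, not part of the library API):
# construct the arguments with defaults plus a custom rank and dump them;
# `to_dict` masks every field whose name ends with `api_key`.
if __name__ == "__main__":
    demo_args = FinetuningArguments(lora_rank=16)
    print(demo_args.lora_alpha)  # 32, resolved as lora_rank * 2 in __post_init__
    print(demo_args.lora_target)  # ['all'], split from the default string "all"
    print(demo_args.to_dict()["swanlab_api_key"])  # "<SWANLAB_API_KEY>"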