# llama3_freeze_sft.yaml
### model
model_name_or_path: models/llama3-8b-pro
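# NOTE (assumption): this path is expected to point to a locally prepared
# LLaMA Pro model, i.e. Llama-3-8B already expanded with extra transformer
# blocks, since use_llama_pro below trains only the expanded layers.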

### method
stage: sft
do_train: true
finetuning_type: freeze
freeze_trainable_layers: 8
freeze_trainable_modules: all
use_llama_pro: true
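# Freeze tuning keeps all base weights frozen and trains only 8 layers.
# As documented for these flags (LLaMA-Factory-style), use_llama_pro makes
# the blocks added during model expansion the trainable ones instead of
# simply the last 8, and freeze_trainable_modules: all trains every module
# inside those layers rather than a named subset.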

### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
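# max_samples caps the number of training examples at 1000, keeping this a
# quick demo run; drop it to train on the full data. Sequences longer than
# cutoff_len (2048 tokens) are truncated during preprocessing.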

### output
output_dir: saves/llama3-8b-pro/freeze/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000
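# Effective batch size per optimizer step is
# per_device_train_batch_size * gradient_accumulation_steps = 1 * 8 = 8
# sequences per device (multiplied by the device count under multi-GPU DDP).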

### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500
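# val_size: 0.1 holds out 10% of the data for evaluation, which runs every
# eval_steps (500) training steps, aligned with checkpointing (save_steps: 500).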