# qwen2_full_sft.yaml

### model
model_name_or_path: Qwen/Qwen2-1.5B-Instruct
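# trust_remote_code lets Transformers load custom model/tokenizer code shipped with the Hub repository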
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: full
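# use the Adam-mini optimizer, a memory-efficient AdamW variant that assigns learning rates per parameter block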
use_adam_mini: true

### dataset
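# comma-separated dataset names as registered in dataset_info.json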
dataset: identity,alpaca_en_demo
template: qwen
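# maximum tokenized sequence length; longer examples are truncated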
cutoff_len: 2048
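# use at most 1000 examples from each dataset (demo-sized run)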
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: saves/qwen2-1_5b/full/sft
logging_steps: 10
save_steps: 500
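# plot_loss saves a training-loss curve to output_dir after training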
plot_loss: true
overwrite_output_dir: true

### train
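# effective batch size = per_device_train_batch_size * gradient_accumulation_steps * num_gpus (1 * 8 = 8 per GPU here)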
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-5
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
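# DDP process-group timeout in seconds; set very large so slow data preprocessing does not trip the timeout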
ddp_timeout: 180000000

### eval
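# hold out 10% of the data as a validation split and evaluate every 500 steps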
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500