# sft.yaml
data_config:
  train_file: train.jsonl
  val_file: dev.jsonl
  test_file: dev.jsonl
  num_proc: 1
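# The *_file entries are expected to be JSONL, one JSON object per line.
# Illustrative sketch only (an assumption based on typical chat-SFT data;
# check the finetune script for the exact schema it parses):
#   {"messages": [{"role": "user", "content": "Hello"},
#                 {"role": "assistant", "content": "Hi, how can I help?"}]}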

combine: true
freezeV: true
max_input_length: 512
max_output_length: 512
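# combine and freezeV are consumed by the finetune script itself, not by
# transformers; freezeV presumably freezes the vision encoder for the
# multimodal (GLM-4V) variant (an assumption based on the flag name).
# max_input_length / max_output_length cap the tokenized prompt and
# response, typically by truncation.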
# swanlab: "local"  # set to "local" if you don't use the cloud version

training_args:
  # see `transformers.Seq2SeqTrainingArguments`
  output_dir: ./output
  max_steps: 3000
  # adjust to fit your dataset
  learning_rate: 5e-5
  # settings for data loading
  per_device_train_batch_size: 1
  dataloader_num_workers: 16
  remove_unused_columns: false
  # settings for saving checkpoints
  save_strategy: steps
  save_steps: 500
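  # with max_steps: 3000 above, saving every 500 steps yields 6 checkpoints in output_dir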
  # settings for logging
  log_level: info
  logging_strategy: steps
  logging_steps: 10
  run_name: "glm4-sft-finetune"
  # settings for evaluation
  per_device_eval_batch_size: 16
  eval_strategy: steps
  eval_steps: 500
  # settings for optimizer
  # adam_epsilon: 1e-6
  # uncomment the following line to detect nan or inf values
  # debug: underflow_overflow
  predict_with_generate: true
  generation_config:
    max_new_tokens: 512
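  # predict_with_generate makes the Seq2SeqTrainer call model.generate()
  # during evaluation, so the generation_config above applies to eval outputs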
  # set the path to your DeepSpeed ZeRO-3 config here
  # (use an absolute path if you launch from a different working directory)
  deepspeed: configs/ds_zero_3.json
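
# Usage sketch (illustrative; the script name, dataset path, and model id are
# assumptions, adjust them to your checkout of the finetune demo):
#   torchrun --nproc_per_node=8 finetune.py data/your_dataset/ THUDM/glm-4-9b-chat configs/sft.yaml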