#!/usr/bin/env bash
# lora_train.sh
hostfile=""
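# HIP_VISIBLE_DEVICES is ROCm's analogue of CUDA_VISIBLE_DEVICES: listing
# indices 0-7 pins the job to eight AMD GPUs, and the deepspeed launcher
# starts one fine-tune.py worker per visible device.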
HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 deepspeed --hostfile="$hostfile" fine-tune.py \
    --report_to "none" \
    --data_path "data/test.json" \
    --model_name_or_path "../../baichuan2-13b-chat-hf" \
    --output_dir "output" \
    --model_max_length 64 \
    --num_train_epochs 4 \
    --per_device_train_batch_size 2 \
    --gradient_accumulation_steps 1 \
    --save_strategy epoch \
    --learning_rate 2e-5 \
    --lr_scheduler_type constant \
    --adam_beta1 0.9 \
    --adam_beta2 0.98 \
    --adam_epsilon 1e-8 \
    --max_grad_norm 1.0 \
    --weight_decay 1e-4 \
    --warmup_ratio 0.0 \
    --logging_steps 1 \
    --gradient_checkpointing True \
    --deepspeed ds_config.json \
    --fp16 \
    --use_lora True
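
# fine-tune.py takes its DeepSpeed settings from ds_config.json (passed via
# --deepspeed above). A minimal sketch of such a config, assuming ZeRO
# stage 2 with fp16 to match the flags in this script; the file that
# actually ships with the repo may differ:
#   {
#     "train_micro_batch_size_per_gpu": "auto",
#     "gradient_accumulation_steps": "auto",
#     "fp16": { "enabled": true },
#     "zero_optimization": { "stage": 2 }
#   }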