#!/bin/bash
# lora_finetune_minicpm4.sh
# Timestamp used to name this run's output directory.
formatted_time=$(date +"%Y%m%d%H%M%S")
echo "$formatted_time"

# Select the AMD (ROCm/HIP) GPUs visible to this run; on NVIDIA hardware the
# equivalent variable would be CUDA_VISIBLE_DEVICES.
export HIP_VISIBLE_DEVICES=0,1,2,3

# Launch LoRA fine-tuning of MiniCPM4-8B on the OCNLI data across 4 GPUs via DeepSpeed.
deepspeed --include localhost:0,1,2,3 --master_port 19888 finetune.py \
    --model_name_or_path ../openbmb/MiniCPM4-8B \
    --output_dir output/OCNLILoRA/$formatted_time/ \
    --train_data_path data/ocnli_public_chatml/train.json \
    --eval_data_path data/ocnli_public_chatml/dev.json \
    --learning_rate 5e-5 --per_device_train_batch_size 40 \
    --per_device_eval_batch_size 128 --model_max_length 128 --bf16 --use_lora \
    --gradient_accumulation_steps 1 --warmup_steps 100 \
    --max_steps 1000 --weight_decay 0.01 \
    --eval_steps 500 \
    --save_strategy steps --save_steps 500 --seed 42 \
    --log_level info --logging_strategy steps --logging_steps 10
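
# Optional post-training step (a hedged sketch, not part of the original
# script): if finetune.py saves a standard PEFT LoRA adapter under the output
# directory, it could be merged back into the base model roughly as below.
# The "checkpoint-1000" subdirectory name is an assumption inferred from
# --save_steps 500 and --max_steps 1000 above; adjust the path to whatever the
# run actually produced, then uncomment to use.
#
# python - <<EOF
# from transformers import AutoModelForCausalLM
# from peft import PeftModel
#
# # Load the frozen base model the adapter was trained against.
# base = AutoModelForCausalLM.from_pretrained(
#     "../openbmb/MiniCPM4-8B", trust_remote_code=True)
#
# # Attach the LoRA adapter from the run above and fold it into the weights.
# model = PeftModel.from_pretrained(
#     base, "output/OCNLILoRA/$formatted_time/checkpoint-1000")
# merged = model.merge_and_unload()
# merged.save_pretrained("output/OCNLILoRA/$formatted_time/merged")
# EOF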