#!/bin/bash

GPUS_PER_NODE=8
NNODES=1
NODE_RANK=0
MASTER_ADDR=localhost
MASTER_PORT=6001
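
# Multi-node note (illustrative values, not from this repo's docs): launch this
# same script on every node, keep MASTER_ADDR/MASTER_PORT identical everywhere,
# and give each node a unique NODE_RANK, e.g. with NNODES=2:
#   node 0: NODE_RANK=0 MASTER_ADDR=<master-node-ip>
#   node 1: NODE_RANK=1 MASTER_ADDR=<master-node-ip>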

MODEL="XXXXXXX/MiniCPM-Llama3-V-2_5" # or openbmb/MiniCPM-V-2

# ATTENTION: specify the path to your training data, which should be a JSON
# file containing a list of conversations. See the finetuning section of the
# README for more information.
DATA="/home/wanglch/MiniCPM-V/data/self_build/train_data/train_data.json"
EVAL_DATA="/home/wanglch/MiniCPM-V/data/self_build/eval_data/eval_data.json"
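
# A minimal sketch of one record in that JSON file, following the finetuning
# section of the MiniCPM-V README (treat the README's schema as authoritative):
# [
#   {
#     "id": "0",
#     "image": "path/to/image.jpg",
#     "conversations": [
#       {"role": "user", "content": "<image>\nWhat is shown in this image?"},
#       {"role": "assistant", "content": "A short answer."}
#     ]
#   }
# ]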

LLM_TYPE="llama3" # if using openbmb/MiniCPM-V-2, set LLM_TYPE=minicpm

DISTRIBUTED_ARGS="
    --nproc_per_node $GPUS_PER_NODE \
    --nnodes $NNODES \
    --node_rank $NODE_RANK \
    --master_addr $MASTER_ADDR \
    --master_port $MASTER_PORT
"
torchrun $DISTRIBUTED_ARGS finetune.py \
    --model_name_or_path $MODEL \
    --llm_type $LLM_TYPE \
    --data_path $DATA \
    --eval_data_path $EVAL_DATA \
    --remove_unused_columns false \
    --label_names "labels" \
    --prediction_loss_only false \
    --bf16 true \
    --bf16_full_eval true \
    --do_train \
    --do_eval \
    --tune_vision true \
    --tune_llm true \
    --model_max_length 2048 \
    --max_slice_nums 9 \
    --max_steps 100 \
    --eval_steps 10 \
    --output_dir "/home/wanglch/MiniCPM-V/saves/MiniCPM-Llama3-V-2_5/train_lora/" \
    --logging_dir "/home/wanglch/MiniCPM-V/saves/MiniCPM-Llama3-V-2_5/train_lora/" \
    --logging_strategy "steps" \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 1 \
    --evaluation_strategy "steps" \
    --save_strategy "steps" \
    --save_steps 100 \
    --save_total_limit 10 \
    --learning_rate 1e-6 \
    --weight_decay 0.1 \
    --adam_beta2 0.95 \
    --warmup_ratio 0.01 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --gradient_checkpointing true \
    --deepspeed ds_config_zero3.json \
    --report_to "tensorboard"
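
# A minimal ZeRO-3 sketch of what ds_config_zero3.json might contain
# (illustrative; "auto" lets the HF Trainer fill values in from the flags
# above, and the config shipped with the repo should take precedence):
# {
#   "train_micro_batch_size_per_gpu": "auto",
#   "gradient_accumulation_steps": "auto",
#   "bf16": { "enabled": "auto" },
#   "zero_optimization": {
#     "stage": 3,
#     "stage3_gather_16bit_weights_on_model_save": true
#   }
# }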