#!/usr/bin/env bash
# run_llama2_7B.sh — launch Llama2 7B pretraining through mpirun.
# Usage: ./run_llama2_7B.sh [--profiling=<value>]

#######################################
# Parse launcher options.
# Arguments: the script's command-line arguments
# Globals:   profiling (written) — value after '=' of --profiling*, if given
#######################################
parse_args() {
    local para
    # "$@" keeps each argument intact even if it contains spaces
    # (the original used unquoted $*, which word-splits).
    for para in "$@"
    do
        if [[ $para == --profiling* ]];then
            profiling=${para#*=}
        fi
    done
}

profiling=""   # default: profiling disabled / empty when flag absent
parse_args "$@"

# These variables need to be modified for your environment.
# Cluster / launch configuration.
GPUS="4"                 # how many GPUs to use
HOST="localhost"                 # hostname of the master node
PORT="11451"                 # port id for rendezvous
# Data and model artifact locations.
DATA_PATH="/data/datasets/oscar-1GB_head-llama2_text_document"            # path to oscar-1GB_head-llama2_text_document
TOKENIZER_MODEL_PATH="/data/models/llama2/tokenizer.model" # path to tokenizer.model
CHECKPOINT_PATH="./ckpt"      # path to ckpt

# Runs the Llama2 7B model: mpirun spawns ${GPUS} ranks, each executing the
# single-node training script with the shared rendezvous host/port and paths.
# Output (stdout+stderr) goes to a timestamped log file; the node count in the
# log name is GPUS/8 (8 GPUs per node), so it is 0 for sub-node runs.
mpirun -np "${GPUS}"  --hostfile hostfile \
                    --allow-run-as-root \
                    --bind-to none \
                    --mca plm_rsh_no_tree_spawn 1 \
                    bash -c "
                    ./train_llama2_7b_1nodes.sh \
                    ${HOST} \
                    ${PORT} \
                    --data_path=$DATA_PATH \
                    --tokenizer_path=$TOKENIZER_MODEL_PATH \
                    --checkpoint_path=$CHECKPOINT_PATH \
                    --profiling=$profiling" > "log-$((GPUS / 8))nodes-$(date +%F-%H%M).log" 2>&1

# Block until the launched job (and any background children) finish.
wait