#!/bin/bash
# run_gemini.sh
set -x
# distplan in ["CAI_ZeRO1", "CAI_ZeRO2", "CAI_Gemini", "Pytorch_DDP", "Pytorch_ZeRO"]
export DISTPLAN=${DISTPLAN:-"CAI_Gemini"}
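
# Usage sketch: every option in this script is an environment variable with a
# default, so a run can be reconfigured without editing the file, e.g.:
#   DISTPLAN="CAI_ZeRO2" GPUNUM=4 BATCH_SIZE=8 bash run_gemini.sh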

# The following options are only valid when DISTPLAN is one of the
# ColossalAI plans (CAI_*).
export GPUNUM=${GPUNUM:-1}
export TPDEGREE=${TPDEGREE:-1}
export PLACEMENT=${PLACEMENT:-"cpu"}
export USE_SHARD_INIT=${USE_SHARD_INIT:-False}
export BATCH_SIZE=${BATCH_SIZE:-16}
export MODEL_TYPE=${MODEL_TYPE:-"gpt2_medium"}
export TRAIN_STEP=${TRAIN_STEP:-10}
# export PYTHONPATH=$PWD:$PYTHONPATH

# Map the USE_SHARD_INIT env var onto the optional --shardinit CLI flag.
if [ "${USE_SHARD_INIT}" = "True" ]; then
  USE_SHARD_INIT="--shardinit"
else
  USE_SHARD_INIT=""
fi
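
# Effect of the branch above (sketch):
#   USE_SHARD_INIT=True  -> the torchrun line receives an extra --shardinit flag
#   anything else        -> the variable expands to nothing and the flag is omitted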

mkdir -p gemini_logs

torchrun --standalone --nproc_per_node=${GPUNUM} ./train_gpt_demo.py \
  --tp_degree=${TPDEGREE} \
  --model_type=${MODEL_TYPE} \
  --batch_size=${BATCH_SIZE} \
  --placement=${PLACEMENT} \
  ${USE_SHARD_INIT} \
  --distplan=${DISTPLAN} \
  --train_step=${TRAIN_STEP} \
  2>&1 | tee ./gemini_logs/${MODEL_TYPE}_${DISTPLAN}_gpu_${GPUNUM}_bs_${BATCH_SIZE}_tp_${TPDEGREE}_${PLACEMENT}.log
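
# With the defaults above, for example, the log lands in
#   ./gemini_logs/gpt2_medium_CAI_Gemini_gpu_1_bs_16_tp_1_cpu.log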