#!/bin/bash

for para in "$@"
do
    if [[ $para == --data_path* ]];then
        data_path=${para#*=}
    elif [[ $para == --tokenizer_path* ]];then
        tokenizer_path=${para#*=}
    elif [[ $para == --checkpoint_path* ]];then
        checkpoint_path=${para#*=}
    elif [[ $para == --profiling* ]];then
        profiling=${para#*=}
    fi
done
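
# Named flags use the --key=value form (e.g. --profiling=torch); the loop
# above strips everything up to the first "=" with ${para#*=}.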

# data path
DATA_PATH=${data_path}
TOKENIZER_MODEL_PATH=${tokenizer_path}
CHECKPOINT_PATH=${checkpoint_path}

# default env
DIST_URL=${1}
DIST_PORT=${2}
RANK=$OMPI_COMM_WORLD_RANK
LOCAL_RANK=$OMPI_COMM_WORLD_LOCAL_RANK
WORLD_SIZE=$OMPI_COMM_WORLD_SIZE
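
# Optional sanity check (a sketch, not in the original flow): fail fast when
# the OMPI_COMM_WORLD_* variables are unset, i.e. the script was not launched
# under mpirun.
if [ -z "${RANK}" ] || [ -z "${WORLD_SIZE}" ]; then
    echo "Error: OMPI_COMM_WORLD_* not set; launch this script via mpirun." >&2
    exit 1
fi
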
CURRENT_DIR="$( cd "$( dirname "$0" )" && pwd )"
MEGATRON_PATH="$( dirname "$( dirname "${CURRENT_DIR}" )" )"
export GLOG_minloglevel=3
export CUDA_DEVICE_MAX_CONNECTIONS=1
export HSA_FORCE_FINE_GRAIN_PCIE=1
export OMP_NUM_THREADS=1
export GPU_MAX_HW_QUEUES=10
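
# CUDA_DEVICE_MAX_CONNECTIONS=1 is required by Megatron-LM so communication
# kernels can be scheduled ahead of compute when sequence parallelism and
# the overlap flags below are enabled.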

# enable BatchLinear
export GROUPED_GEMM_BatchLinear=1

DISTRIBUTED_ARGS=(
    --rank ${RANK}
    --world-size ${WORLD_SIZE}
    --local-rank ${LOCAL_RANK}
    --dist-url tcp://${DIST_URL}:${DIST_PORT}
)
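
# The flags above pass the MPI-derived rank/world size to the training script
# and point every rank at a tcp:// rendezvous on DIST_URL:DIST_PORT (the
# script's first two positional arguments).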

MODEL_ARGS=(
    --use-mcore-models
    --disable-bias-linear
    --seq-length 4096
    --max-position-embeddings 32768
    --num-layers 32
    --hidden-size 4096
    --ffn-hidden-size 14336
    --num-attention-heads 32
    --init-method-std 0.01
    --attention-dropout 0.0
    --hidden-dropout 0.0
    --normalization RMSNorm
    --position-embedding-type rope
    --swiglu
    --untie-embeddings-and-output-weights
    --group-query-attention
    --num-query-groups 8
    --no-masked-softmax-fusion
    --no-position-embedding
    --rotary-base 1000000
    --ckpt-format torch
)
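
# These values match the published Mixtral 8x7B architecture: 32 layers,
# hidden size 4096, SwiGLU FFN of 14336, 32 attention heads with 8 KV groups
# (grouped-query attention), RMSNorm, and RoPE with base 1e6.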

MOE_ARGS=(
    --num-experts 8
    --moe-router-topk 2
    --moe-router-load-balancing-type aux_loss
    --moe-aux-loss-coeff 1e-2
    --moe-token-dispatcher-type alltoall
    --moe-expert-capacity-factor 0.5
    --moe-pad-expert-input-to-capacity
    #--moe-grouped-gemm
)
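
# Each token is routed to the top-2 of 8 experts via all-to-all dispatch; the
# aux loss (coeff 1e-2) balances expert load, and a capacity factor of 0.5
# with padded expert input keeps per-expert batch shapes static.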

DATA_ARGS=(
    --tokenizer-type Llama2Tokenizer
    --tokenizer-model ${TOKENIZER_MODEL_PATH}
    --data-path ${DATA_PATH}
    --split 99990,8,2
)

TRAINING_ARGS=(
    --micro-batch-size 1
    --global-batch-size 256
    --lr 1e-4
    --train-iters 10
    --lr-decay-iters 10000
    --lr-decay-style cosine
    --min-lr 1.0e-6
    --weight-decay 0.1
    --lr-warmup-iters 2000
    --clip-grad 1.0
    --bf16
    --overlap-param-gather
    --overlap-grad-reduce
)

MODEL_PARALLEL_ARGS=(
    --tensor-model-parallel-size 2
    --pipeline-model-parallel-size 4
    --expert-model-parallel-size 8
    --expert-tensor-parallel-size 1
    --context-parallel-size 1
    --use-distributed-optimizer
    --sequence-parallel
)
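
# On 4 nodes x 8 GPUs = 32 ranks, TP(2) x PP(4) leaves a data-parallel size
# of 32 / (2*4) = 4; expert layers are additionally sharded 8-way (EP) with
# no tensor parallelism inside the experts (ETP=1).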

LOGGING_ARGS=(
    --log-throughput
    --log-interval 1
    --save-interval 10000
    --eval-interval 1000
    --eval-iters -1
    #--save $CHECKPOINT_PATH
    #--load $CHECKPOINT_PATH
    --tensorboard-dir "${CHECKPOINT_PATH}/tensorboard"
    --no-load-optim
    --no-load-rng
    --no-save-optim
)

TORCH_PROFILE_ARGS=(
    --profile
    --profile-ranks 0 1 2 3 8 9 10 11 
    --profile-step-start 3
    --profile-step-end 4
    --profile-dir torch_prof_mixtral8x7B_4nodes_tp2-pp4-ep8-etp1-cp1
    --use-pytorch-profiler
)

HIP_PROFILE_ARGS=(
    --profile
    --profile-ranks 0 1 2 3 4 5 6 7
    --profile-step-start 4
    --profile-step-end 5
    --use-hip-profiler
)

if [ -n "${WANDB_API_KEY}" ]; then
    LOGGING_ARGS+=(
        --wandb-project ${WANDB_PROJECT:-"Mixtral"}
        --wandb-exp-name ${WANDB_NAME:-"Mixtral_8x7B"}
    )
fi

APP="python3 -u ${MEGATRON_PATH}/pretrain_gpt.py \
    ${DISTRIBUTED_ARGS[@]} \
    ${MODEL_ARGS[@]} \
    ${MOE_ARGS[@]} \
    ${DATA_ARGS[@]} \
    ${TRAINING_ARGS[@]} \
    ${MODEL_PARALLEL_ARGS[@]} \
    ${LOGGING_ARGS[@]} \
    "

if [[ $profiling == "torch" ]]; then
    APP+=" ${TORCH_PROFIE_ARGS[@]}"
elif [[ $profiling == "hip" ]]; then
    mkdir -p hip_prof_data
    APP+=" ${HIP_PROFIE_ARGS[@]}"
    APP="hipprof -d hip_prof_data --hip-trace --trace-off ${APP}"
fi
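
# Pass --profiling=torch to profile steps 3-4 on ranks 0-3 and 8-11 with the
# PyTorch profiler, or --profiling=hip to wrap the run in hipprof with traces
# written to ./hip_prof_data.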

# for Hygon CPU: bind each local rank to its own NUMA node
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
case ${LOCAL_RANK} in
    0) numactl --cpunodebind=0 --membind=0 ${APP} ;;
    1) numactl --cpunodebind=1 --membind=1 ${APP} ;;
    2) numactl --cpunodebind=2 --membind=2 ${APP} ;;
    3) numactl --cpunodebind=3 --membind=3 ${APP} ;;
    4) numactl --cpunodebind=4 --membind=4 ${APP} ;;
    5) numactl --cpunodebind=5 --membind=5 ${APP} ;;
    6) numactl --cpunodebind=6 --membind=6 ${APP} ;;
    7) numactl --cpunodebind=7 --membind=7 ${APP} ;;
esac
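
# One possible 4-node launch (a sketch assuming OpenMPI and a hostfile that
# lists four hosts with slots=8; addresses and paths are placeholders):
#   mpirun -np 32 --hostfile hosts --map-by ppr:8:node \
#       bash train_mixtral_8x7B_4nodes.sh 10.0.0.1 29500 \
#       --data_path=/path/to/dataset_text_document \
#       --tokenizer_path=/path/to/tokenizer.model \
#       --checkpoint_path=/path/to/ckpt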