#!/bin/bash

for para in "$@"
do
    if [[ ${para} == --profiling* ]]; then
        profiling=${para#*=}
    fi
done
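
# Usage sketch (assumed launch pattern; adjust hostfile and paths to your cluster):
#   mpirun -np 32 -npernode 8 --hostfile <hosts> bash train_mixtral_8x7B_4nodes.sh <master-ip> --profiling=torch
# The first positional argument is the master-node address used for the TCP
# rendezvous below; --profiling selects torch or hip tracing (optional).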

source /opt/dtk/env.sh
# Runs the Mixtral 8x7B model on 4 nodes (8 GPUs per node)
export CUDA_DEVICE_MAX_CONNECTIONS=1
export HSA_FORCE_FINE_GRAIN_PCIE=1
export OMP_NUM_THREADS=1
export GPU_MAX_HW_QUEUES=10

export NCCL_ALGO=Ring
export NCCL_MIN_NCHANNELS=32
export NCCL_MAX_NCHANNELS=32
export NCCL_NET_GDR_LEVEL=7
export NCCL_NET_GDR_READ=1
export RCCL_SDMA_COPY_ENABLE=0
export NCCL_IB_HCA=mlx5_2:1,mlx5_3:1,mlx5_4:1,mlx5_5:1,mlx5_6:1,mlx5_7:1,mlx5_8:1,mlx5_9:1
export NCCL_TOPO_FILE="/public/home/xingjl/dependency/rccl-tests-0204/topo-input.xml"
export GROUPED_GEMM_BatchLinear=1
export GLOG_minloglevel=3
export LD_LIBRARY_PATH=/public/home/xingjl/dependency/rocblas-install-0227/lib:$LD_LIBRARY_PATH
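
# Site-specific communication tuning: the Ring algorithm is pinned to 32
# channels, GPUDirect RDMA is enabled over the listed mlx5 HCAs, and a
# pre-generated RCCL topology file is used. The /public/home/xingjl paths are
# local to this cluster and will need adjusting on other machines.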

RANK=$OMPI_COMM_WORLD_RANK
LOCAL_RANK=$OMPI_COMM_WORLD_LOCAL_RANK
WORLD_SIZE=$OMPI_COMM_WORLD_SIZE
DIST_URL=${1}
DIST_PORT=25900
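
# Rank and world size come from Open MPI's OMPI_COMM_WORLD_* variables, so the
# script assumes an mpirun launch with one process per GPU (8 x 4 nodes = 32).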

CHECKPOINT_PATH=./CKPT
TOKENIZER_MODEL=./mixtral_dataset/tokenizer.model
DATA_PATH=./mixtral_dataset/my-mixtral_text_document

DISTRIBUTED_ARGS=(
    --rank ${RANK}
    --world-size ${WORLD_SIZE}
    --local-rank ${LOCAL_RANK}
    --dist-url tcp://${DIST_URL}:${DIST_PORT}
)

MODEL_ARGS=(
    --use-mcore-models
    --disable-bias-linear
    --seq-length 4096
    --max-position-embeddings 32768
    --num-layers 32
    --hidden-size 4096
    --ffn-hidden-size 14336
    --num-attention-heads 32
    --init-method-std 0.01
    --attention-dropout 0.0
    --hidden-dropout 0.0
    --normalization RMSNorm
    --position-embedding-type rope
    --swiglu
    --untie-embeddings-and-output-weights
    --group-query-attention
    --num-query-groups 8
    --no-masked-softmax-fusion
    --no-position-embedding
    --rotary-base 1000000
)
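
# Informational note: these sizes match the published Mixtral 8x7B config --
# 32 layers, hidden size 4096, FFN 14336, 32 attention heads with 8 KV groups
# (grouped-query attention, i.e. 4 query heads per KV head), RoPE base 1e6,
# and 32768 max positions.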

MOE_ARGS=(
    --num-experts 8
    --moe-router-topk 2
    --moe-router-load-balancing-type aux_loss
    --moe-aux-loss-coeff 1e-2
    --moe-token-dispatcher-type alltoall
    --moe-expert-capacity-factor 0.5
    --moe-pad-expert-input-to-capacity
    --moe-grouped-gemm
)
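
# Capacity sketch (the usual formula; Megatron's exact accounting may differ):
# with top-2 routing over 8 experts and capacity factor 0.5, each expert keeps
#   capacity = capacity_factor * tokens * topk / num_experts = 0.5 * T * 2 / 8 = T/8
# tokens of a T-token batch. Overflowing tokens are dropped and inputs are
# padded to capacity (--moe-pad-expert-input-to-capacity) so the grouped GEMM
# shapes stay static.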

DATA_ARGS=(
    --tokenizer-type Llama2Tokenizer
    --tokenizer-model ${TOKENIZER_MODEL}
    --data-path $DATA_PATH
    --split 99990,8,2
)

TRAINING_ARGS=(
    --micro-batch-size 1
    --global-batch-size 256
    --lr 1e-4
    --train-iters 10
    --lr-decay-iters 320000
    --lr-decay-style cosine
    --min-lr 1.0e-5
    --weight-decay 0.1
    --lr-warmup-iters 500
    --clip-grad 1.0
    --bf16
    --overlap-param-gather
    --overlap-grad-reduce
)
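
# Batch accounting (derived from these flags and the parallel sizes below):
# TP(2) x PP(8) = 16 GPUs per model replica, so 32 ranks give data-parallel
# size 2, and gradient accumulation = global / (micro * DP) = 256 / (1 * 2)
# = 128 microbatches per step.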

TORCH_PROFILE_ARGS=(
    --profile
    --profile-ranks 0 1 2 3 8 9 10 11 
    --profile-step-start 3
    --profile-step-end 4
    --profile-dir torch_prof_mixtral_4nodes_tp2-pp8-ep2-ep_tp1
    --use-pytorch-profiler
)

HIP_PROFILE_ARGS=(
    --profile
    --profile-ranks 0 1 2 3 8 9 10 11 
    --profile-step-start 4
    --profile-step-end 5
    --use-hip-profiler
)

MODEL_PARALLEL_ARGS=(
    --tensor-model-parallel-size 2
    --pipeline-model-parallel-size 8
    --expert-model-parallel-size 2
    --expert-tensor-parallel-size 1
    --use-distributed-optimizer
    --sequence-parallel
)
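
# EP=2 shards the 8 experts as 4 per expert-parallel rank (with expert TP of 1);
# --sequence-parallel additionally splits layernorm/dropout activations across
# the TP group, and the distributed optimizer shards optimizer state over DP.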

LOGGING_ARGS=(
    --log-throughput
    --log-interval 1
    --save-interval 10000
    --eval-interval 1000
    --eval-iters -1
    #--save $CHECKPOINT_PATH
    #--load $CHECKPOINT_PATH
    --tensorboard-dir "${CHECKPOINT_PATH}/tensorboard"
    --no-load-optim
    --no-load-rng
)

if [ -n "${WANDB_API_KEY}" ]; then
    LOGGING_ARGS+=(
        --wandb-project ${WANDB_PROJECT:-"Mixtral"}
        --wandb-exp-name ${WANDB_NAME:-"Mixtral_8x7B"}
    )
fi
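
# W&B logging is opt-in: export WANDB_API_KEY (and optionally WANDB_PROJECT /
# WANDB_NAME) before launching to attach these flags.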

APP="python3 -u pretrain_gpt.py \
    ${DISTRIBUTED_ARGS[@]} \
    ${MODEL_ARGS[@]} \
    ${MOE_ARGS[@]} \
    ${DATA_ARGS[@]} \
    ${TRAINING_ARGS[@]} \
    ${MODEL_PARALLEL_ARGS[@]} \
    ${LOGGING_ARGS[@]} \
    "

if [[ $profiling == "torch" ]]; then
    APP+=" ${TORCH_PROFIE_ARGS[@]}"
elif [[ $profiling == "hip" ]]; then
    mkdir -p hip_prof_data
    APP+=" ${HIP_PROFIE_ARGS[@]}"
    APP="hipprof -d hip_prof_data --hip-trace --trace-off ${APP}"
fi
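
# Profiling selection: --profiling=torch writes PyTorch traces for ranks
# 0-3 and 8-11 into the --profile-dir above; --profiling=hip wraps the whole
# run in hipprof, collecting traces under hip_prof_data.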

# for Hygon CPU: every local rank sees all 8 GPUs and selects its own device
# via --local-rank. To pin each rank to its matching NUMA node instead,
# comment the bare ${APP} line and uncomment the numactl line below.
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
${APP}
#numactl --cpunodebind=${LOCAL_RANK} --membind=${LOCAL_RANK} ${APP}