#!/bin/bash

for para in "$@"
do
    if [[ $para == --data_path* ]];then
        data_path=${para#*=}
    elif [[ $para == --tokenizer_path* ]];then
        tokenizer_path=${para#*=}
    elif [[ $para == --checkpoint_path* ]];then
        checkpoint_path=${para#*=}
    elif [[ $para == --profiling* ]];then
        profiling=${para#*=}
    fi
done
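# Example invocation (illustrative only; hostnames and paths are placeholders):
#   mpirun ... bash train_mixtral_8x22B_8nodes.sh <master_addr> <master_port> \
#       --data_path=<dataset_prefix> --tokenizer_path=<tokenizer.model> \
#       --checkpoint_path=<ckpt_dir> --profiling=torch
#
# Optional sanity check (a minimal sketch; drop or extend as needed): fail fast
# if the required inputs were not supplied.
if [[ -z "${data_path}" || -z "${tokenizer_path}" ]]; then
    echo "Missing --data_path/--tokenizer_path; see the example invocation above." >&2
    exit 1
fi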

# data path
DATA_PATH=${data_path}
TOKENIZER_MODEL_PATH=${tokenizer_path}
CHECKPOINT_PATH=${checkpoint_path}

# default env
DIST_URL=${1}
DIST_PORT=${2}
RANK=$OMPI_COMM_WORLD_RANK
LOCAL_RANK=$OMPI_COMM_WORLD_LOCAL_RANK
WORLD_SIZE=$OMPI_COMM_WORLD_SIZE
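# RANK/LOCAL_RANK/WORLD_SIZE above are populated by the Open MPI launcher
# (mpirun exports the OMPI_COMM_WORLD_* variables); they stay empty if the
# script is started without mpirun.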
CURRENT_DIR="$( cd "$( dirname "$0" )" && pwd )"
MEGATRON_PATH=$( dirname $( dirname ${CURRENT_DIR}))
export GLOG_minloglevel=3
export CUDA_DEVICE_MAX_CONNECTIONS=1
export HSA_FORCE_FINE_GRAIN_PCIE=1
export OMP_NUM_THREADS=1
export GPU_MAX_HW_QUEUES=10
export PYTHONPATH=${MEGATRON_PATH}/Megatron-LM:$PYTHONPATH

# enable BatchLinear
export GROUPED_GEMM_BatchLinear=1

DISTRIBUTED_ARGS=(
    --rank ${RANK}
    --world-size ${WORLD_SIZE}
    --local-rank ${LOCAL_RANK}
    --dist-url tcp://${DIST_URL}:${DIST_PORT}
)

MODEL_ARGS=(
    --use-mcore-models
    --disable-bias-linear
    --seq-length 4096
    --max-position-embeddings 65536
    --num-layers 56
    --hidden-size 6144
    --ffn-hidden-size 16384
    --num-attention-heads 48
    --init-method-std 0.01
    --attention-dropout 0.0
    --hidden-dropout 0.0
    --normalization RMSNorm
    --position-embedding-type rope
    --swiglu
    --untie-embeddings-and-output-weights
    --group-query-attention
    --num-query-groups 8
    --no-masked-softmax-fusion
    --no-position-embedding
    --rotary-base 1000000
    --ckpt-format torch
)
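# The geometry above (56 layers, hidden size 6144, FFN 16384, 48 attention
# heads with 8 KV groups, RoPE base 1e6) follows the Mixtral-8x22B
# architecture; seq-length 4096 is the training context, while
# max-position-embeddings 65536 preserves the model's full positional range.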

MOE_ARGS=(
    --num-experts 8
    --moe-router-topk 2
    --moe-router-load-balancing-type aux_loss
    --moe-aux-loss-coeff 1e-3
    --moe-token-dispatcher-type alltoall
    --moe-expert-capacity-factor 1
    --moe-pad-expert-input-to-capacity
    #--moe-grouped-gemm
)
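# Top-2 routing over 8 experts with an aux-loss balancing term; capacity
# factor 1 together with --moe-pad-expert-input-to-capacity pads (and, on
# overflow, drops) tokens so every expert sees a fixed-size buffer, keeping
# the all-to-all shapes static.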

DATA_ARGS=(
    --tokenizer-type Llama2Tokenizer
    --tokenizer-model ${TOKENIZER_MODEL_PATH}
    --data-path ${DATA_PATH}
    --split 99990,8,2
)
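# --split 99990,8,2 are relative weights: roughly 99.99% of the data for
# training, 0.008% for validation, and 0.002% for test.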

TRAINING_ARGS=(
    --micro-batch-size 1
    --global-batch-size 256
    --lr 1e-4
    --train-iters 10
    --lr-decay-iters 10000
    --lr-decay-style cosine
    --min-lr 1.0e-6
    --weight-decay 0.1
    --lr-warmup-iters 2000
    --clip-grad 1.0
    --bf16
    --overlap-param-gather
    --overlap-grad-reduce
)
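# Rough scale of a step with these settings: global-batch-size 256 *
# seq-length 4096 = 1,048,576 tokens per iteration. train-iters 10 makes this
# a short smoke-test run, while the warmup (2000 iters) and cosine decay
# (10000 iters) schedule is sized for a much longer job.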

MODEL_PARALLEL_ARGS=(
    --tensor-model-parallel-size 4
    --pipeline-model-parallel-size 8
    --expert-model-parallel-size 8
    --expert-tensor-parallel-size 1
    --context-parallel-size 1
    --use-distributed-optimizer
    --sequence-parallel
)
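# Parallelism layout, assuming 8 nodes x 8 GPUs = 64 ranks: TP=4 * PP=8 = 32
# GPUs per model replica, so the data-parallel size is 64/32 = 2; the 8
# experts are sharded across EP=8 ranks with expert tensor parallelism 1.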

LOGGING_ARGS=(
    --log-throughput
    --log-interval 1
    --save-interval 10000
    --eval-interval 1000
    --eval-iters -1
    #--save $CHECKPOINT_PATH
    #--load $CHECKPOINT_PATH
    --tensorboard-dir "${CHECKPOINT_PATH}/tensorboard"
    --no-load-optim
    --no-load-rng
    --no-save-optim
)
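# --save/--load are commented out above, so no checkpoints are written or
# resumed; TensorBoard logs still land under ${CHECKPOINT_PATH}/tensorboard.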

TORCH_PROFILE_ARGS=(
    --profile
    --profile-ranks 0 1 8 9 16 17 24 25
    --profile-step-start 3
    --profile-step-end 4
    --profile-dir torch_prof_mixtral8x22B_8nodes_tp4-pp8-ep8-etp1-cp1
    --use-pytorch-profiler
)
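# With 8 ranks per node, ranks 0,1,8,9,16,17,24,25 are the first two ranks on
# each of the first four nodes, so the torch profile samples several pipeline
# stages rather than a single one.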

HIP_PROFILE_ARGS=(
    --profile
    --profile-ranks 0 1 2 3 4 5 6 7
    --profile-step-start 4
    --profile-step-end 5
    --use-hip-profiler
)

if [ -n "${WANDB_API_KEY}" ]; then
    LOGGING_ARGS+=(
        --wandb-project ${WANDB_PROJECT:-"Mixtral"}
        --wandb-exp-name ${WANDB_NAME:-"Mixtral_8x22B"}
    )
fi
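# Weights & Biases logging is opt-in: export WANDB_API_KEY (and optionally
# WANDB_PROJECT / WANDB_NAME) before launching to enable it.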

APP="python3 -u ${MEGATRON_PATH}/pretrain_gpt.py \
    ${DISTRIBUTED_ARGS[@]} \
    ${MODEL_ARGS[@]} \
    ${MOE_ARGS[@]} \
    ${DATA_ARGS[@]} \
    ${TRAINING_ARGS[@]} \
    ${MODEL_PARALLEL_ARGS[@]} \
    ${LOGGING_ARGS[@]} \
    "

if [[ $profiling == "torch" ]]; then
    APP+=" ${TORCH_PROFIE_ARGS[@]}"
elif [[ $profiling == "hip" ]]; then
    mkdir -p hip_prof_data
    APP+=" ${HIP_PROFIE_ARGS[@]}"
    APP="hipprof -d hip_prof_data --hip-trace --trace-off ${APP}"
fi
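# With --profiling=hip the whole command is wrapped by hipprof and traces are
# written to ./hip_prof_data (the directory passed via -d above).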

# For Hygon CPUs, run the command through the CPU-binding launcher.
${MEGATRON_PATH}/requirements/launch_with_binding.sh ${LOCAL_RANK} ${APP}