#!/bin/bash
set -eux

#export FLASH_ATTENTION_PRINT_PARAM=1
# Runs the "7B" parameter model
export HSA_FORCE_FINE_GRAIN_PCIE=1
export OMP_NUM_THREADS=1
export NCCL_P2P_LEVEL=PXB # SYS

#export HIP_ALLOC_INITIALIZE=0
#export GPU_MAX_HW_QUEUES=20 # sglang bubbles (idle gaps)

export NCCL_ALGO=Ring
export NCCL_NCHANNELS_PER_PEER=16
export NCCL_MIN_NCHANNELS=20
export NCCL_IB_TIMEOUT=22
export CUDA_DEVICE_MAX_CONNECTIONS=1

export NCCL_IB_HCA=mlx5_1,mlx5_2
# export NCCL_IB_HCA=mlx5_0,mlx5_1,mlx5_2,mlx5_3,mlx5_4,mlx5_5,mlx5_6,mlx5_7
export NCCL_NET_GDR_LEVEL=SYS
export NCCL_NET_GDR_READ=0
export GLOG_minloglevel=3 # silence low-severity NCCL (glog) log output

# export TORCH_COMPILE_DEBUG=1 # inspect the compiled graphs

source /opt/dtk/env.sh
# Load the hipBLASLt library
# export LD_LIBRARY_PATH=/data/hipblaslt-install-0904/lib:$LD_LIBRARY_PATH 
# Use an updated rocBLAS build
# export LD_LIBRARY_PATH=/data/rocblas-install_qwen1211/lib:$LD_LIBRARY_PATH
# export LD_LIBRARY_PATH=/data/rocblas-install_qwen1228/lib:$LD_LIBRARY_PATH
# export LD_LIBRARY_PATH=/data/rocblas-install_0107_trans/lib:$LD_LIBRARY_PATH

# torch: run allreduce on the compute stream (multi-stream -> single stream)
# export ALLREDUCE_STREAM_WITH_COMPUTE=1

# Add synchronization during profiler collection to avoid stalls
# export GPU_FLUSH_ON_EXECUTION=1
# export HIP_DIRECT_DISPATCH=0

# Dump rocBLAS GEMM sizes (see the note below)
export ROCBLAS_LAYER=3
# Dump flash-attention sizes
# export FLASH_ATTENTION_PRINT_PARAM=1
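
# Note on ROCBLAS_LAYER above (hedged; based on standard rocBLAS logging behavior):
# it is a bitmask where 1 = trace logging and 2 = bench logging, so 3 enables both.
# The logs go to stderr unless redirected to files, e.g.:
# export ROCBLAS_LOG_TRACE_PATH=./rocblas_trace.log
# export ROCBLAS_LOG_BENCH_PATH=./rocblas_bench.log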

CHECKPOINT_PATH=./tmp_7b #$1 #<Specify path>
TENSORBOARD_LOGS_PATH=./tmp_7b  #$2 #<Specify path>
DATA_PATH="/data/datasets/nemo_pretrain/oscar-1GB/oscar-1GB-llama_text_document" 

GPT_MODEL_ARGS=(
    --num-layers 32
    --hidden-size 4096
    --ffn-hidden-size 11008 
    --num-attention-heads 32
    --max-position-embeddings 4096

    --normalization RMSNorm 
    --position-embedding-type rope
    --untie-embeddings-and-output-weights # keep separate embedding and output weights for flexibility
)
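
# Rough size check for the config above (illustrative arithmetic; vocab size 32000 is
# assumed from the Llama-2 tokenizer): per layer, attention holds 4*4096^2 ~ 67.1M params
# and the SwiGLU MLP holds 3*4096*11008 ~ 135.3M, so 32 layers ~ 6.48B; adding the untied
# input/output embeddings (2 * 32000 * 4096 ~ 0.26B) gives ~ 6.74B, i.e. the "7B" model.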

# Alternative 13B-size config:
# GPT_MODEL_ARGS=(
#     --num-layers 40
#     --hidden-size 5120
#     --ffn-hidden-size 13824
#     --num-attention-heads 40
#     --max-position-embeddings 4096

#     --normalization RMSNorm
#     --position-embedding-type rope
#     --untie-embeddings-and-output-weights # keep separate embedding and output weights
# )

# export NVTE_FLASH_ATTN=1 # use the CUTLASS flash-attention path
export NVTE_FLASH_ATTN_TRITON=1 # use the Triton flash-attention path
# --transformer-impl transformer_engine # use these two flags for the mcore path
    # --use-mcore-models
    # --transformer-impl local # use these two flags for the legacy path
    # --use-legacy-models 
TRAINING_ARGS=(
    --transformer-impl local # these two flags select the legacy model path
    --use-legacy-models
    --micro-batch-size 1
    --global-batch-size 64 # other values tried: 240, 60, 512
    --train-iters 10
    --weight-decay 0.1 
    --adam-beta1 0.9 
    --adam-beta2 0.95 
    --init-method-std 0.006 
    --clip-grad 1.0 
    --bf16
    # --fp16 # fp16 requires an explicit loss scale
    # --loss-scale 1024
    --use-distributed-optimizer 
    --disable-bias-linear
    --attention-dropout 0
    --hidden-dropout 0
    --no-gradient-accumulation-fusion # fusion currently gives wrong results; can be enabled once apex is updated
    --swiglu
    --lr 3.0e-5 
    --lr-decay-style cosine 
    --min-lr 3.0e-6
    --lr-warmup-iters 1
    --ckpt-format torch
    --ddp-average-in-collective # average grads/params directly in the DP collective instead of summing (to one device) first and then averaging
    # --recompute-granularity full # activation recomputation: lower memory, more compute time
    # --recompute-num-layers 5
    # --recompute-method block
    --overlap-grad-reduce # overlap DDP grad reduce with the backward pass
    # --tp-comm-overlap # overlap tensor-parallel comm with GEMM; optimization not yet adapted
    # --tp-comm-overlap-rs-dgrad # overlap reduce-scatter with the dgrad GEMM; optimization not yet adapted
    --use-flash-attn-cutlass
)
# --use-flash-attn-cutlass # cutlass fa
# --use-flash-attn-triton # triton fa

MODEL_PARALLEL_ARGS=(
    --sequence-parallel
    --tensor-model-parallel-size 2
    --pipeline-model-parallel-size 2
)
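
# With the 8 GPUs made visible below, tensor-parallel 2 x pipeline-parallel 2 leaves a
# data-parallel size of 8 / (2*2) = 2 (illustrative; the real DP size depends on WORLD_SIZE),
# so global-batch-size 64 with micro-batch-size 1 means 64 / (1*2) = 32 gradient-accumulation
# micro-batches per iteration.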

DATA_ARGS=(
    --data-path $DATA_PATH 
    --seq-length 4096
    --split 949,50,1
    --tokenizer-type Llama2Tokenizer
    --tokenizer-model /data/model_weights/llama2_7b_hf/tokenizer.model
)
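
# The *_text_document path above is a Megatron indexed dataset (.bin/.idx pair). A sketch of how
# such a prefix is typically produced with Megatron's tools/preprocess_data.py (illustrative only:
# the input JSONL path is hypothetical and flags can differ between Megatron versions):
# python tools/preprocess_data.py \
#     --input /data/datasets/oscar-1GB.jsonl \
#     --output-prefix /data/datasets/nemo_pretrain/oscar-1GB/oscar-1GB-llama \
#     --tokenizer-type Llama2Tokenizer \
#     --tokenizer-model /data/model_weights/llama2_7b_hf/tokenizer.model \
#     --workers 8 --append-eod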

EVAL_AND_LOGGING_ARGS=(
    --log-interval 1
    --log-throughput
    --save-interval 1000 
    --eval-interval 1000 
    --save $CHECKPOINT_PATH 
    --load $CHECKPOINT_PATH 
    --eval-iters 10
    --tensorboard-dir $TENSORBOARD_LOGS_PATH 
)
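
# Loss and throughput scalars (via --log-throughput) land in TENSORBOARD_LOGS_PATH and can be
# inspected afterwards with, e.g.:
# tensorboard --logdir ./tmp_7b --port 6006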

PROFILE_ARGS=(
    --profile
    --profile-step-start 4
    --profile-step-end 5
    --use-pytorch-profiler
    --profile-ranks 0 1 2 3 4 5 6 7
    --profile-dir prof_data
)

RANK=$OMPI_COMM_WORLD_RANK
LOCAL_RANK=$OMPI_COMM_WORLD_LOCAL_RANK
WORLD_SIZE=$OMPI_COMM_WORLD_SIZE
DIST_URL=${1}
DIST_PORT=34567

DISTRIBUTED_ARGS=(
    --rank ${RANK}
    --world-size ${WORLD_SIZE}
    --local-rank ${LOCAL_RANK}
    --dist-url tcp://${DIST_URL}:${DIST_PORT}
)
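
# RANK/WORLD_SIZE come from Open MPI's OMPI_COMM_WORLD_* variables, so the script is meant to be
# launched with mpirun, passing the master node's address as $1. Illustrative single-node launch
# (the host address and process count are assumptions, not part of the original script):
# mpirun -np 8 --bind-to none -x PATH -x LD_LIBRARY_PATH bash Llama_pretraining.sh 10.0.0.1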

APP="python -u pretrain_gpt.py \
        ${GPT_MODEL_ARGS[@]} \
        ${TRAINING_ARGS[@]} \
        ${MODEL_PARALLEL_ARGS[@]} \
        ${DATA_ARGS[@]} \
        ${EVAL_AND_LOGGING_ARGS[@]} \
        ${DISTRIBUTED_ARGS[@]} \
"
# To enable profiling, add the following line inside the APP command above:
# ${PROFILE_ARGS[@]} \

export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 # or a subset, e.g. 4,5,6,7
# export CUDA_VISIBLE_DEVICES=4,5,6,7 # or 0,1,2,3
${APP}
# Alternative: dispatch per local rank, e.g. to pin each rank to a NUMA node with numactl:
# case ${LOCAL_RANK} in
# [0-3])
# #   export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
#   ${APP}
#   # numactl --cpunodebind=0 --membind=0 ${APP}
#   ;;
# [4-7])
#   export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
#   ${APP}
#   # numactl --cpunodebind=0 --membind=0 ${APP}
#   ;;
# esac