Commit c271aaae authored by wxj

Update Llama_pretraining.sh

parent c5369391
Pipeline #2447 passed
...@@ -8,7 +8,7 @@ export OMP_NUM_THREADS=1
export NCCL_P2P_LEVEL=PXB # SYS
#export HIP_ALLOC_INITIALIZE=0
#export GPU_MAX_HW_QUEUES=20 # sglang bubbles (idle gaps)
export NCCL_ALGO=Ring
export NCCL_NCHANNELS_PER_PEER=16
...@@ -17,54 +17,72 @@ export NCCL_IB_TIMEOUT=22
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NCCL_IB_HCA=mlx5_1,mlx5_2
# export NCCL_IB_HCA=mlx5_0,mlx5_1,mlx5_2,mlx5_3,mlx5_4,mlx5_5,mlx5_6,mlx5_7
export NCCL_NET_GDR_LEVEL=SYS
export NCCL_NET_GDR_READ=0
export GLOG_minloglevel=3 # only print error-level NCCL logs
# export TORCH_COMPILE_DEBUG=1 # inspect the compiled graph
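# (Optional sanity check, illustrative sketch: NCCL_IB_HCA above is pinned to
#  mlx5_1,mlx5_2, so warn early if those HCAs are missing on this node instead
#  of failing later inside NCCL init.)
for hca in mlx5_1 mlx5_2; do
    if [ ! -d "/sys/class/infiniband/${hca}" ]; then
        echo "WARNING: IB HCA ${hca} not found under /sys/class/infiniband" >&2
    fi
done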
source /opt/dtk/env.sh
# hipblaslt library (needed when TE calls GEMM)
# export LD_LIBRARY_PATH=/data/hipblaslt-install-0904/lib:$LD_LIBRARY_PATH
# updated rocblas builds
# export LD_LIBRARY_PATH=/data/rocblas-install_qwen1211/lib:$LD_LIBRARY_PATH
# export LD_LIBRARY_PATH=/data/rocblas-install_qwen1228/lib:$LD_LIBRARY_PATH
# export LD_LIBRARY_PATH=/data/rocblas-install_0107_trans/lib:$LD_LIBRARY_PATH
# torch: collapse multi-stream execution onto a single stream
# export ALLREDUCE_STREAM_WITH_COMPUTE=1
# add synchronization during profiler capture to avoid stalls
# export GPU_FLUSH_ON_EXECUTION=1
# export HIP_DIRECT_DISPATCH=0
# collect rocblas GEMM sizes
export ROCBLAS_LAYER=3
# collect flash-attention sizes
# export FLASH_ATTENTION_PRINT_PARAM=1
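# (Illustrative sketch: ROCBLAS_LAYER=3 sends trace/bench logs to stderr by
#  default; if the rocBLAS build supports the log-path variables, writing them
#  to per-rank files keeps the training output readable. File names below are
#  placeholders.)
# export ROCBLAS_LOG_TRACE_PATH=./rocblas_trace_rank${OMPI_COMM_WORLD_RANK:-0}.log
# export ROCBLAS_LOG_BENCH_PATH=./rocblas_bench_rank${OMPI_COMM_WORLD_RANK:-0}.log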
CHECKPOINT_PATH=./tmp_7b #$1 #<Specify path>
TENSORBOARD_LOGS_PATH=./tmp_7b #$2 #<Specify path>
DATA_PATH="/data/datasets/nemo_pretrain/oscar-1GB/oscar-1GB-llama_text_document" #<Specify path and file prefix>_text_document
# GPT_MODEL_ARGS=(
# --num-layers 32
# --hidden-size 5120
# --ffn-hidden-size 13824
# --num-attention-heads 40
# --seq-length 4096 #4096
# --max-position-embeddings 32768 #4096
# --num-query-groups 40
# --group-query-attention
# )
GPT_MODEL_ARGS=(
--num-layers 32
--hidden-size 4096
--ffn-hidden-size 11008
--num-attention-heads 32
--max-position-embeddings 4096
--normalization RMSNorm
--position-embedding-type rope
--untie-embeddings-and-output-weights # keep embedding and output weights separate for flexibility
)
# GPT_MODEL_ARGS=(
# --num-layers 40
# --hidden-size 5120
# --ffn-hidden-size 13824
# --num-attention-heads 40
# --max-position-embeddings 4096
# --normalization RMSNorm
# --position-embedding-type rope
# --untie-embeddings-and-output-weights # keep embedding and output weights separate for flexibility
# )
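# (Rough size check, illustrative sketch: approximate parameter count for the
#  active GPT_MODEL_ARGS above, assuming a LLaMA-style block of 4*h^2 attention
#  weights plus 3*h*ffn SwiGLU weights and untied embeddings; the ~32000-token
#  vocabulary is an assumption, it is not set in this script.)
H=4096; FFN=11008; NL=32; VOCAB=32000
APPROX_PARAMS=$(( NL * (4*H*H + 3*H*FFN) + 2*VOCAB*H ))
echo "approx. parameter count: ${APPROX_PARAMS}"   # ~6.7e9 for these settings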
# export NVTE_FLASH_ATTN=1 # use the cutlass path
export NVTE_FLASH_ATTN_TRITON=1 # use the triton flash-attention path
# --transformer-impl transformer_engine # use these two flags for the mcore path
# --use-mcore-models
# --transformer-impl local # use these two flags for the legacy path
# --use-legacy-models
TRAINING_ARGS=(
--transformer-impl local # use these two flags for the legacy path
--use-legacy-models
--micro-batch-size 1
--global-batch-size 64 #240 #60 #512 #64
--train-iters 10
--weight-decay 0.1
--adam-beta1 0.9
...@@ -72,34 +90,30 @@ TRAINING_ARGS=(
--init-method-std 0.006
--clip-grad 1.0
--bf16
# --fp16 # enabling fp16 requires an explicit loss-scale
# --loss-scale 1024
--use-distributed-optimizer
--disable-bias-linear
--attention-dropout 0
--hidden-dropout 0
--no-gradient-accumulation-fusion # fusion currently gives wrong results; it can be re-enabled once apex is updated
--add-qkv-bias
--swiglu
--lr 3.0e-5
--lr-decay-style cosine
--min-lr 3.0e-6
--lr-warmup-iters 1
--ckpt-format torch
--ddp-average-in-collective # in DP communication, average gradients/params directly in the collective instead of summing (to one device) first and then averaging
# --recompute-granularity full # recomputation lowers memory usage at the cost of extra time
# --recompute-num-layers 5 #0 #
# --recompute-method block
--overlap-grad-reduce # overlap the DDP grad reduce
# --tp-comm-overlap # overlap tensor-parallel comm with GEMM; optimization not yet adapted
# --tp-comm-overlap-rs-dgrad # overlap reduce-scatter with the dgrad GEMM; optimization not yet adapted
--use-flash-attn-cutlass
)
# --use-flash-attn-cutlass # cutlass fa
# --use-flash-attn-triton # triton fa
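# (Consistency check, illustrative sketch: Megatron derives gradient-accumulation
#  steps from global-batch-size = micro-batch-size * data-parallel-size * accum,
#  with data-parallel-size = world_size / (TP * PP). TP/PP here are placeholder
#  assumptions; use the sizes actually passed via the model-parallel flags.)
MBS=1; GBS=64; TP=4; PP=1
WS=${OMPI_COMM_WORLD_SIZE:-8}
DP=$(( WS / (TP * PP) ))
if (( DP == 0 || GBS % (MBS * DP) != 0 )); then
    echo "WARNING: global batch ${GBS} is not a multiple of micro-batch*DP=$((MBS*DP))" >&2
fi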
MODEL_PARALLEL_ARGS=(
--sequence-parallel
...@@ -109,13 +123,10 @@ MODEL_PARALLEL_ARGS=(
DATA_ARGS=(
--data-path $DATA_PATH
--seq-length 4096 #4096
--split 949,50,1
--tokenizer-type Llama2Tokenizer
--tokenizer-model /data/model_weights/llama2_7b_hf/tokenizer.model
)
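# (Optional guard, illustrative sketch: --data-path is a file prefix for a
#  Megatron indexed dataset; warn if the .bin/.idx pair it points to is missing.)
if [ ! -f "${DATA_PATH}.bin" ] || [ ! -f "${DATA_PATH}.idx" ]; then
    echo "WARNING: indexed dataset ${DATA_PATH}.bin/.idx not found" >&2
fi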
EVAL_AND_LOGGING_ARGS=(
...@@ -134,7 +145,7 @@ PROFILE_ARGS=(
--profile-step-start 4
--profile-step-end 5
--use-pytorch-profiler
--profile-ranks 0 1 2 3 4 5 6 7
--profile-dir prof_data
)
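# (Illustrative: --profile-dir above points at prof_data; creating it up front
#  avoids a missing-directory surprise when profiling is enabled, and is
#  harmless otherwise.)
mkdir -p prof_data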
...@@ -142,7 +153,7 @@ RANK=$OMPI_COMM_WORLD_RANK
LOCAL_RANK=$OMPI_COMM_WORLD_LOCAL_RANK
WORLD_SIZE=$OMPI_COMM_WORLD_SIZE
DIST_URL=${1}
DIST_PORT=34567
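# (Usage sketch with placeholder host list and URL: the rank variables above
#  come from OpenMPI, so the script is expected to be started via mpirun, e.g.
#    mpirun -np 8 --hostfile <hosts> bash Llama_pretraining.sh <dist-url>
#  where <dist-url> is passed through as $1 to DIST_URL.)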
DISTRIBUTED_ARGS=(
--rank ${RANK}
...@@ -158,33 +169,35 @@ APP="python -u pretrain_gpt.py \
${DATA_ARGS[@]} \
${EVAL_AND_LOGGING_ARGS[@]} \
${DISTRIBUTED_ARGS[@]} \
" "
# 开启profile
# ${PROFILE_ARGS[@]} \
export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 # 4,5,6,7
# export CUDA_VISIBLE_DEVICES=4,5,6,7 # 0,1,2,3,
${APP}
# case ${LOCAL_RANK} in
# [0])
# # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
# ${APP}
# # numactl --cpunodebind=0 --membind=0 ${APP}
# ;;
# [1])
# # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
# ${APP}
# # numactl --cpunodebind=0 --membind=0 ${APP}
# ;;
# [2])
# # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
# ${APP}
# # numactl --cpunodebind=0 --membind=0 ${APP}
# ;;
# [3])
# # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
# ${APP}
# # numactl --cpunodebind=0 --membind=0 ${APP}
# ;;
# [4])
# export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
# ${APP}
...@@ -205,4 +218,4 @@ case ${LOCAL_RANK} in
# ${APP}
# # numactl --cpunodebind=0 --membind=0 ${APP}
# ;;
# esac