#!/bin/bash
set -eux
#export FLASH_ATTENTION_PRINT_PARAM=1

# Runs the "7B" parameter model
export HSA_FORCE_FINE_GRAIN_PCIE=1
export OMP_NUM_THREADS=1
export NCCL_P2P_LEVEL=PXB # SYS
#export HIP_ALLOC_INITIALIZE=0
#export GPU_MAX_HW_QUEUES=20
export NCCL_ALGO=Ring
export NCCL_NCHANNELS_PER_PEER=16
export NCCL_MIN_NCHANNELS=20
export NCCL_IB_TIMEOUT=22
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NCCL_IB_HCA=mlx5_1,mlx5_2
export NCCL_NET_GDR_LEVEL=SYS
export NCCL_NET_GDR_READ=0
export GLOG_minloglevel=3 # print error-level NCCL logs

source /opt/dtk/env.sh

# TE's GEMM calls need the hipBLASLt library
# export LD_LIBRARY_PATH=/data/hipblaslt-install-0904/lib:$LD_LIBRARY_PATH
# Use the updated rocBLAS
export LD_LIBRARY_PATH=/data/rocblas-install/lib:$LD_LIBRARY_PATH

# Add synchronization for profiler collection
# export GPU_FLUSH_ON_EXECUTION=1
# export HIP_DIRECT_DISPATCH=0

CHECKPOINT_PATH=./tmp_7b #$1
TENSORBOARD_LOGS_PATH=./tmp_7b #$2
DATA_PATH="/data/datasets/nemo_pretrain/oscar-1GB/oscar-1GB-llama_text_document" #_text_document

# Alternative (larger) model configuration, kept commented out:
# GPT_MODEL_ARGS=(
#     --num-layers 32
#     --hidden-size 5120
#     --ffn-hidden-size 13824
#     --num-attention-heads 40
#     --seq-length 4096 #4096
#     --max-position-embeddings 32768 #4096
#     --num-query-groups 40
#     --group-query-attention
# )

GPT_MODEL_ARGS=(
    --num-layers 6
    --hidden-size 4096
    --ffn-hidden-size 11008
    --num-attention-heads 32
    --seq-length 4096 #4096
    --max-position-embeddings 4096
)

# export NVTE_FLASH_ATTN=1      # use the CUTLASS flash-attention path
export NVTE_FLASH_ATTN_TRITON=1 # use the Triton flash-attention path

# --transformer-impl transformer_engine
# --use-mcore-models
# --transformer-impl local
# --use-legacy-models

TRAINING_ARGS=(
    --transformer-impl transformer_engine
    --use-mcore-models
    --micro-batch-size 1
    --global-batch-size 6 #240 #60 #512 #64
    --train-iters 10
    --weight-decay 0.1
    --adam-beta1 0.9
    --adam-beta2 0.95
    --init-method-std 0.006
    --clip-grad 1.0
    --bf16
    --use-distributed-optimizer
    --disable-bias-linear
    --attention-dropout 0
    --hidden-dropout 0
    --no-gradient-accumulation-fusion
    --add-qkv-bias
    --swiglu
    --lr 3.0e-5
    --lr-decay-style cosine
    --min-lr 3.0e-6
    --lr-warmup-iters 1
    --ckpt-format torch
    --ddp-average-in-collective
    --recompute-granularity full
    --recompute-num-layers 5 #0
    # --recompute-method block
    --overlap-grad-reduce
    --use-flash-attn-triton
)
# --add-qkv-bias # qwen
# --ckpt-format torch
# --ddp-average-in-collective
# --recompute-granularity full
# --recompute-num-layers 5
# --recompute-method block
# --overlap-grad-reduce
# --use-flash-attn-cutlass
# --use-flash-attn-triton

MODEL_PARALLEL_ARGS=(
    --sequence-parallel
    --tensor-model-parallel-size 2
    --pipeline-model-parallel-size 2
)

DATA_ARGS=(
    --data-path $DATA_PATH
    --split 949,50,1
    --untie-embeddings-and-output-weights
    --use-rotary-position-embeddings
    --normalization RMSNorm
    --no-position-embedding
    --tokenizer-type Llama2Tokenizer
    --tokenizer-model /data/model_weights/llama2_7b_hf/tokenizer.model
)

EVAL_AND_LOGGING_ARGS=(
    --log-interval 1
    --log-throughput
    --save-interval 1000
    --eval-interval 1000
    --save $CHECKPOINT_PATH
    --load $CHECKPOINT_PATH
    --eval-iters 10
    --tensorboard-dir $TENSORBOARD_LOGS_PATH
)

PROFILE_ARGS=(
    --profile
    --profile-step-start 4
    --profile-step-end 5
    --use-pytorch-profiler
    --profile-ranks 0 3
    --profile-dir prof_data
)

RANK=$OMPI_COMM_WORLD_RANK
LOCAL_RANK=$OMPI_COMM_WORLD_LOCAL_RANK
WORLD_SIZE=$OMPI_COMM_WORLD_SIZE
DIST_URL=${1}
DIST_PORT=34566

DISTRIBUTED_ARGS=(
    --rank ${RANK}
    --world-size ${WORLD_SIZE}
    --local-rank ${LOCAL_RANK}
    --dist-url tcp://${DIST_URL}:${DIST_PORT}
)

APP="python -u pretrain_gpt.py \
    ${GPT_MODEL_ARGS[@]} \
    ${TRAINING_ARGS[@]} \
    ${MODEL_PARALLEL_ARGS[@]} \
    ${DATA_ARGS[@]} \
    ${EVAL_AND_LOGGING_ARGS[@]} \
    ${DISTRIBUTED_ARGS[@]} \
    ${PROFILE_ARGS[@]} \
    "

export HIP_VISIBLE_DEVICES=4,5,6,7 # 0,1,2,3 # 4,5,6,7 #,
# export CUDA_VISIBLE_DEVICES=4,5,6,7 # 0,1,2,3,

# ${APP}

case ${LOCAL_RANK} in
[0])
    # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
    ${APP}
    # numactl --cpunodebind=0 --membind=0 ${APP}
    ;;
[1])
    # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
    ${APP}
    # numactl --cpunodebind=0 --membind=0 ${APP}
    ;;
[2])
    # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
    ${APP}
    # numactl --cpunodebind=0 --membind=0 ${APP}
    ;;
[3])
    # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
    ${APP}
    # numactl --cpunodebind=0 --membind=0 ${APP}
    ;;
# [4])
#     export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
#     ${APP}
#     # numactl --cpunodebind=0 --membind=0 ${APP}
#     ;;
# [5])
#     export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
#     ${APP}
#     # numactl --cpunodebind=0 --membind=0 ${APP}
#     ;;
# [6])
#     export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
#     ${APP}
#     # numactl --cpunodebind=0 --membind=0 ${APP}
#     ;;
# [7])
#     export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
#     ${APP}
#     # numactl --cpunodebind=0 --membind=0 ${APP}
#     ;;
esac
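
# Usage sketch (not part of the original script; the launcher invocation, script name,
# and host below are assumptions): the script reads its rank layout from Open MPI's
# OMPI_COMM_WORLD_* variables and takes the rendezvous host as $1, so one plausible
# single-node launch for 4 local ranks (TP=2 x PP=2 on HIP devices 4-7) would be:
#
#   mpirun -np 4 --bind-to none -x PATH -x LD_LIBRARY_PATH \
#       bash ./pretrain_llama2_7b_dcu.sh 127.0.0.1
#
# Adjust -np, the host list, and the master address for multi-node runs.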