#!/bin/bash
# Runs Megatron-LM GPT pretraining on Hygon DCU nodes via Open MPI.
# The model config below (96 layers, hidden size 12288, 96 attention heads)
# is the GPT-3 175B-scale configuration.

# ROCm/DTK runtime environment
export HSA_FORCE_FINE_GRAIN_PCIE=1
export OMP_NUM_THREADS=1
export NCCL_P2P_LEVEL=PXB
source /opt/dtk/env.sh
export LD_LIBRARY_PATH=/mnt/fs/user/llama/dcu/dtk-24.04/rocblas/lib:$LD_LIBRARY_PATH
#export LD_LIBRARY_PATH=/mnt/fs/user/llama/dcu/rocblas-install_padding/lib:$LD_LIBRARY_PATH
#export HIP_ALLOC_INITIALIZE=0
#export GPU_MAX_HW_QUEUES=20

# NCCL/RCCL communication tuning
export NCCL_ALGO=Ring
export NCCL_NCHANNELS_PER_PEER=16
export NCCL_MIN_NCHANNELS=20
export NCCL_IB_TIMEOUT=22
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NCCL_IB_HCA=mlx5_1,mlx5_2
#export NCCL_SOCKET_IFNAME=ibs8
export NCCL_NET_GDR_LEVEL=SYS
export NCCL_NET_GDR_READ=0
#export NCCL_DEBUG=info

# Rank information provided by Open MPI
lrank=$OMPI_COMM_WORLD_LOCAL_RANK
RANK=$OMPI_COMM_WORLD_RANK
WORLD_SIZE=$OMPI_COMM_WORLD_SIZE

# Checkpoint, logging, and dataset paths
CHECKPOINT_PATH=./tmp        #$1
TENSORBOARD_LOGS_PATH=./tmp  #$2
DATA_PATH="/mnt/fs/user/llama/dcu/Megatron-LM-main/oscar/my-gpt2_text_document" #_text_document

GPT_MODEL_ARGS=(
    --num-layers 96
    --hidden-size 12288
    --num-attention-heads 96
    --seq-length 2048
    --max-position-embeddings 2048
)

TRAINING_ARGS=(
    --transformer-impl local
    --use-legacy-models
    --micro-batch-size 1
    --global-batch-size 512 #256 1024
    --train-iters 100
    --weight-decay 0.1
    --adam-beta1 0.9
    --adam-beta2 0.95
    --init-method-std 0.006
    --clip-grad 1.0
    --bf16
    --disable-bias-linear
    --no-bias-gelu-fusion
    --no-gradient-accumulation-fusion
    --use-distributed-optimizer
    --use-flash-attn-triton
    --lr 1.5e-04
    --use-tp-pp-dp-mapping
    --lr-decay-style cosine
    --min-lr 1.5e-05
    --lr-warmup-iters 1
)
#--initial-loss-scale 65536
#--loss-scale 1024
#--recompute-granularity full
#--recompute-method uniform
#--recompute-num-layers 1
#--recompute-activations
#--use-flash-attn

MODEL_PARALLEL_ARGS=(
    --sequence-parallel
    --tensor-model-parallel-size 4 #4
    --pipeline-model-parallel-size 8 #4 8
)

DATA_ARGS=(
    --data-path $DATA_PATH
    --split 949,50,1
    --tokenizer-type GPT2BPETokenizer
    --vocab-file gpt2-vocab.json
    --merge-file gpt2-merges.txt
)

EVAL_AND_LOGGING_ARGS=(
    --log-interval 1
    --log-throughput
    --save-interval 10000
    --eval-interval 1000
    --save $CHECKPOINT_PATH
    --load $CHECKPOINT_PATH
    --eval-iters 10
    --tensorboard-dir $TENSORBOARD_LOGS_PATH
)

# $1 is the master node address used for the TCP rendezvous in --dist_url.
APP="python3 -u pretrain_gpt.py \
    ${GPT_MODEL_ARGS[@]} \
    ${TRAINING_ARGS[@]} \
    ${MODEL_PARALLEL_ARGS[@]} \
    ${DATA_ARGS[@]} \
    ${EVAL_AND_LOGGING_ARGS[@]} \
    --rank ${RANK} \
    --world_size ${WORLD_SIZE} \
    --dist_url tcp://${1}:34566 \
    "

# Every local rank (0-7) sees all 8 DCUs on the node; device selection is
# left to the framework.
case ${lrank} in
[0-7])
    export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
    ${APP}
    ;;
esac
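
# --- Usage sketch (assumption: one MPI rank per DCU, 8 ranks per node) ---
# The script reads OMPI_COMM_WORLD_LOCAL_RANK / _RANK / _SIZE, so it must be
# started by mpirun rather than invoked directly; the first positional
# argument is the master node address used for --dist_url.
# Hypothetical 4-node / 32-rank launch (hostfile and script name are
# placeholders, not part of this repository):
#
#   mpirun -np 32 -N 8 --hostfile ./hostfile \
#          -x PATH -x LD_LIBRARY_PATH \
#          bash ./pretrain_gpt_dcu.sh <master-node-ip>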