#!/bin/bash
# Megatron-LM GPT pretraining script (pretrain_gpt.py).
# NOTE: despite the original "7B" label, the arguments below define a
# GPT-2 small-sized config (12 layers, hidden size 768, 12 attention heads).

# Runtime and communication environment (the NCCL_* variables are also
# honored by RCCL on ROCm).
export HSA_FORCE_FINE_GRAIN_PCIE=1
export OMP_NUM_THREADS=1
export NCCL_P2P_LEVEL=SYS
export NCCL_ALGO=Ring
export NCCL_NCHANNELS_PER_PEER=16
export NCCL_MIN_NCHANNELS=20
export NCCL_IB_TIMEOUT=22
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NCCL_NET_GDR_LEVEL=SYS
export NCCL_NET_GDR_READ=0

# Paths: checkpoints, TensorBoard logs, preprocessed dataset, and GPT-2 tokenizer files.
CHECKPOINT_PATH=./tmp        # $1
TENSORBOARD_LOGS_PATH=./tmp  # $2
DATA_PATH="/datasets/oscar-1GB-gpt_text_document"  # _text_document
VOCAB_PATH=./gpt2-vocab.json
MERGE_PATH=./gpt2-merges.txt

# Model architecture.
GPT_MODEL_ARGS=(
    --num-layers 12
    --hidden-size 768
    --num-attention-heads 12
    --ffn-hidden-size 3072
    --seq-length 1024
    --max-position-embeddings 1024
)

# Optional Transformer Engine / flash-attention variants (disabled here).
# export NVTE_FLASH_ATTN=1         # use the CUTLASS flash-attention path
# export NVTE_FLASH_ATTN_TRITON=1  # use the Triton flash-attention path
# --transformer-impl transformer_engine
# --use-mcore-models

# Optimizer, precision, and activation-recomputation settings.
TRAINING_ARGS=(
    --transformer-impl local
    --use-legacy-models
    --micro-batch-size 1
    --global-batch-size 60  # 240 # 512 # 64
    --train-iters 100
    --weight-decay 0.1
    --adam-beta1 0.9
    --adam-beta2 0.95
    --init-method-std 0.006
    --clip-grad 1.0
    --bf16
    --use-distributed-optimizer
    --ckpt-format torch
    --disable-bias-linear
    --overlap-grad-reduce
    --attention-dropout 0
    --hidden-dropout 0
    --ddp-average-in-collective
    --recompute-granularity full
    --recompute-num-layers 5
    --recompute-method block
    --no-gradient-accumulation-fusion
    --swiglu
    --lr 3.0e-5
    --lr-decay-style cosine
    --min-lr 3.0e-6
    --lr-warmup-iters 1
)

# Parallelism: tensor parallel 2, no pipeline parallelism, sequence parallelism on.
MODEL_PARALLEL_ARGS=(
    --sequence-parallel
    --tensor-model-parallel-size 2
    --pipeline-model-parallel-size 1
)

# Dataset split and tokenizer settings.
DATA_ARGS=(
    --data-path $DATA_PATH
    --split 949,50,1
    --untie-embeddings-and-output-weights
    --use-rotary-position-embeddings
    --normalization RMSNorm
    --no-position-embedding
    --vocab-file $VOCAB_PATH
    --merge-file $MERGE_PATH
    --tokenizer-type GPT2BPETokenizer
)

# Logging, checkpointing, and evaluation intervals.
EVAL_AND_LOGGING_ARGS=(
    --log-interval 1
    --save-interval 10000
    --eval-interval 1000
    --save $CHECKPOINT_PATH
    --load $CHECKPOINT_PATH
    --eval-iters 10
    --tensorboard-dir $TENSORBOARD_LOGS_PATH
)

# Multi-node defaults; override with --NNODES, --NODE_RANK, --MASTER_ADDR.
NNODES=1
NODE_RANK=0
MASTER_ADDR=localhost

while [ $# -gt 0 ]; do
    case $1 in
        --NNODES)      NNODES=$2;      shift ;;
        --NODE_RANK)   NODE_RANK=$2;   shift ;;
        --MASTER_ADDR) MASTER_ADDR=$2; shift ;;
        *)             break ;;
    esac
    shift
done

# torchrun launcher arguments: 2 processes per node (must match the number of
# visible GPUs per node).
DISTRIBUTED_ARGS=(
    --nproc_per_node 2
    --nnodes $NNODES
    --node_rank $NODE_RANK
    --master_addr $MASTER_ADDR
    --master_port 29500
)

export HIP_VISIBLE_DEVICES=2,3  # 0,1,2,3,4,5,6,7

torchrun "${DISTRIBUTED_ARGS[@]}" pretrain_gpt.py \
    "${GPT_MODEL_ARGS[@]}" \
    "${TRAINING_ARGS[@]}" \
    "${MODEL_PARALLEL_ARGS[@]}" \
    "${DATA_ARGS[@]}" \
    "${EVAL_AND_LOGGING_ARGS[@]}"
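
# Usage sketch (assumption: this file is saved as pretrain_gpt2_small.sh; the
# script name and hostname "node0" below are hypothetical, everything else
# follows from the argument parsing above).
#
# Single node (defaults: NNODES=1, NODE_RANK=0, MASTER_ADDR=localhost):
#   bash pretrain_gpt2_small.sh
#
# Two nodes (run once on each node, both pointing at the rank-0 host):
#   bash pretrain_gpt2_small.sh --NNODES 2 --NODE_RANK 0 --MASTER_ADDR node0
#   bash pretrain_gpt2_small.sh --NNODES 2 --NODE_RANK 1 --MASTER_ADDR node0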