#!/bin/bash

# Runs a GPT-2-small-scale model (12 layers, hidden size 768), not a "7B" model
export HSA_FORCE_FINE_GRAIN_PCIE=1     # ROCm: enable fine-grained PCIe memory access
export OMP_NUM_THREADS=1               # keep OpenMP from oversubscribing CPU cores
export NCCL_P2P_LEVEL=SYS              # allow GPU peer-to-peer across the whole system

export NCCL_ALGO=Ring                  # force the ring collective algorithm
export NCCL_NCHANNELS_PER_PEER=16
export NCCL_MIN_NCHANNELS=20           # lower bound on NCCL/RCCL channel count
export NCCL_IB_TIMEOUT=22              # raise the InfiniBand retry timeout
export CUDA_DEVICE_MAX_CONNECTIONS=1   # required by Megatron-LM for comm/compute overlap

export NCCL_NET_GDR_LEVEL=SYS          # allow GPUDirect RDMA at any topology distance
export NCCL_NET_GDR_READ=0             # do not use GPUDirect RDMA on the send/read side


CHECKPOINT_PATH=./tmp         # or pass as $1: <specify path>
TENSORBOARD_LOGS_PATH=./tmp   # or pass as $2: <specify path>
DATA_PATH="/datasets/oscar-1GB-gpt_text_document"   # <specify path and file prefix>_text_document
VOCAB_PATH=./gpt2-vocab.json
MERGE_PATH=./gpt2-merges.txt
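
# The DATA_PATH prefix above must point at a dataset built with Megatron-LM's
# tools/preprocess_data.py. A minimal sketch, assuming a JSONL corpus at
# ./oscar-1GB.jsonl with a "text" field (the input path is hypothetical):
#
#   python tools/preprocess_data.py \
#       --input ./oscar-1GB.jsonl \
#       --output-prefix /datasets/oscar-1GB-gpt \
#       --vocab-file ./gpt2-vocab.json \
#       --merge-file ./gpt2-merges.txt \
#       --tokenizer-type GPT2BPETokenizer \
#       --append-eod \
#       --workers 8
#
# This writes /datasets/oscar-1GB-gpt_text_document.{bin,idx}, matching DATA_PATH.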

GPT_MODEL_ARGS=(
    --num-layers 12
    --hidden-size 768
    --num-attention-heads 12
    --ffn-hidden-size 3072
    --seq-length 1024
    --max-position-embeddings 1024
)
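
# Parameter ballpark for the config above (rough, ignoring norms and biases; 50257
# is the raw GPT-2 vocab size, before Megatron's padding):
#   attention:  12 layers * 4 * 768^2             ~  28M
#   SwiGLU FFN: 12 layers * 3 * 768 * 3072        ~  85M
#   embeddings: 2 * 50257 * 768 (untied in/out)   ~  77M
#   total                                         ~ 190M, i.e. GPT-2-small scale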

# export NVTE_FLASH_ATTN=1          # use the CUTLASS flash-attention path
# export NVTE_FLASH_ATTN_TRITON=1   # use the Triton flash-attention path
# --transformer-impl transformer_engine
# --use-mcore-models
TRAINING_ARGS=(
    --transformer-impl local
    --use-legacy-models 
    --micro-batch-size 1 
    --global-batch-size 60 #240 #512 #64
    --train-iters 100
    --weight-decay 0.1 
    --adam-beta1 0.9 
    --adam-beta2 0.95 
    --init-method-std 0.006 
    --clip-grad 1.0 
    --bf16
    --use-distributed-optimizer 
    --ckpt-format torch
    --disable-bias-linear
    --overlap-grad-reduce
    --attention-dropout 0
    --hidden-dropout 0
    --ddp-average-in-collective
    # fully recompute activations for 5 of the 12 layers (trades compute for memory)
    --recompute-granularity full
    --recompute-num-layers 5
    --recompute-method block
    --no-gradient-accumulation-fusion
    --swiglu
    --lr 3.0e-5 
    --lr-decay-style cosine 
    --min-lr 3.0e-6
    --lr-warmup-iters 1
)
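
# Batch bookkeeping: torchrun launches 2 ranks (below) and tensor-model-parallel-size
# is 2, so the data-parallel size is 1. global-batch-size 60 with micro-batch-size 1
# therefore means 60 gradient-accumulation steps per training iteration.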
MODEL_PARALLEL_ARGS=(
    --sequence-parallel
    --tensor-model-parallel-size 2
    --pipeline-model-parallel-size 1
)

DATA_ARGS=(
    --data-path $DATA_PATH
    --split 949,50,1   # train/validation/test weights
    # LLaMA-style architecture flags: untied embeddings, RoPE instead of learned positions, RMSNorm
    --untie-embeddings-and-output-weights
    --use-rotary-position-embeddings
    --normalization RMSNorm
    --no-position-embedding
    --vocab-file $VOCAB_PATH
    --merge-file $MERGE_PATH
    --tokenizer-type GPT2BPETokenizer
)

EVAL_AND_LOGGING_ARGS=(
    --log-interval 1
    --save-interval 10000 
    --eval-interval 1000 
    --save $CHECKPOINT_PATH 
    --load $CHECKPOINT_PATH 
    --eval-iters 10
    --tensorboard-dir $TENSORBOARD_LOGS_PATH 
)

NNODES=1
NODE_RANK=0
MASTER_ADDR=localhost
while [ $# -gt 0 ]; do
  case $1 in
    --NNODES)
      NNODES=$2; shift;;
    --NODE_RANK)
      NODE_RANK=$2; shift;;
    --MASTER_ADDR)
      MASTER_ADDR=$2; shift;;
    *)
      break;;
  esac
  shift
done
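
# Example invocations (node0 is a hypothetical host name):
#   single node: bash GPT_pretraining.sh
#   two nodes:   bash GPT_pretraining.sh --NNODES 2 --NODE_RANK 0 --MASTER_ADDR node0   # on the master
#                bash GPT_pretraining.sh --NNODES 2 --NODE_RANK 1 --MASTER_ADDR node0   # on the worker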

DISTRIBUTED_ARGS=(
    --nproc_per_node 2
    --nnodes $NNODES
    --node_rank $NODE_RANK
    --master_addr $MASTER_ADDR
    --master_port 29500
)
export HIP_VISIBLE_DEVICES=2,3   # e.g. 0,1,2,3,4,5,6,7 for all eight GPUs; keep in sync with --nproc_per_node
torchrun "${DISTRIBUTED_ARGS[@]}" pretrain_gpt.py \
    "${GPT_MODEL_ARGS[@]}" \
    "${TRAINING_ARGS[@]}" \
    "${MODEL_PARALLEL_ARGS[@]}" \
    "${DATA_ARGS[@]}" \
    "${EVAL_AND_LOGGING_ARGS[@]}"
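
# Checkpoints are written to $CHECKPOINT_PATH every --save-interval iterations; point
# TensorBoard at $TENSORBOARD_LOGS_PATH to follow training/validation loss.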