#!/bin/bash

# Runs the "7B" parameter model
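#
# This script expects to be launched under Open MPI: it reads the
# OMPI_COMM_WORLD_* variables below and takes the master node's address
# as its first argument. A hypothetical launch line, assuming a hostfile
# you provide and one 8-GPU node (one local rank per GPU):
#
#   mpirun -np 8 --hostfile ./hostfile ./qwen2_7b.sh <master_ip>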
export HSA_FORCE_FINE_GRAIN_PCIE=1
export OMP_NUM_THREADS=1
export NCCL_P2P_LEVEL=SYS

export NCCL_ALGO=Ring
export NCCL_NCHANNELS_PER_PEER=16
export NCCL_MIN_NCHANNELS=20
export NCCL_IB_TIMEOUT=22
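# Megatron-LM expects CUDA_DEVICE_MAX_CONNECTIONS=1 so that tensor/sequence-
# parallel communication kernels stay correctly ordered with compute.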
export CUDA_DEVICE_MAX_CONNECTIONS=1

export NCCL_IB_HCA=xx # set to the IB HCA name(s) in your environment, e.g. mlx5_0
export NCCL_NET_GDR_LEVEL=SYS

# Transformer Engine attention backend: use flash attention and keep the
# fused-attention path disabled.
export NVTE_FLASH_ATTN=1
export NVTE_FUSED_ATTN=0

lrank=$OMPI_COMM_WORLD_LOCAL_RANK
RANK=$OMPI_COMM_WORLD_RANK
WORLD_SIZE=$OMPI_COMM_WORLD_SIZE

CHECKPOINT_PATH=./tmp #$1 #<Specify path>
TENSORBOARD_LOGS_PATH=./tmp  #$2 #<Specify path>
DATA_PATH="/path_to_my-qwen_text_document" #<Specify path and file prefix>_text_document

GPT_MODEL_ARGS=(
    --num-layers 28     
    --hidden-size 3584   
    --ffn-hidden-size 18944
    --num-attention-heads 28 
    --seq-length 4096 
    --max-position-embeddings 32768
    --num-query-groups 4
    --group-query-attention
)
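# These sizes follow the published Qwen2-7B architecture: 28 layers,
# hidden size 3584, FFN size 18944, and grouped-query attention with
# 28 query heads sharing 4 KV groups (7 query heads per KV head).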

TRAINING_ARGS=(
    --log-throughput
    --transformer-impl transformer_engine
    --use-mcore-models
    --micro-batch-size 1 
    --global-batch-size 12
    --train-iters 100
    --weight-decay 0.1 
    --adam-beta1 0.9 
    --adam-beta2 0.95 
    --init-method-std 0.006 
    --clip-grad 1.0 
    --bf16
    --use-distributed-optimizer 
    --use-flash-attn
    --disable-bias-linear
    --attention-dropout 0
    --hidden-dropout 0
    --no-gradient-accumulation-fusion
    --add-qkv-bias
    --swiglu
    --lr 3.0e-5 
    --lr-decay-style cosine 
    --min-lr 3.0e-6
    --lr-warmup-iters 1
)
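# Batch math: with micro-batch 1 and TP=2 x PP=4 below, a single 8-GPU
# node gives a data-parallel size of 1, so the global batch of 12 is
# reached via 12 gradient-accumulation steps (assumes a one-node launch).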
MODEL_PARALLEL_ARGS=(
    --sequence-parallel
    --tensor-model-parallel-size 2
    --pipeline-model-parallel-size 4
)
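# TP=2 x PP=4 consumes all 8 GPUs of one node per model replica;
# --sequence-parallel additionally shards activations along the sequence
# dimension across the tensor-parallel group (it requires TP > 1).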

DATA_ARGS=(
    --data-path $DATA_PATH 
    --split 949,50,1
    --untie-embeddings-and-output-weights
    --use-rotary-position-embeddings 
    --normalization RMSNorm 
    --no-position-embedding 
    --tokenizer-type QwenTokenizer
    --merge-file /path_to_qwen_token/merges.txt
    --vocab-file /path_to_qwen_token/vocab.json
)
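# The vocab.json/merges.txt pair backs the BPE tokenizer; both paths are
# placeholders to be replaced with the files shipped with the Qwen tokenizer.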

EVAL_AND_LOGGING_ARGS=(
    --log-interval 1
    --save-interval 1000 
    --eval-interval 1000 
    --save $CHECKPOINT_PATH 
    --load $CHECKPOINT_PATH 
    --eval-iters 10
    --tensorboard-dir $TENSORBOARD_LOGS_PATH 
)
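# Pointing --save and --load at the same directory lets an interrupted
# run resume from the latest checkpoint found in CHECKPOINT_PATH.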

APP="python3  -u pretrain_gpt.py \
     ${GPT_MODEL_ARGS[@]} \
     ${TRAINING_ARGS[@]} \
     ${MODEL_PARALLEL_ARGS[@]} \
     ${DATA_ARGS[@]} \
     ${EVAL_AND_LOGGING_ARGS[@]}
     --rank ${RANK} \
     --world_size ${WORLD_SIZE} \
     --dist_url tcp://${1}:34566 \
    "

# for hygon: pin each local rank to the matching NUMA node
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
case ${lrank} in
[0-7])
  numactl --cpunodebind=${lrank} --membind=${lrank} ${APP}
  ;;
esac