#!/bin/bash
# This example script is contributed by external user https://github.com/nrailgun
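#
# Launch sketch (not part of the original script): the script reads its rank
# information from OpenMPI's OMPI_COMM_WORLD_* variables and takes the
# rendezvous host as its first positional argument, so a typical single-node,
# four-rank run would look roughly like:
#   mpirun -np 4 bash single.sh <master_node_ip>
# The exact mpirun flags (binding, hostfile, etc.) depend on your cluster.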
set -ex
######################################
# Environment settings
######################################
export CUDA_DEVICE_MAX_CONNECTIONS=1   # serialize device work queues, as Megatron-LM launch scripts expect
export HSA_FORCE_FINE_GRAIN_PCIE=1     # ROCm: enable fine-grained PCIe memory access
export OMP_NUM_THREADS=1               # one OpenMP thread per rank
export NCCL_P2P_LEVEL=5                # allow GPU peer-to-peer transfers up to this topology level


lrank=$OMPI_COMM_WORLD_LOCAL_RANK      # local rank on this node (set by OpenMPI)
RANK=$OMPI_COMM_WORLD_RANK             # global rank
WORLD_SIZE=$OMPI_COMM_WORLD_SIZE       # total number of ranks

export NCCL_IB_TIMEOUT=22              # larger InfiniBand retry timeout for big collectives

# Change the configurations below as needed.
BASE_PATH=./tmp
DATASET_1="./dataset/my-gpt2_text_document"
DATASET="1 ${DATASET_1}"
CHECKPOINT_PATH=./tmp

TP=4                                   # tensor model parallel size
PP=1                                   # pipeline model parallel size

HIDDEN_SIZE=4096 
NUM_LAYERS=32
NUM_HEADS=32
SEQ_LENGTH=4096
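# Rough size estimate (back-of-the-envelope, not from the original script):
# each transformer layer holds about 12 * HIDDEN_SIZE^2 ≈ 201M weights
# (attention + MLP, biases disabled below), so 32 layers ≈ 6.4B, plus roughly
# 0.2B for the ~50k-token GPT-2 embedding table -- a 6-7B parameter model.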

VOCAB_PATH=./gpt2-vocab.json
MERGE_PATH=./gpt2-merges.txt
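# Note (not from the original script): these are the standard GPT-2 BPE
# vocabulary and merge files; if not present locally they can be downloaded,
# e.g. from the gpt2 model page on huggingface.co.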

MICRO_BATCH_SIZE=1
GLOBAL_BATCH_SIZE=60 
TRAIN_STEPS=250000 
LR=3e-4
MIN_LR=3e-5
LR_WARMUP_STEPS=2000
WEIGHT_DECAY=0.1
GRAD_CLIP=1
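# Batch-size arithmetic sketch (assuming the 4-rank launch shown above):
# data-parallel size = WORLD_SIZE / (TP * PP) = 4 / (4 * 1) = 1, so Megatron
# reaches GLOBAL_BATCH_SIZE=60 via 60 gradient-accumulation micro-batches of
# MICRO_BATCH_SIZE=1 per iteration.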


APP="python3 -u pretrain_gpt.py \
       --tensor-model-parallel-size $TP \
       --pipeline-model-parallel-size $PP \
       --num-layers $NUM_LAYERS \
       --hidden-size $HIDDEN_SIZE \
       --num-attention-heads $NUM_HEADS \
       --micro-batch-size $MICRO_BATCH_SIZE \
       --global-batch-size $GLOBAL_BATCH_SIZE \
       --seq-length $SEQ_LENGTH \
       --max-position-embeddings $SEQ_LENGTH \
       --train-iters $TRAIN_STEPS \
       --save $CHECKPOINT_PATH \
       --load $CHECKPOINT_PATH \
       --data-path $DATASET \
       --data-impl mmap \
       --split 949,50,1 \
       --distributed-backend nccl \
       --lr $LR \
       --lr-decay-style cosine \
       --min-lr $MIN_LR \
       --weight-decay $WEIGHT_DECAY \
       --clip-grad $GRAD_CLIP \
       --lr-warmup-iters $LR_WARMUP_STEPS \
       --optimizer adam \
       --adam-beta1 0.9 \
       --adam-beta2 0.95 \
       --log-interval 1 \
       --vocab-file ${VOCAB_PATH} \
       --merge-file ${MERGE_PATH} \
       --tokenizer-type GPT2BPETokenizer \
       --save-interval 1000 \
       --eval-interval 1000 \
       --eval-iters 1000 \
       --fp16 \
       --recompute-activations \
       --disable-bias-linear \
       --no-gradient-accumulation-fusion \
       --rank ${RANK} \
       --world_size ${WORLD_SIZE} \
       --dist_url tcp://${1}:34566 \
       --num-workers 2 \
      "
# Each local rank (0-3) exposes all four GPUs; tensor parallelism (TP=4)
# splits the model across them.
case ${lrank} in
[0-3])
  export HIP_VISIBLE_DEVICES=0,1,2,3
  ${APP}
  ;;
esac