#!/bin/bash

# Runs the "220M" parameter model

# Limit each GPU to one hardware work queue; required by Megatron-LM for
# correct overlap of communication and computation.
export CUDA_DEVICE_MAX_CONNECTIONS=1

# Distributed topology for this launch.
GPUS_PER_NODE=8
# Change for multinode config
MASTER_ADDR=localhost
MASTER_PORT=6000
NUM_NODES=1
NODE_RANK=0
WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES))

# Required positional arguments (paths supplied by the caller).
CHECKPOINT_PATH=$1 #<Specify path>
TENSORBOARD_DIR=$2 #<Specify path>
VOCAB_FILE=$3 #<Specify path to file>/bert-large-cased-vocab.txt
DATA_PATH=$4 #<Specify path and file prefix>_text_document

# Arguments forwarded to torchrun (the elastic launcher).
DISTRIBUTED_ARGS="
    --nproc_per_node $GPUS_PER_NODE \
    --nnodes $NUM_NODES \
    --node_rank $NODE_RANK \
    --master_addr $MASTER_ADDR \
    --master_port $MASTER_PORT
"

# Model architecture, batch sizes, and optimizer schedule for the 220M T5.
T5_ARGS="
    --encoder-num-layers 12 \
    --decoder-num-layers 12 \
    --hidden-size 768 \
    --num-attention-heads 12 \
    --kv-channels 64 \
    --ffn-hidden-size 3072 \
    --encoder-seq-length 512 \
    --decoder-seq-length 128 \
    --max-position-embeddings 512 \
    --micro-batch-size 64 \
    --global-batch-size 512 \
    --lr 0.0001 \
    --train-iters 1000000 \
    --lr-decay-iters 1000000 \
    --lr-decay-style linear \
    --min-lr 0.00001 \
    --weight-decay 1e-2 \
    --lr-warmup-fraction .01 \
    --clip-grad 1.0 \
    --bf16 \
    --vocab-extra-ids 100 \
    --init-method-std 0.015 \
    --transformer-impl transformer_engine \
    --tensor-model-parallel-size 1 \
    --pipeline-model-parallel-size 1 \
    --attention-backend auto
"

# Dataset, tokenizer, and train/valid/test split configuration.
DATA_ARGS="
    --data-path $DATA_PATH \
    --vocab-file $VOCAB_FILE \
    --tokenizer-type BertWordPieceCase \
    --split 99982,9,9
"

# Logging, checkpointing, and evaluation cadence.
OUTPUT_ARGS="
    --log-interval 100 \
    --tensorboard-dir ${TENSORBOARD_DIR} \
    --save-interval 500 \
    --eval-interval 1000 \
    --eval-iters 10
"

# Launch pretraining. $DISTRIBUTED_ARGS and the $*_ARGS groups are expanded
# unquoted on purpose: each is a whitespace-separated list of flags.
torchrun $DISTRIBUTED_ARGS pretrain_t5.py \
    $T5_ARGS \
    $DATA_ARGS \
    $OUTPUT_ARGS \
    --distributed-backend nccl \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH