Commit 78cb1781 authored by Mohammad

moved steves branch

parent c6b5c137
#!/bin/bash
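# Zero-shot LAMBADA evaluation of the 345M-parameter GPT-2 model on 8 GPUs;
# --strict-lambada scores accuracy on the full last word rather than the last token.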
WORLD_SIZE=8
DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \
--nnodes 1 \
--node_rank 0 \
--master_addr localhost \
--master_port 6000"
TASK="LAMBADA"
VALID_DATA=<lambada path>
VOCAB_FILE=gpt2-vocab.json
MERGE_FILE=gpt2-merges.txt
CHECKPOINT=checkpoints/gpt2_345m
python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/main.py \
--task $TASK \
--valid-data $VALID_DATA \
--tokenizer-type GPT2BPETokenizer \
--strict-lambada \
--vocab-file $VOCAB_FILE \
--merge-file $MERGE_FILE \
--load $CHECKPOINT \
--model-parallel-size 1 \
--num-layers 24 \
--hidden-size 1024 \
--num-attention-heads 16 \
--batch-size 8 \
--checkpoint-activations \
--seq-length 512 \
--max-position-embeddings 512 \
--log-interval 10 \
--fp16 \
--no-load-optim \
--no-load-rng
#!/bin/bash
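# Finetune the pretrained 345M-parameter BERT model on the RACE reading-comprehension task,
# training on the middle-school split and validating on the middle and high dev splits.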
WORLD_SIZE=8
DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \
--nnodes 1 \
--node_rank 0 \
--master_addr localhost \
--master_port 6000"
TRAIN_DATA="data/RACE/train/middle"
VALID_DATA="data/RACE/dev/middle \
data/RACE/dev/high"
VOCAB_FILE=bert-vocab.txt
PRETRAINED_CHECKPOINT=checkpoints/bert_345m
CHECKPOINT_PATH=checkpoints/bert_345m_race
python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/main.py \
--task RACE \
--seed 1234 \
--train-data $TRAIN_DATA \
--valid-data $VALID_DATA \
--tokenizer-type BertWordPieceLowerCase \
--vocab-file $VOCAB_FILE \
--epochs 3 \
--pretrained-checkpoint $PRETRAINED_CHECKPOINT \
--model-parallel-size 1 \
--num-layers 24 \
--hidden-size 1024 \
--num-attention-heads 16 \
--batch-size 4 \
--checkpoint-activations \
--lr 1.0e-5 \
--lr-decay-style linear \
--warmup 0.06 \
--seq-length 512 \
--max-position-embeddings 512 \
--save-interval 500000 \
--save $CHECKPOINT_PATH \
--log-interval 10 \
--eval-interval 100 \
--eval-iters 50 \
--weight-decay 1.0e-1 \
--clip-grad 1.0 \
--hidden-dropout 0.1 \
--attention-dropout 0.1 \
--fp16
#!/bin/bash
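# Finetune the pretrained 345M-parameter BERT model on MNLI (GLUE),
# validating on both the matched and mismatched dev sets.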
WORLD_SIZE=8
DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \
--nnodes 1 \
--node_rank 0 \
--master_addr localhost \
--master_port 6000"
TRAIN_DATA="data/glue_data/MNLI/train.tsv"
VALID_DATA="data/glue_data/MNLI/dev_matched.tsv \
data/glue_data/MNLI/dev_mismatched.tsv"
PRETRAINED_CHECKPOINT=checkpoints/bert_345m
VOCAB_FILE=bert-vocab.txt
CHECKPOINT_PATH=checkpoints/bert_345m_mnli
python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/main.py \
--task MNLI \
--seed 1234 \
--train-data $TRAIN_DATA \
--valid-data $VALID_DATA \
--tokenizer-type BertWordPieceLowerCase \
--vocab-file $VOCAB_FILE \
--epochs 5 \
--pretrained-checkpoint $PRETRAINED_CHECKPOINT \
--model-parallel-size 1 \
--num-layers 24 \
--hidden-size 1024 \
--num-attention-heads 16 \
--batch-size 8 \
--checkpoint-activations \
--lr 5.0e-5 \
--lr-decay-style linear \
--warmup 0.065 \
--seq-length 512 \
--max-position-embeddings 512 \
--save-interval 500000 \
--save $CHECKPOINT_PATH \
--log-interval 10 \
--eval-interval 100 \
--eval-iters 50 \
--weight-decay 1.0e-1 \
--fp16
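The unconditional text-generation example is rewritten: the old generate_samples.py invocation with its MPSIZE/NLAYERS/TOPK variables gives way to tools/generate_samples_gpt2.py, which reads the GPT-2 vocab and merge files directly: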
 #!/bin/bash
-CHECKPOINT_PATH=checkpoints/gpt2_345m/
-MPSIZE=1
-NLAYERS=12
-NHIDDEN=768
-NATT=12
-MAXSEQLEN=1024
-#SAMPLING ARGS
-TEMP=0.9
-#If TOPK/TOPP are 0 it defaults to greedy sampling, top-k will also override top-p
-TOPK=0
-TOPP=0
-python generate_samples.py \
-    --model-parallel-size $MPSIZE \
-    --num-layers $NLAYERS \
-    --hidden-size $NHIDDEN \
+CHECKPOINT_PATH=checkpoints/gpt2_345m
+VOCAB_FILE=gpt2-vocab.json
+MERGE_FILE=gpt2-merges.txt
+python tools/generate_samples_gpt2.py \
+    --model-parallel-size 1 \
+    --num-layers 24 \
+    --hidden-size 1024 \
     --load $CHECKPOINT_PATH \
-    --num-attention-heads $NATT \
+    --num-attention-heads 16 \
     --max-position-embeddings 1024 \
     --tokenizer-type GPT2BPETokenizer \
     --fp16 \
-    --cache-dir cache \
-    --out-seq-length $MAXSEQLEN \
-    --temperature $TEMP \
-    --top_k $TOPK \
-    --genfile dbg_unconditional.json \
-    --num-samples 10 \
-    --top_p $TOPP \
+    --batch-size 2 \
+    --seq-length 1024 \
+    --out-seq-length 1024 \
+    --temperature 1.0 \
+    --vocab-file $VOCAB_FILE \
+    --merge-file $MERGE_FILE \
+    --genfile unconditional_samples.json \
+    --num-samples 2 \
+    --top_p 0.9 \
     --recompute
#!/bin/bash
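# Merge a 2-way model-parallel BERT-345M checkpoint into a single partition;
# WORLD_SIZE is set to the model-parallel size just for this run.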
MODEL_PARALLEL_SIZE=2
VOCAB_FILE=bert-vocab.txt
CHECKPOINT_PATH=checkpoints/bert_345m
WORLD_SIZE=$MODEL_PARALLEL_SIZE python tools/merge_mp_partitions.py \
--model-type BERT \
--model-parallel-size $MODEL_PARALLEL_SIZE \
--tokenizer-type BertWordPieceLowerCase \
--vocab-file $VOCAB_FILE \
--num-layers 24 \
--hidden-size 1024 \
--num-attention-heads 16 \
--seq-length 512 \
--max-position-embeddings 512 \
--load $CHECKPOINT_PATH
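The single-GPU BERT pretraining script moves from the old wikipedia/lazy-loader data pipeline to the preprocessed --data-path / --data-impl mmap pipeline and adds explicit logging, saving, and evaluation intervals: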
@@ -2,6 +2,8 @@
 RANK=0
 WORLD_SIZE=1
+DATA_PATH=<Specify path and file prefix>_text_sentence
+CHECKPOINT_PATH=<Specify path>
 python pretrain_bert.py \
     --num-layers 24 \
@@ -9,26 +11,25 @@ python pretrain_bert.py \
     --num-attention-heads 16 \
     --batch-size 4 \
     --seq-length 512 \
-    --max-preds-per-seq 80 \
     --max-position-embeddings 512 \
-    --train-iters 1000000 \
-    --save checkpoints/bert_345m \
-    --load checkpoints/bert_345m \
-    --resume-dataloader \
-    --train-data wikipedia \
-    --lazy-loader \
-    --tokenizer-type BertWordPieceTokenizer \
-    --tokenizer-model-type bert-large-uncased \
-    --presplit-sentences \
-    --cache-dir cache \
+    --train-iters 2000000 \
+    --save $CHECKPOINT_PATH \
+    --load $CHECKPOINT_PATH \
+    --data-path $DATA_PATH \
+    --vocab-file bert-vocab.txt \
+    --data-impl mmap \
     --split 949,50,1 \
     --distributed-backend nccl \
     --lr 0.0001 \
+    --min-lr 0.00001 \
     --lr-decay-style linear \
     --lr-decay-iters 990000 \
     --weight-decay 1e-2 \
     --clip-grad 1.0 \
     --warmup .01 \
-    --fp16 \
-    --fp32-layernorm \
-    --fp32-embedding
+    --log-interval 100 \
+    --save-interval 10000 \
+    --eval-interval 1000 \
+    --eval-iters 10 \
+    --fp16
@@ -8,27 +8,26 @@ NNODES=1
 NODE_RANK=0
 WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
+DATA_PATH=<Specify path and file prefix>_text_sentence
+CHECKPOINT_PATH=<Specify path>
 DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
 python -m torch.distributed.launch $DISTRIBUTED_ARGS \
     pretrain_bert.py \
+    --model-parallel-size 1 \
     --num-layers 24 \
     --hidden-size 1024 \
     --num-attention-heads 16 \
     --batch-size 4 \
     --seq-length 512 \
-    --max-preds-per-seq 80 \
     --max-position-embeddings 512 \
     --train-iters 1000000 \
-    --save checkpoints/bert_345m \
-    --load checkpoints/bert_345m \
-    --resume-dataloader \
-    --train-data wikipedia \
-    --lazy-loader \
-    --tokenizer-type BertWordPieceTokenizer \
-    --tokenizer-model-type bert-large-uncased \
-    --presplit-sentences \
-    --cache-dir cache \
+    --save $CHECKPOINT_PATH \
+    --load $CHECKPOINT_PATH \
+    --data-path $DATA_PATH \
+    --vocab-file bert-vocab.txt \
+    --data-impl mmap \
     --split 949,50,1 \
     --distributed-backend nccl \
     --lr 0.0001 \
@@ -37,7 +36,8 @@ python -m torch.distributed.launch $DISTRIBUTED_ARGS \
     --weight-decay 1e-2 \
     --clip-grad 1.0 \
     --warmup .01 \
-    --fp16 \
-    --fp32-layernorm \
-    --fp32-embedding
+    --log-interval 100 \
+    --save-interval 10000 \
+    --eval-interval 1000 \
+    --eval-iters 10 \
+    --fp16
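The single-GPU GPT-2 pretraining script is updated in the same way, with --train-iters raised to 500000 and learning-rate decay fixed at 320000 iterations: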
@@ -5,6 +5,10 @@
 RANK=0
 WORLD_SIZE=1
+DATA_PATH=<Specify path and file prefix>_text_document
+CHECKPOINT_PATH=<Specify path>
 python pretrain_gpt2.py \
     --num-layers 24 \
     --hidden-size 1024 \
@@ -12,22 +16,27 @@ python pretrain_gpt2.py \
     --batch-size 8 \
     --seq-length 1024 \
     --max-position-embeddings 1024 \
-    --train-iters 320000 \
-    --save checkpoints/gpt2_345m \
-    --load checkpoints/gpt2_345m \
-    --resume-dataloader \
-    --train-data wikipedia \
-    --lazy-loader \
-    --tokenizer-type GPT2BPETokenizer \
-    --cache-dir cache \
+    --train-iters 500000 \
+    --lr-decay-iters 320000 \
+    --save $CHECKPOINT_PATH \
+    --load $CHECKPOINT_PATH \
+    --data-path $DATA_PATH \
+    --vocab-file gpt2-vocab.json \
+    --merge-file gpt2-merges.txt \
+    --data-impl mmap \
     --split 949,50,1 \
     --distributed-backend nccl \
     --lr 0.00015 \
+    --min-lr 1.0e-5 \
     --lr-decay-style cosine \
     --weight-decay 1e-2 \
     --clip-grad 1.0 \
     --warmup .01 \
     --checkpoint-activations \
+    --log-interval 100 \
+    --save-interval 10000 \
+    --eval-interval 1000 \
+    --eval-iters 10 \
     --fp16
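The multi-GPU GPT-2 pretraining script receives the matching changes: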
@@ -10,24 +10,28 @@ NNODES=1
 NODE_RANK=0
 WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
+DATA_PATH=<Specify path and file prefix>_text_document
+CHECKPOINT_PATH=<Specify path>
 DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
 python -m torch.distributed.launch $DISTRIBUTED_ARGS \
     pretrain_gpt2.py \
+    --model-parallel-size 1 \
     --num-layers 24 \
     --hidden-size 1024 \
     --num-attention-heads 16 \
     --batch-size 8 \
     --seq-length 1024 \
     --max-position-embeddings 1024 \
-    --train-iters 320000 \
-    --save checkpoints/gpt2_345m \
-    --load checkpoints/gpt2_345m \
-    --resume-dataloader \
-    --train-data wikipedia \
-    --lazy-loader \
-    --tokenizer-type GPT2BPETokenizer \
-    --cache-dir cache \
+    --train-iters 500000 \
+    --lr-decay-iters 320000 \
+    --save $CHECKPOINT_PATH \
+    --load $CHECKPOINT_PATH \
+    --data-path $DATA_PATH \
+    --vocab-file gpt2-vocab.json \
+    --merge-file gpt2-merges.txt \
+    --data-impl mmap \
     --split 949,50,1 \
     --distributed-backend nccl \
     --lr 0.00015 \
@@ -36,7 +40,12 @@ python -m torch.distributed.launch $DISTRIBUTED_ARGS \
     --clip-grad 1.0 \
     --warmup .01 \
     --checkpoint-activations \
+    --log-interval 100 \
+    --save-interval 10000 \
+    --eval-interval 1000 \
+    --eval-iters 10 \
     --fp16
 set +x