#!/bin/bash

# Based on Megatron-LM's "175B" GPT-3 example launch script; the model
# configured below is a small LLaMA-style model (12 layers, hidden size 512),
# trained with pretrain_gpt.py on a single GPU.

export CUDA_DEVICE_MAX_CONNECTIONS=1

# Distributed launch configuration.
GPUS_PER_NODE=1 #8 # Change for multinode config
MASTER_ADDR=localhost
MASTER_PORT=6000
NUM_NODES=1
NODE_RANK=0
WORLD_SIZE=$(($GPUS_PER_NODE * $NUM_NODES))

# Paths for checkpoints, TensorBoard logs, preprocessed data, and the tokenizer.
CHECKPOINT_PATH=./tmp #$1
TENSORBOARD_LOGS_PATH=./tmp #$2
#VOCAB_FILE=$3 #/gpt2-vocab.json
#MERGE_FILE=$4 #/gpt2-merges.txt
DATA_PATH="/root/megatron-llama/dataset/my-llama_text_document" #_text_document
TOKENIZER_PATH="/root/megatron-llama/tokenizer.model"

DISTRIBUTED_ARGS=(
    --nproc_per_node $GPUS_PER_NODE
    --nnodes $NUM_NODES
    --master_addr $MASTER_ADDR
    --master_port $MASTER_PORT
)

GPT_MODEL_ARGS=(
    --num-layers 12
    --hidden-size 512
    --num-attention-heads 8
    --seq-length 2048
    --max-position-embeddings 2048
)

TRAINING_ARGS=(
    --transformer-impl local
    --use-legacy-models
    --micro-batch-size 1
    --global-batch-size 60
    --train-iters 50
    --weight-decay 0.1
    --adam-beta1 0.9
    --adam-beta2 0.95
    --init-method-std 0.006
    --clip-grad 1.0
    --fp16
    --lr 6.0e-5
    --lr-decay-style cosine
    --min-lr 6.0e-6
    --lr-warmup-fraction .001
    --lr-decay-iters 20
)

MODEL_PARALLEL_ARGS=(
    --tensor-model-parallel-size 1
    --pipeline-model-parallel-size 1
)

# Data and tokenizer settings, plus the LLaMA-style model flags
# (untied embeddings and rotary position embeddings).
DATA_ARGS=(
    --data-path $DATA_PATH
    --split 949,50,1
    --untie-embeddings-and-output-weights
    --position-embedding-type rope
    --tokenizer-model $TOKENIZER_PATH
    --tokenizer-type GPTSentencePieceTokenizer
)

EVAL_AND_LOGGING_ARGS=(
    --log-interval 1
    --save-interval 10000
    --eval-interval 1000
    --save $CHECKPOINT_PATH
    --load $CHECKPOINT_PATH
    --eval-iters 10
    --tensorboard-dir $TENSORBOARD_LOGS_PATH
)

torchrun "${DISTRIBUTED_ARGS[@]}" pretrain_gpt.py \
    "${GPT_MODEL_ARGS[@]}" \
    "${TRAINING_ARGS[@]}" \
    "${MODEL_PARALLEL_ARGS[@]}" \
    "${DATA_ARGS[@]}" \
    "${EVAL_AND_LOGGING_ARGS[@]}"
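
# Where DATA_PATH comes from (hedged sketch, not executed by this script):
# Megatron-LM's tools/preprocess_data.py writes indexed datasets named
# <output-prefix>_<json-key>_document.{bin,idx}, so an output prefix of
# ".../my-llama" with the default "text" key yields the "my-llama_text_document"
# prefix used above. The input file name and worker count below are assumptions
# for illustration only; the tokenizer flags mirror the settings in DATA_ARGS.
#
# python tools/preprocess_data.py \
#     --input /root/megatron-llama/dataset/corpus.jsonl \
#     --output-prefix /root/megatron-llama/dataset/my-llama \
#     --tokenizer-type GPTSentencePieceTokenizer \
#     --tokenizer-model /root/megatron-llama/tokenizer.model \
#     --append-eod \
#     --workers 4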