Commit 0024a5c6 authored by zhuwenwen

Merge branch 'main' of https://github.com/NVIDIA/Megatron-LM

parents b004456b 3db2063b
#! /bin/bash
DATA_PATH=$1
CHECKPOINT_PATH=$2
TENSORBOARD_DIR=$3
TP_SIZE=$4
PP_SIZE=$5
NNODES=$6
GPUS_PER_NODE=8
# Change for multinode config
MASTER_ADDR=localhost
MASTER_PORT=6000
NODE_RANK=0
WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
export CUDA_DEVICE_MAX_CONNECTIONS=1
# Runs the "345M" parameter model
DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
# Run for 100 iterations and save a checkpoint at iteration 50
python -m torch.distributed.launch $DISTRIBUTED_ARGS \
pretrain_bert.py \
--use-checkpoint-args \
--use-checkpoint-opt_param-scheduler \
--num-layers 24 \
--hidden-size 1024 \
--num-attention-heads 16 \
--log-params-norm \
--log-num-zeros-in-grad \
--log-validation-ppl-to-tensorboard \
--log-timers-to-tensorboard \
--tensorboard-dir ${TENSORBOARD_DIR} \
--micro-batch-size 4 \
--global-batch-size 128 \
--seq-length 512 \
--max-position-embeddings 512 \
--train-iters 100 \
--timing-log-level 2 \
--lr-decay-iters 990000 \
--save $CHECKPOINT_PATH \
--load $CHECKPOINT_PATH \
--data-path $DATA_PATH \
--vocab-file /workspace/data/bert_data/vocab.txt \
--data-impl mmap \
--split 949,50,1 \
--distributed-backend nccl \
--lr 0.0001 \
--min-lr 0.00001 \
--lr-warmup-fraction 0.01 \
--log-interval 1 \
--save-interval 50 \
--eval-interval 1000 \
--eval-iters 10 \
--tensor-model-parallel-size $TP_SIZE \
--pipeline-model-parallel-size $PP_SIZE \
--no-gradient-accumulation-fusion \
--fp16
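# Rewriting the tracker file makes iteration 50 the "latest" checkpoint,
# so the second run below exercises the resume-from-checkpoint path.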
echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt
# Resume from 50th iteration ckpt and continue to 100 iterations
python -m torch.distributed.launch $DISTRIBUTED_ARGS \
pretrain_bert.py \
--use-checkpoint-args \
--use-checkpoint-opt_param-scheduler \
--num-layers 24 \
--hidden-size 1024 \
--num-attention-heads 16 \
--log-params-norm \
--log-num-zeros-in-grad \
--log-validation-ppl-to-tensorboard \
--log-timers-to-tensorboard \
--tensorboard-dir ${TENSORBOARD_DIR} \
--micro-batch-size 4 \
--global-batch-size 128 \
--seq-length 512 \
--max-position-embeddings 512 \
--train-iters 100 \
--timing-log-level 2 \
--lr-decay-iters 990000 \
--save $CHECKPOINT_PATH \
--load $CHECKPOINT_PATH \
--data-path $DATA_PATH \
--vocab-file /workspace/data/bert_data/vocab.txt \
--data-impl mmap \
--split 949,50,1 \
--distributed-backend nccl \
--lr 0.0001 \
--min-lr 0.00001 \
--lr-warmup-fraction 0.01 \
--log-interval 1 \
--save-interval 10000 \
--eval-interval 1000 \
--eval-iters 10 \
--tensor-model-parallel-size $TP_SIZE \
--pipeline-model-parallel-size $PP_SIZE \
--no-gradient-accumulation-fusion \
--fp16
#! /bin/bash
set -o xtrace
DATA_PATH=$1
CHECKPOINT_PATH=$2
TENSORBOARD_DIR=$3
TP_SIZE=$4
PP_SIZE=$5
NNODES=$6
MAX_STEPS=$7
VP_SIZE=$8
GPUS_PER_NODE=8
# Change for multinode config
MASTER_ADDR=localhost
MASTER_PORT=6000
NODE_RANK=0
WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
export CUDA_DEVICE_MAX_CONNECTIONS=1
# Runs the "345M" parameter model
DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
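# VP_SIZE is optional: the ${VP_SIZE:+...} expansion below passes
# --num-layers-per-virtual-pipeline-stage only when VP_SIZE is non-empty.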
python -m torch.distributed.launch $DISTRIBUTED_ARGS \
pretrain_bert.py \
--num-layers 24 \
--hidden-size 1024 \
--num-attention-heads 16 \
--log-params-norm \
--log-num-zeros-in-grad \
--log-validation-ppl-to-tensorboard \
--log-timers-to-tensorboard \
--tensorboard-dir ${TENSORBOARD_DIR} \
--micro-batch-size 4 \
--global-batch-size 128 \
--seq-length 512 \
--max-position-embeddings 512 \
--train-iters $MAX_STEPS \
--timing-log-level 2 \
--lr-decay-iters 990000 \
--save $CHECKPOINT_PATH \
--load $CHECKPOINT_PATH \
--data-path $DATA_PATH \
--vocab-file /workspace/data/bert_data/vocab.txt \
--data-impl mmap \
--split 949,50,1 \
--distributed-backend nccl \
--lr 0.0001 \
--min-lr 0.00001 \
--lr-warmup-fraction 0.01 \
--log-interval 1 \
--save-interval 10000 \
--eval-interval 1000 \
--eval-iters 10 \
--tensor-model-parallel-size $TP_SIZE \
--pipeline-model-parallel-size $PP_SIZE \
${VP_SIZE:+--num-layers-per-virtual-pipeline-stage "$VP_SIZE"} \
--no-gradient-accumulation-fusion \
--fp16
#!/bin/bash
# Parameters
#SBATCH --account=adlr
#SBATCH --job-name=adlr-ci:megatron-job
#SBATCH --nodes=1
#SBATCH --partition=luna
DATA_PATH=/workspace/data/bert_data/my-bert_00_text_sentence
CHECKPOINT_PATH=/workspace/checkpoints
TENSORBOARD_DIR=/workspace/logs
srun --output $BASE_DIR/results/slurm-%j.out --error $BASE_DIR/results/slurm-%j.out --container-image gitlab-master.nvidia.com/dl/dgx/pytorch:21.12-py3-devel --container-mounts $BASE_DIR/logs:/workspace/logs,$BASE_DIR/checkpoints:/workspace/checkpoints,$BUILD_DIR:/workspace/megatron-lm,$DATA_DIR:/workspace/data --no-container-mount-home bash -c "
ls
cd /workspace/megatron-lm
./tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_resume_checkpoint_test.sh $DATA_PATH $CHECKPOINT_PATH $TENSORBOARD_DIR $TP_SIZE $PP_SIZE $NUM_NODES"
#!/bin/bash
# Parameters
#SBATCH --account=adlr
#SBATCH --job-name=adlr-ci:megatron-job
#SBATCH --nodes=1
#SBATCH --partition=luna
DATA_PATH=/workspace/data/bert_data/my-bert_00_text_sentence
CHECKPOINT_PATH=/workspace/checkpoints
TENSORBOARD_DIR=/workspace/logs
srun --output $BASE_DIR/results/slurm-%j.out --error $BASE_DIR/results/slurm-%j.out --container-image gitlab-master.nvidia.com/dl/dgx/pytorch:21.12-py3-devel --container-mounts $BASE_DIR/logs:/workspace/logs,$BASE_DIR/checkpoints:/workspace/checkpoints,$BUILD_DIR:/workspace/megatron-lm,$DATA_DIR:/workspace/data --no-container-mount-home bash -c "
ls
cd /workspace/megatron-lm
./tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh $DATA_PATH $CHECKPOINT_PATH $TENSORBOARD_DIR $TP_SIZE $PP_SIZE $NUM_NODES $MAX_STEPS $VP_SIZE"
#! /bin/bash
DATA_PATH=$1
CHECKPOINT_PATH=$2
TENSORBOARD_DIR=$3
TP_SIZE=$4
PP_SIZE=$5
NNODES=$6
GPUS_PER_NODE=8
# Change for multinode config
MASTER_ADDR=localhost
MASTER_PORT=6000
NODE_RANK=0
WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
export CUDA_DEVICE_MAX_CONNECTIONS=1
# Runs a small GPT model (12 layers, hidden size 512, 8 heads) for functional testing
DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
# Run for 100 iterations and save checkpoint at 50
python -m torch.distributed.launch $DISTRIBUTED_ARGS \
pretrain_gpt.py \
--use-checkpoint-args \
--use-checkpoint-opt_param-scheduler \
--num-layers 12 \
--hidden-size 512 \
--num-attention-heads 8 \
--log-params-norm \
--log-num-zeros-in-grad \
--log-validation-ppl-to-tensorboard \
--log-timers-to-tensorboard \
--tensorboard-dir ${TENSORBOARD_DIR} \
--micro-batch-size 4 \
--global-batch-size 32 \
--seq-length 1024 \
--max-position-embeddings 1024 \
--train-iters 100 \
--timing-log-level 2 \
--lr-decay-iters 320000 \
--save $CHECKPOINT_PATH \
--load $CHECKPOINT_PATH \
--data-path $DATA_PATH \
--vocab-file /workspace/data/gpt3_data/gpt2-vocab.json \
--merge-file /workspace/data/gpt3_data/gpt2-merges.txt \
--data-impl mmap \
--split 949,50,1 \
--distributed-backend nccl \
--lr 0.00015 \
--lr-decay-style cosine \
--min-lr 1.0e-5 \
--weight-decay 1e-2 \
--clip-grad 1.0 \
--lr-warmup-fraction .01 \
--log-interval 1 \
--save-interval 50 \
--eval-interval 1000 \
--eval-iters 10 \
--tensor-model-parallel-size $TP_SIZE \
--pipeline-model-parallel-size $PP_SIZE \
--no-gradient-accumulation-fusion \
--fp16
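# Overwrite the tracker file so the run below resumes from the iteration-50 checkpoint
# rather than the final one.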
echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt
# Resume from 50th iteration ckpt and continue to 100 iterations
python -m torch.distributed.launch $DISTRIBUTED_ARGS \
pretrain_gpt.py \
--use-checkpoint-args \
--use-checkpoint-opt_param-scheduler \
--num-layers 12 \
--hidden-size 512 \
--num-attention-heads 8 \
--log-params-norm \
--log-num-zeros-in-grad \
--log-validation-ppl-to-tensorboard \
--log-timers-to-tensorboard \
--tensorboard-dir ${TENSORBOARD_DIR} \
--micro-batch-size 4 \
--global-batch-size 32 \
--seq-length 1024 \
--max-position-embeddings 1024 \
--train-iters 100 \
--timing-log-level 2 \
--lr-decay-iters 320000 \
--save $CHECKPOINT_PATH \
--load $CHECKPOINT_PATH \
--data-path $DATA_PATH \
--vocab-file /workspace/data/gpt3_data/gpt2-vocab.json \
--merge-file /workspace/data/gpt3_data/gpt2-merges.txt \
--data-impl mmap \
--split 949,50,1 \
--distributed-backend nccl \
--lr 0.00015 \
--lr-decay-style cosine \
--min-lr 1.0e-5 \
--weight-decay 1e-2 \
--clip-grad 1.0 \
--lr-warmup-fraction .01 \
--log-interval 1 \
--save-interval 10000 \
--eval-interval 1000 \
--eval-iters 10 \
--tensor-model-parallel-size $TP_SIZE \
--pipeline-model-parallel-size $PP_SIZE \
--no-gradient-accumulation-fusion \
--fp16
#! /bin/bash
DATA_PATH=$1
CHECKPOINT_PATH=$2
TENSORBOARD_DIR=$3
TP_SIZE=$4
PP_SIZE=$5
NNODES=$6
MAX_STEPS=$7
VP_SIZE=$8
MBS=$9
GBS=${10}
GPUS_PER_NODE=8
# Change for multinode config
MASTER_ADDR=localhost
MASTER_PORT=6000
NODE_RANK=0
WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
export CUDA_DEVICE_MAX_CONNECTIONS=1
# Runs a small GPT model (12 layers, hidden size 512, 8 heads) for functional testing
DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
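# VP_SIZE, MBS and GBS are optional: ${VP_SIZE:+...} adds the virtual-pipeline flag only when
# VP_SIZE is non-empty, while ${MBS:-4} and ${GBS:-32} fall back to defaults when unset.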
python -m torch.distributed.launch $DISTRIBUTED_ARGS \
pretrain_gpt.py \
--num-layers 12 \
--hidden-size 512 \
--num-attention-heads 8 \
--log-params-norm \
--log-num-zeros-in-grad \
--log-validation-ppl-to-tensorboard \
--log-timers-to-tensorboard \
--tensorboard-dir ${TENSORBOARD_DIR} \
--micro-batch-size ${MBS:-4} \
--global-batch-size ${GBS:-32} \
--seq-length 1024 \
--max-position-embeddings 1024 \
--train-iters $MAX_STEPS \
--timing-log-level 2 \
--lr-decay-iters 320000 \
--save $CHECKPOINT_PATH \
--load $CHECKPOINT_PATH \
--data-path $DATA_PATH \
--vocab-file /workspace/data/gpt3_data/gpt2-vocab.json \
--merge-file /workspace/data/gpt3_data/gpt2-merges.txt \
--data-impl mmap \
--split 949,50,1 \
--distributed-backend nccl \
--lr 0.00015 \
--lr-decay-style cosine \
--min-lr 1.0e-5 \
--weight-decay 1e-2 \
--clip-grad 1.0 \
--lr-warmup-fraction .01 \
--log-interval 1 \
--save-interval 10000 \
--eval-interval 1000 \
--eval-iters 10 \
--tensor-model-parallel-size $TP_SIZE \
--pipeline-model-parallel-size $PP_SIZE \
${VP_SIZE:+--num-layers-per-virtual-pipeline-stage "$VP_SIZE"} \
--no-gradient-accumulation-fusion \
--fp16
#!/bin/bash
# Parameters
#SBATCH --account=adlr
#SBATCH --job-name=adlr-ci:megatron-job
#SBATCH --nodes=1
#SBATCH --partition=luna
DATA_PATH=/workspace/data/gpt3_data/my-gpt3_00_text_document
CHECKPOINT_PATH=/workspace/checkpoints
TENSORBOARD_DIR=/workspace/logs
srun --output $BASE_DIR/results/slurm-%j.out --error $BASE_DIR/results/slurm-%j.out --container-image gitlab-master.nvidia.com/dl/dgx/pytorch:21.12-py3-devel --container-mounts $BASE_DIR/logs:/workspace/logs,$BASE_DIR/checkpoints:/workspace/checkpoints,$BUILD_DIR:/workspace/megatron-lm,$DATA_DIR:/workspace/data --no-container-mount-home bash -c "
ls
cd /workspace/megatron-lm
./tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh $DATA_PATH $CHECKPOINT_PATH $TENSORBOARD_DIR $TP_SIZE $PP_SIZE $NUM_NODES"
#!/bin/bash
# Parameters
#SBATCH --account=adlr
#SBATCH --job-name=adlr-ci:megatron-job
#SBATCH --nodes=1
#SBATCH --partition=luna
DATA_PATH=/workspace/data/gpt3_data/my-gpt3_00_text_document
CHECKPOINT_PATH=/workspace/checkpoints
TENSORBOARD_DIR=/workspace/logs
srun --output $BASE_DIR/results/slurm-%j.out --error $BASE_DIR/results/slurm-%j.out --container-image gitlab-master.nvidia.com/dl/dgx/pytorch:21.12-py3-devel --container-mounts $BASE_DIR/logs:/workspace/logs,$BASE_DIR/checkpoints:/workspace/checkpoints,$BUILD_DIR:/workspace/megatron-lm,$DATA_DIR:/workspace/data --no-container-mount-home bash -c "
ls
cd /workspace/megatron-lm
./tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh $DATA_PATH $CHECKPOINT_PATH $TENSORBOARD_DIR $TP_SIZE $PP_SIZE $NUM_NODES $MAX_STEPS $VP_SIZE $MBS $GBS"
import torch
from tests.unit_tests.test_utilities import Utils
import megatron.core.pipeline_parallel.schedules as schedule
from pytest_mock import mocker
import pytest
rank = Utils.rank

def test_get_forward_backward_func():
    Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=1)
    assert(schedule.get_forward_backward_func() == schedule.forward_backward_no_pipelining)
    Utils.destroy_model_parallel()
    Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=4)
    assert(schedule.get_forward_backward_func() == schedule.forward_backward_pipelining_without_interleaving)
    Utils.destroy_model_parallel()
    Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=4, virtual_pipeline_model_parallel_size=2)
    assert(schedule.get_forward_backward_func() == schedule.forward_backward_pipelining_with_interleaving)
    Utils.destroy_model_parallel()

def test_deallocate_output_tensor():
    out = torch.tensor([[1, 2, 3], [4, 5, 6]])
    schedule.deallocate_output_tensor(out)
    assert(out.nelement() == 1)

def test_forward_backward_func_without_pipeline_parallel(mocker):
    from megatron.core.pipeline_parallel import get_forward_backward_func
    Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=1)

    def forward_step_func(data_iterator, model):
        import os
        rank = int(os.environ['LOCAL_RANK'])
        dummy_data = torch.ones(1, 4)
        def loss_func(output_tensor):
            return rank, {'loss_reduced': rank}
        return model(dummy_data), loss_func

    model = torch.nn.Linear(4, 1)
    model.model_type = 'unit-test'
    def set_input_tensor(input_tensor):
        return None
    model.set_input_tensor = set_input_tensor

    forward_backward_func = get_forward_backward_func()
    assert(schedule.get_forward_backward_func() == schedule.forward_backward_no_pipelining)

    mocker.patch("megatron.core.pipeline_parallel.schedules.custom_backward", return_value=2)
    losses_reduced = forward_backward_func(
        forward_step_func=forward_step_func,
        data_iterator=None,
        model=[model],
        num_microbatches=4,
        forward_only=False)
    loss_reduced_expected = [{'loss_reduced': rank}, {'loss_reduced': rank}, {'loss_reduced': rank}, {'loss_reduced': rank}]
    for i, j in zip(losses_reduced, loss_reduced_expected):
        print(losses_reduced)
        assert(i['loss_reduced'] == j['loss_reduced'])
    Utils.destroy_model_parallel()

def test_forward_backward_func_with_pipeline_parallel(mocker):
    from megatron.core.pipeline_parallel import get_forward_backward_func
    Utils.initialize_model_parallel(tensor_model_parallel_size=1, pipeline_model_parallel_size=4)

    def forward_step_func(data_iterator, model):
        import os
        rank = int(os.environ['LOCAL_RANK'])
        def loss_func(output_tensor):
            return rank, {'loss_reduced': rank}
        return torch.rand(512, 8, 256).cuda(), loss_func

    model = torch.nn.Linear(4, 1)
    model.model_type = 'unit-test'
    def set_input_tensor(input_tensor):
        return None
    model.set_input_tensor = set_input_tensor

    forward_backward_func = get_forward_backward_func()
    assert(schedule.get_forward_backward_func() == schedule.forward_backward_pipelining_without_interleaving)

    sequence_length = 512
    micro_batch_size = 8
    hidden_size = 256
    losses_reduced = forward_backward_func(
        forward_step_func=forward_step_func,
        data_iterator=None,
        dtype=torch.float32,
        model=[model],
        num_microbatches=micro_batch_size,
        tensor_shape=[sequence_length, micro_batch_size, hidden_size],
        decoder_seq_length=sequence_length,
        sequence_parallel=False,
        forward_only=True)
    loss_reduced_expected = [{'loss_reduced': rank}, {'loss_reduced': rank}, {'loss_reduced': rank}, {'loss_reduced': rank}]
    for i, j in zip(losses_reduced, loss_reduced_expected):
        print(losses_reduced)
        assert(i['loss_reduced'] == j['loss_reduced'])
    Utils.destroy_model_parallel()

"""
def test_forward_backward_func_with_interleaving(mocker):
    from megatron.core.pipeline_parallel import get_forward_backward_func
    from megatron.core.enums import ModelType
    Utils.initialize_model_parallel(tensor_model_parallel_size=1, pipeline_model_parallel_size=4, virtual_pipeline_model_parallel_size=2)

    def forward_step_func(data_iterator, model):
        import os
        rank = int(os.environ['LOCAL_RANK'])
        def loss_func(output_tensor):
            return rank, {'loss_reduced': rank}
        return torch.rand(512, 8, 256).cuda(), loss_func

    model = torch.nn.Linear(4, 1)
    def set_input_tensor(input_tensor):
        return None
    model.set_input_tensor = set_input_tensor

    forward_backward_func = get_forward_backward_func()
    assert(schedule.get_forward_backward_func() == schedule.forward_backward_pipelining_with_interleaving)

    sequence_length = 512
    micro_batch_size = 8
    hidden_size = 256

    mocker.patch("megatron.core.pipeline_parallel.schedules.custom_backward", return_value=2)

    with pytest.raises(RuntimeError):
        model.model_type = ModelType.encoder_and_decoder
        forward_backward_func(
            forward_step_func=forward_step_func,
            data_iterator=range(0, 100),
            dtype=torch.float32,
            model=[model, model],
            num_microbatches=micro_batch_size,
            tensor_shape=[sequence_length, micro_batch_size, hidden_size],
            decoder_seq_length=sequence_length,
            sequence_parallel=False,
            forward_only=True)

    with pytest.raises(RuntimeError):
        model.model_type = ModelType.encoder_or_decoder
        forward_backward_func(
            forward_step_func=forward_step_func,
            data_iterator=range(0, 100),
            dtype=torch.float32,
            model=[model, model],
            num_microbatches=micro_batch_size,
            tensor_shape=[sequence_length, micro_batch_size, hidden_size],
            decoder_seq_length=256,
            sequence_parallel=False,
            forward_only=True)

    with pytest.raises(RuntimeError):
        model.model_type = ModelType.encoder_or_decoder
        forward_backward_func(
            forward_step_func=forward_step_func,
            data_iterator=range(0, 100),
            dtype=torch.float32,
            model=[model, model],
            num_microbatches=7,
            tensor_shape=[sequence_length, micro_batch_size, hidden_size],
            decoder_seq_length=512,
            sequence_parallel=False,
            forward_only=True)

    model.model_type = ModelType.encoder_or_decoder
    losses_reduced = forward_backward_func(
        forward_step_func=forward_step_func,
        data_iterator=range(0, 100),
        dtype=torch.float32,
        model=[model, model],
        num_microbatches=micro_batch_size,
        tensor_shape=[sequence_length, micro_batch_size, hidden_size],
        decoder_seq_length=sequence_length,
        sequence_parallel=True,
        forward_only=True)
    loss_reduced_expected = [{'loss_reduced': rank}, {'loss_reduced': rank}, {'loss_reduced': rank}, {'loss_reduced': rank}]
    for i, j in zip(losses_reduced, loss_reduced_expected):
        print(losses_reduced)
        assert(i['loss_reduced'] == j['loss_reduced'])
    Utils.destroy_model_parallel()
"""
from megatron.core.tensor_parallel.cross_entropy import vocab_parallel_cross_entropy
import torch
from tests.unit_tests.test_utilities import Utils
import numpy as np

def test_vocab_parallel_cross_entropy():
    Utils.initialize_model_parallel(4, 2)
    vocab_parallel_logits = torch.range(0, 7).repeat(16, 4).cuda()
    target = torch.arange(0, 32, 2).cuda()
    output = vocab_parallel_cross_entropy(vocab_parallel_logits, target)
    expected_output = torch.tensor([10.2309, 8.2309, 6.2309, 4.2309, 10.2309, 8.2309, 6.2309, 4.2309,
                                    10.2309, 8.2309, 6.2309, 4.2309, 10.2309, 8.2309, 6.2309, 4.2309]).cuda()
    assert(torch.equal(torch.round(expected_output), torch.round(output)))
    Utils.destroy_model_parallel()
from megatron.core.tensor_parallel.data import broadcast_data
import torch
from tests.unit_tests.test_utilities import Utils

def test_broadcast_data():
    Utils.initialize_model_parallel(2, 4)
    input_data = {
        0: torch.ones((8, 8)).cuda() * 0.0,
        1: torch.ones((8, 8)).cuda() * 1.0,
        2: torch.ones((8, 8)).cuda() * 2.0,
        3: torch.ones((8, 8)).cuda() * 3.0,
        4: torch.ones((8, 8)).cuda() * 4.0,
        5: torch.ones((8, 8)).cuda() * 5.0,
        6: torch.ones((8, 8)).cuda() * 6.0,
        7: torch.ones((8, 8)).cuda() * 7.0
    }
    dtype = torch.float32
    actual_output = broadcast_data([0, 1], input_data, dtype)
    assert(torch.equal(actual_output[0], input_data[0]))
    assert(torch.equal(actual_output[1], input_data[1]))
    Utils.destroy_model_parallel()
from megatron.core.tensor_parallel import mappings
from tests.unit_tests.test_utilities import Utils
import torch

def test_CopyToModelParallelRegion():
    Utils.initialize_model_parallel(4, 2)
    input_data = torch.ones((1)).cuda() * Utils.rank
    output_data = mappings._CopyToModelParallelRegion.backward(None, input_data)
    result = torch.ones(1).cuda()
    result = result * 22 if Utils.rank >= 4 else result * 6
    assert(torch.equal(output_data, result))
    assert(torch.equal(input_data, mappings.copy_to_tensor_model_parallel_region(input_data)))
    assert(torch.equal(input_data, mappings._CopyToModelParallelRegion.symbolic(None, input_data)))
    Utils.destroy_model_parallel()

def test_ReduceFromModelParallelRegion():
    Utils.initialize_model_parallel(4, 2)
    input_data = torch.ones((1)).cuda() * Utils.rank
    output_data = mappings._ReduceFromModelParallelRegion.symbolic(None, input_data)
    result = torch.ones(1).cuda()
    result = result * 22 if Utils.rank >= 4 else result * 6
    assert(torch.equal(output_data, result))
    input_data = torch.ones((1)).cuda() * Utils.rank
    assert(torch.equal(mappings.reduce_from_tensor_model_parallel_region(input_data), result))
    assert(torch.equal(input_data, mappings._ReduceFromModelParallelRegion.backward(None, input_data)))
    Utils.destroy_model_parallel()

def test_ScatterToModelParallelRegion():
    Utils.initialize_model_parallel(4, 2)
    input_data = torch.rand((8, 4)).cuda()
    output_data = mappings.scatter_to_tensor_model_parallel_region(input_data)
    req_dim = int(Utils.rank % (Utils.world_size / 2))
    assert(torch.equal(output_data, input_data[:, req_dim].reshape((8, 1))))
    output_data = mappings._ScatterToModelParallelRegion.symbolic(None, input_data)
    assert(torch.equal(output_data, input_data[:, req_dim].reshape((8, 1))))
    input_data = torch.ones(8).cuda() * Utils.rank
    actual_output_data = mappings._ScatterToModelParallelRegion.backward(None, input_data)
    expected_output = torch.cat((
        torch.ones(8) * 0,
        torch.ones(8) * 1,
        torch.ones(8) * 2,
        torch.ones(8) * 3)).cuda()
    if (Utils.rank >= 4):
        expected_output = expected_output + 4
    assert(torch.equal(actual_output_data, expected_output))
    Utils.destroy_model_parallel()

def test_GatherFromModelParallelRegion():
    Utils.initialize_model_parallel(4, 2)
    input_data = torch.rand((8, 4)).cuda()
    req_dim = int(Utils.rank % (Utils.world_size / 2))
    output_data = mappings._GatherFromModelParallelRegion.backward(None, input_data)
    assert(torch.equal(output_data, input_data[:, req_dim].reshape((8, 1))))
    input_data = torch.ones(8).cuda() * Utils.rank
    actual_output_data = mappings.gather_from_tensor_model_parallel_region(input_data)
    expected_output = torch.cat((
        torch.ones(8) * 0,
        torch.ones(8) * 1,
        torch.ones(8) * 2,
        torch.ones(8) * 3)).cuda()
    if (Utils.rank >= 4):
        expected_output = expected_output + 4
    assert(torch.equal(actual_output_data, expected_output))
    assert(torch.equal(mappings._GatherFromModelParallelRegion.symbolic(None, input_data), expected_output))
    Utils.destroy_model_parallel()

def test_ScatterToSequenceParallelRegion():
    Utils.initialize_model_parallel(4, 2)
    input_data = torch.rand((8, 4)).cuda()
    req_dim = int(Utils.rank % (Utils.world_size / 2)) * 2
    output_data = mappings._ScatterToSequenceParallelRegion.symbolic(None, input_data)
    assert(torch.equal(output_data, input_data[req_dim:req_dim + 2, :]))
    output_data = mappings.scatter_to_sequence_parallel_region(input_data)
    assert(torch.equal(output_data, input_data[req_dim:req_dim + 2, :]))
    input_data = torch.ones(4).cuda() * Utils.rank
    output_data = mappings._ScatterToModelParallelRegion.backward(None, input_data)
    expected_output = torch.concat((
        torch.ones(4) * 0,
        torch.ones(4) * 1,
        torch.ones(4) * 2,
        torch.ones(4) * 3)).cuda()
    if (Utils.rank >= 4):
        expected_output = expected_output + 4
    assert(torch.equal(output_data, expected_output))
    Utils.destroy_model_parallel()

def test_GatherFromSequenceParallelRegion():
    Utils.initialize_model_parallel(4, 2)
    input_data = torch.ones(4).cuda() * Utils.rank
    output_data = mappings.gather_from_sequence_parallel_region(input_data)
    expected_output = torch.concat((
        torch.ones(4) * 0,
        torch.ones(4) * 1,
        torch.ones(4) * 2,
        torch.ones(4) * 3)).cuda()
    if (Utils.rank >= 4):
        expected_output = expected_output + 4
    assert(torch.equal(output_data, expected_output))
    assert(torch.equal(mappings._GatherFromSequenceParallelRegion.symbolic(None, input_data), expected_output))
    input_data = torch.vstack((
        torch.ones(4) * 0,
        torch.ones(4) * 1,
        torch.ones(4) * 2,
        torch.ones(4) * 3)).cuda()
    class Ctx:
        tensor_parallel_output_grad = True
    output_data = mappings._GatherFromSequenceParallelRegion.backward(Ctx(), input_data)
    expected_output = torch.ones((1, 4)).cuda() * 4 * int(Utils.rank % 4)
    assert(torch.equal(output_data[0], expected_output))
    Utils.destroy_model_parallel()

def test_ReduceScatterToSequenceParallelRegion():
    Utils.initialize_model_parallel(4, 2)
    input_data = torch.vstack((
        torch.ones(4) * 0,
        torch.ones(4) * 1,
        torch.ones(4) * 2,
        torch.ones(4) * 3)).cuda()
    output_data = mappings.reduce_scatter_to_sequence_parallel_region(input_data)
    expected_output = torch.ones(4).cuda() * 4 * int(Utils.rank % 4)
    assert(torch.equal(output_data[0], expected_output))
    assert(torch.equal(mappings._ReduceScatterToSequenceParallelRegion.symbolic(None, input_data), expected_output.reshape((1, 4))))
    input_data = torch.ones(4).cuda() * Utils.rank
    output_data = mappings._ReduceScatterToSequenceParallelRegion.backward(None, input_data)
    expected_output = torch.concat((
        torch.ones(4) * 0,
        torch.ones(4) * 1,
        torch.ones(4) * 2,
        torch.ones(4) * 3)).cuda()
    if (Utils.rank >= 4):
        expected_output = expected_output + 4
    assert(torch.equal(output_data, expected_output))
    Utils.destroy_model_parallel()
from megatron.core.tensor_parallel.random import CudaRNGStatesTracker
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
from megatron.core.tensor_parallel.random import _CUDA_RNG_STATE_TRACKER
from megatron.core.tensor_parallel.random import checkpoint
from tests.unit_tests.test_utilities import Utils
import pytest
import torch

def test_cuda_rng_states_tracker():
    rng_tracker = CudaRNGStatesTracker()
    rng_tracker.set_states({"state1": 1234})
    assert(rng_tracker.get_states()["state1"] == 1234)
    rng_tracker.reset()
    assert(rng_tracker.get_states() == {})
    seed = 1111
    rng_tracker.add("state2", seed)
    with pytest.raises(Exception):
        assert(rng_tracker.add("state3", seed))
    with pytest.raises(Exception):
        assert(rng_tracker.add("state2", 111))
    assert(rng_tracker.get_states()['state2'] is not None)
    with pytest.raises(Exception):
        assert()
    rng_tracker.fork("state2")
    torch.cuda.manual_seed(seed)
    rng_state = torch.cuda.get_rng_state()
    assert torch.equal(rng_tracker.get_states()['state2'], rng_state)

def test_model_parallel_cuda_manual_seed():
    Utils.initialize_model_parallel(4, 2)
    model_parallel_cuda_manual_seed(0)
    assert(_CUDA_RNG_STATE_TRACKER.get_states()['model-parallel-rng'] is not None)
    Utils.destroy_model_parallel()

def test_checkpoint():
    def test_forward(*input):
        return input[0] + input[1]
    assert(torch.equal(torch.ones(16) * 3, checkpoint(test_forward, None, torch.ones(16), torch.ones(16) * 2)))
    Utils.initialize_model_parallel()
    input1 = torch.ones((4, 4))
    checkpoint(test_forward, True, input1, torch.ones((4, 4)) * 2)
    assert(torch.equal(torch.ones(input1.numel()).cuda(), input1))
    Utils.destroy_model_parallel()
import torch
import megatron.core.tensor_parallel.utils as util
import megatron.core.parallel_state as ps
from tests.unit_tests.test_utilities import Utils
rank = Utils.rank

def test_split_tensor_along_last_dim():
    input_tensor = torch.rand((3, 4))
    torch.equal(input_tensor[0:2, 0:2], util.split_tensor_along_last_dim(input_tensor, 2)[0])
    torch.equal(input_tensor[2:, 2:], util.split_tensor_along_last_dim(input_tensor, 2)[1])

def test_split_tensor_into_1d_equal_chunks():
    Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=4)
    input_tensor = torch.rand((3, 4))
    output_tensor = util.split_tensor_into_1d_equal_chunks(input_tensor)
    if rank % 2 == 0:
        start = 0
        end = int(input_tensor.numel() / 2)
    else:
        start = int(input_tensor.numel() / 2)
        end = input_tensor.numel()
    assert torch.equal(output_tensor, input_tensor.flatten()[start:end])
    Utils.destroy_model_parallel()

def test_gather_split_1d_tensor():
    Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=4)
    input_tensor = torch.ones((2, 4)).cuda() * rank
    actual_output_tensor = util.gather_split_1d_tensor(input_tensor)
    if rank % 2 == 0:
        expected_output_tensor = torch.concat((input_tensor.flatten(), input_tensor.flatten() + 1))
    else:
        expected_output_tensor = torch.concat((input_tensor.flatten() - 1, input_tensor.flatten()))
    assert(torch.equal(actual_output_tensor, expected_output_tensor))
    Utils.destroy_model_parallel()

def test_vocab():
    global_vocab_size = 1600
    per_partition_vocab_size = 1600 / Utils.world_size
    assert((rank * per_partition_vocab_size, (rank + 1) * per_partition_vocab_size) == (util.VocabUtility.vocab_range_from_per_partition_vocab_size(global_vocab_size // Utils.world_size, rank, Utils.world_size)))
    assert((rank * per_partition_vocab_size, (rank + 1) * per_partition_vocab_size) == (util.VocabUtility.vocab_range_from_global_vocab_size(global_vocab_size, rank, Utils.world_size)))
def test_import():
    import megatron
import torch
import megatron.core.parallel_state as ps
import pytest
from tests.unit_tests.test_utilities import Utils
import os
rank = Utils.rank
world_size = Utils.world_size

def test_initialize__and_destroy_model_parallel():
    with pytest.raises(AssertionError):
        assert(ps.initialize_model_parallel())
    Utils.initialize_distributed()
    with pytest.raises(RuntimeError):
        assert(ps.initialize_model_parallel(tensor_model_parallel_size=2 * world_size))
    with pytest.raises(RuntimeError):
        assert(ps.initialize_model_parallel(pipeline_model_parallel_size=2 * world_size))
    with pytest.raises(RuntimeError):
        assert(ps.initialize_model_parallel(pipeline_model_parallel_size=world_size, tensor_model_parallel_size=world_size))
    with pytest.raises(RuntimeError):
        assert(ps.initialize_model_parallel(virtual_pipeline_model_parallel_size=2))
    Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=4)
    assert(ps.model_parallel_is_initialized())
    assert(ps.get_model_parallel_group() is not None)
    assert(ps.get_tensor_model_parallel_group() is not None)
    assert(ps.get_pipeline_model_parallel_group() is not None)
    assert(ps.get_data_parallel_group() is not None)
    Utils.destroy_model_parallel()
    assert(ps._MODEL_PARALLEL_GROUP is None)

def test_pipeline_parallel_initializations():
    Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=4)
    assert(ps.get_pipeline_model_parallel_first_rank() == rank % 2)
    assert(ps.get_data_parallel_src_rank() == rank)
    assert(ps.get_pipeline_model_parallel_next_rank() == ((rank + 2) % world_size))
    assert(ps.get_pipeline_model_parallel_prev_rank() == ((rank - 2) % world_size))
    Utils.destroy_model_parallel()

def test_data_parallel_initializations():
    Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size)
    assert(ps.get_data_parallel_src_rank() == rank)
    assert(ps.get_data_parallel_world_size() == 1)
    assert(ps.get_data_parallel_rank() == 0)
    Utils.destroy_model_parallel()

def test_tensor_model_parellel_world_size():
    Utils.initialize_model_parallel(tensor_model_parallel_size=world_size)
    assert(ps.get_tensor_model_parallel_world_size() == world_size)
    ps.set_tensor_model_parallel_world_size(None)
    assert(ps.get_tensor_model_parallel_world_size() == world_size)
    Utils.destroy_model_parallel()

def test_pipeline_model_parallel_world_size():
    Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size)
    assert(ps.get_pipeline_model_parallel_world_size() == world_size)
    ps.set_pipeline_model_parallel_world_size(None)
    assert(ps.get_pipeline_model_parallel_world_size() == world_size)
    Utils.destroy_model_parallel()

def test_tensor_model_parallel_rank():
    Utils.initialize_model_parallel(tensor_model_parallel_size=world_size)
    assert(ps.get_tensor_model_parallel_rank() == rank)
    ps.set_tensor_model_parallel_rank(None)
    assert(ps.get_tensor_model_parallel_rank() == rank)
    Utils.destroy_model_parallel()

def test_pipeline_model_parallel_rank():
    Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size)
    assert(ps.get_pipeline_model_parallel_rank() == rank)
    ps.set_pipeline_model_parallel_rank(None)
    assert(ps.get_pipeline_model_parallel_rank() == rank)
    Utils.destroy_model_parallel()

def test_is_pipeline_first_stage():
    Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size)
    assert(ps.is_pipeline_first_stage(ignore_virtual=True) == (rank == 0))
    assert(ps.is_pipeline_first_stage() == (rank == 0))
    Utils.destroy_model_parallel()

def test_is_pipeline_last_stage():
    Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size)
    assert(ps.is_pipeline_last_stage(ignore_virtual=True) == (rank == world_size - 1))
    assert(ps.is_pipeline_last_stage() == (rank == world_size - 1))
    Utils.destroy_model_parallel()

def test_virtual_pipeline_model_parallel_rank():
    Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size)
    ps.set_virtual_pipeline_model_parallel_rank(rank)
    assert(ps.get_virtual_pipeline_model_parallel_rank() == rank)
    Utils.destroy_model_parallel()

def test_get_tensor_model_parallel_src_rank():
    Utils.initialize_model_parallel(tensor_model_parallel_size=world_size)
    assert(ps.get_tensor_model_parallel_src_rank() == ((rank // world_size) * world_size))
    Utils.destroy_model_parallel()
import os
import torch
import megatron.core.parallel_state as ps

class Utils:

    world_size = torch.cuda.device_count()
    rank = int(os.environ['LOCAL_RANK'])

    @staticmethod
    def initialize_distributed():
        print(f'Initializing torch.distributed with rank: {Utils.rank}, world_size: {Utils.world_size}')
        torch.cuda.set_device(Utils.rank % torch.cuda.device_count())
        init_method = 'tcp://'
        master_ip = os.getenv('MASTER_ADDR', 'localhost')
        master_port = os.getenv('MASTER_PORT', '6000')
        init_method += master_ip + ':' + master_port
        torch.distributed.init_process_group(backend='nccl', world_size=Utils.world_size, rank=Utils.rank, init_method=init_method)

    @staticmethod
    def destroy_model_parallel():
        ps.destroy_model_parallel()
        torch.distributed.barrier()

    @staticmethod
    def initialize_model_parallel(tensor_model_parallel_size=1, pipeline_model_parallel_size=1, virtual_pipeline_model_parallel_size=None, pipeline_model_parallel_split_rank=None):
        ps.destroy_model_parallel()
        if not torch.distributed.is_initialized():
            Utils.initialize_distributed()
        ps.initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, virtual_pipeline_model_parallel_size, pipeline_model_parallel_split_rank)
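The commit does not show how these distributed unit tests are launched; as a rough sketch, assuming an 8-GPU node and a launcher that sets LOCAL_RANK (which the Utils class above reads), they could be run under torchrun:

# Hypothetical invocation, not part of this commit: torchrun starts one process per GPU and
# exports LOCAL_RANK, MASTER_ADDR and MASTER_PORT for Utils.initialize_distributed().
torchrun --nproc_per_node=8 -m pytest tests/unit_tests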
import pytest
import torch
import megatron.core.utils as util
import numpy as np

def test_divide_properly():
    assert util.divide(4, 2) == 2

def test_divide_improperly():
    with pytest.raises(AssertionError):
        util.divide(4, 5)

def test_global_memory_buffer():
    global_memory_buffer = util.GlobalMemoryBuffer()
    obtained_tensor = global_memory_buffer.get_tensor((3, 2), torch.float32, "test_tensor")
    expected_tensor = torch.empty((3, 2), dtype=torch.float32, device=torch.cuda.current_device())
    assert torch.equal(obtained_tensor, expected_tensor)

def test_make_viewless_tensor():
    inp = torch.rand((3, 4))
    assert(torch.equal(inp, util.make_viewless_tensor(inp, True, True)))
    assert(torch.equal(inp, util.make_viewless_tensor(inp, True, False)))

def test_safely_set_viewless_tensor_data():
    tensor = torch.zeros((3, 4))
    new_data_tensor = torch.tensor(np.random.rand(3, 4))
    util.safely_set_viewless_tensor_data(tensor, new_data_tensor)
    assert(torch.equal(tensor, new_data_tensor))

def test_assert_viewless_tensor():
    tensor = torch.rand((3, 4))
    assert(torch.equal(util.assert_viewless_tensor(tensor), tensor))
    input_tensor_list = [tensor, tensor, tensor]
    output_tensor_list = util.assert_viewless_tensor(input_tensor_list)
    for inp, out in zip(input_tensor_list, output_tensor_list):
        assert(torch.equal(inp, out))