Commit d520d24f authored by silencealiang

Merge branch 'main' into 'main'

Upgrade megatron to v0.10

See merge request !3
parents 3aca1415 481609bb
Pipeline #2055 failed with stages in 0 seconds
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
from typing import List, Literal, Optional, Tuple
import torch
from torch import Tensor
from megatron.core import InferenceParams, parallel_state, tensor_parallel
from megatron.core.config_logger import has_config_logger_enabled, log_config_to_disk
from megatron.core.dist_checkpointing.mapping import ShardedStateDict
from megatron.core.enums import ModelType
from megatron.core.models.common.embeddings.language_model_embedding import LanguageModelEmbedding
from megatron.core.models.common.embeddings.rotary_pos_embedding import RotaryEmbedding
from megatron.core.models.common.language_module.language_module import LanguageModule
from megatron.core.packed_seq_params import PackedSeqParams
from megatron.core.transformer.module import MegatronModule
from megatron.core.transformer.spec_utils import ModuleSpec
from megatron.core.transformer.transformer_block import TransformerBlock
from megatron.core.transformer.transformer_config import TransformerConfig
class T5LMHead(MegatronModule):
"""Masked LM head for T5
Args:
config (TransformerConfig): transformer config
parallel_output (bool): whether output logits are distributed or not.
vocab_size (int): vocabulary size
pre_process (bool): Include embedding layer
share_embeddings_and_output_weights (bool): When True, input
embeddings and output logit weights are shared.
"""
def __init__(
self,
config: TransformerConfig,
parallel_output: bool,
vocab_size: int,
pre_process: bool = True,
share_embeddings_and_output_weights: bool = False,
):
super(T5LMHead, self).__init__(config=config)
if has_config_logger_enabled(config):
log_config_to_disk(config, locals(), prefix=type(self).__name__)
self.parallel_output = parallel_output
self.output_layer = tensor_parallel.ColumnParallelLinear(
config.hidden_size,
vocab_size,
config=config,
init_method=config.init_method,
bias=share_embeddings_and_output_weights,
skip_bias_add=not share_embeddings_and_output_weights,
gather_output=not self.parallel_output,
skip_weight_param_allocation=pre_process and share_embeddings_and_output_weights,
)
def forward(self, hidden_states: Tensor, word_embeddings_weight: Tensor) -> Tensor:
"""Forward pass.
Args:
hidden_states (Tensor): output hidden states from decoder
word_embeddings_weight (Tensor): word embedding weight
Returns:
Tensor: logits tensor
"""
logits, _ = self.output_layer(hidden_states, weight=word_embeddings_weight)
return logits
class T5Model(LanguageModule):
"""T5 Language model.
Args:
config (TransformerConfig): transformer config
encoder_config (TransformerConfig): encoder transformer config
transformer_encoder_layer_spec (ModuleSpec): transformer layer
customization specs for encoder
transformer_decoder_layer_spec (ModuleSpec): transformer layer
customization specs for decoder
vocab_size (int): vocabulary size
max_sequence_length (int): maximum size of sequence. This is used for positional embedding
pre_process (bool): Include embedding layer (used with pipeline parallelism)
post_process (bool): Include an output layer (used with pipeline parallelism)
fp16_lm_cross_entropy (bool, optional): Defaults to False
parallel_output (bool): Do not gather the outputs,
keep them split across tensor parallel ranks
share_embeddings_and_output_weights (bool): When True,
input embeddings and output logit weights are shared. Defaults to False.
position_embedding_type (string): Position embedding type.
Options ['learned_absolute', 'rope'].
Default is 'learned_absolute'.
rotary_percent (float): Percent of rotary dimension to use for rotary position embeddings.
Defaults to 1.0 (100%). Ignored unless position_embedding_type is 'rope'.
seq_len_interpolation_factor (float): scale of linearly interpolating
RoPE for longer sequences. The value must be a float larger than 1.0.
Defaults to None.
add_encoder (bool): Create the encoder (used with pipeline parallelism).
When using pipelining, the encoder will only be created on a subset
of the pipeline ranks.
add_decoder (bool): Create the decoder (used with pipeline parallelism).
As with `add_encoder`, when using this model and pipelining,
the decoder will only be created on a subset of the pipeline ranks.
"""
def __init__(
self,
config: TransformerConfig,
encoder_config: TransformerConfig,
transformer_encoder_layer_spec: ModuleSpec,
transformer_decoder_layer_spec: ModuleSpec,
vocab_size: int,
max_sequence_length: int,
pre_process: bool = True,
post_process: bool = True,
fp16_lm_cross_entropy: bool = False,
parallel_output: bool = True,
share_embeddings_and_output_weights: bool = False,
position_embedding_type: Literal['learned_absolute', 'rope'] = 'learned_absolute',
rotary_percent: float = 1.0,
seq_len_interpolation_factor: Optional[float] = None,
add_encoder: bool = True,
add_decoder: bool = True,
):
super(T5Model, self).__init__(config=config)
self.config: TransformerConfig = config
self.encoder_config: TransformerConfig = encoder_config
self.transformer_encoder_layer_spec: ModuleSpec = transformer_encoder_layer_spec
self.transformer_decoder_layer_spec: ModuleSpec = transformer_decoder_layer_spec
self.vocab_size = vocab_size
self.max_sequence_length = max_sequence_length
self.pre_process = pre_process
self.post_process = post_process
self.add_encoder = add_encoder
self.add_decoder = add_decoder
self.fp16_lm_cross_entropy = fp16_lm_cross_entropy
self.parallel_output = parallel_output
self.share_embeddings_and_output_weights = share_embeddings_and_output_weights
self.position_embedding_type = position_embedding_type
self.encoder_hidden_state = None
self.model_type = ModelType.encoder_and_decoder
# Tells schedules.py that this model has a skip connection
# between the encoder's output and the decoder
# (and hence both the encoder and decoder's tensors are required for correct backprop).
self.xattn_needed = True
# specify the position embeddings as a member
# variable in the T5 class so that they are easy to
# find for `finalize_model_grads._allreduce_position_embedding_grads`
self.position_embeddings = None
if self.pre_process:
self.embedding = LanguageModelEmbedding(
config=self.config,
vocab_size=self.vocab_size,
max_sequence_length=self.max_sequence_length,
position_embedding_type=self.position_embedding_type,
)
if position_embedding_type == "learned_absolute":
self.position_embeddings = self.embedding.position_embeddings
else:
self.position_embeddings = None
# Rotary Position Embeddings
if self.position_embedding_type == 'rope':
self.rotary_pos_emb = RotaryEmbedding(
kv_channels=self.config.kv_channels,
rotary_percent=rotary_percent,
rotary_interleaved=self.config.rotary_interleaved,
seq_len_interpolation_factor=seq_len_interpolation_factor,
use_cpu_initialization=self.config.use_cpu_initialization,
)
# Transformer encoder
encoder_spec, decoder_spec = (
self.transformer_encoder_layer_spec,
self.transformer_decoder_layer_spec,
)
if self.add_encoder:
self.encoder = TransformerBlock(
config=self.encoder_config,
spec=encoder_spec,
pre_process=self.pre_process,
post_process=self.post_process,
)
else:
self.encoder = None
if self.add_decoder:
# Transformer decoder
self.decoder = TransformerBlock(
config=self.config,
spec=decoder_spec,
pre_process=self.pre_process,
post_process=self.post_process,
)
else:
self.decoder = None
# Output
if post_process:
self.lm_head = T5LMHead(
config,
parallel_output,
self.vocab_size,
self.pre_process,
self.share_embeddings_and_output_weights,
)
self.output_layer = self.lm_head.output_layer
if self.pre_process or self.post_process:
self.setup_embeddings_and_output_layer()
def forward(
self,
encoder_input_ids: Tensor,
decoder_input_ids: Tensor,
encoder_attn_mask: Tensor,
decoder_attn_mask: Tensor,
encoder_decoder_attn_mask: Tensor,
lm_labels: Tensor = None,
encoder_hidden_states: Tensor = None,
output_encoder_hidden_only: bool = False,
inference_params: InferenceParams = None,
packed_seq_params: PackedSeqParams = None,
) -> Tensor:
"""Forward pass.
Args:
encoder_input_ids (Tensor): input ids for encoder
decoder_input_ids (Tensor): input ids for decoder
encoder_attn_mask (Tensor): self-attention mask for encoder
decoder_attn_mask (Tensor): self-attention mask for decoder
encoder_decoder_attn_mask (Tensor): cross-attention mask between encoder and decoder
lm_labels (Tensor): labels for decoder output
inference_params (InferenceParams): relevant arguments for inferencing
Returns:
Tensor: loss tensor
"""
## Encoder forward
if encoder_hidden_states is None:
# Encoder position ids
encoder_position_ids = t5_position_ids(encoder_input_ids)
# Encoder embedding.
if self.pre_process:
encoder_input = self.embedding(
input_ids=encoder_input_ids, position_ids=encoder_position_ids
)
else:
# intermediate stage of pipeline
encoder_input = None
# Rotary positional embeddings
rotary_pos_emb = None
if self.position_embedding_type == 'rope':
rotary_seq_len = self.rotary_pos_emb.get_rotary_seq_len(
inference_params, self.encoder, encoder_input, self.config, packed_seq_params
)
rotary_pos_emb = self.rotary_pos_emb(rotary_seq_len)
# Run encoder.
if self.add_encoder:
encoder_hidden_states = self.encoder(
hidden_states=encoder_input,
attention_mask=encoder_attn_mask,
inference_params=inference_params,
rotary_pos_emb=rotary_pos_emb,
)
else:
encoder_hidden_states = self.encoder_hidden_state
if not self.add_decoder or output_encoder_hidden_only:
return encoder_hidden_states
## Decoder forward
# Decoder position ids
decoder_position_ids = t5_position_ids(decoder_input_ids)
# Decoder embedding.
if self.pre_process:
decoder_input = self.embedding(
input_ids=decoder_input_ids, position_ids=decoder_position_ids
)
else:
# intermediate stage of pipeline
decoder_input = None ### should it take encoder_hidden_states
# Rotary positional embeddings
rotary_pos_emb = None
if self.position_embedding_type == 'rope':
rotary_seq_len = self.rotary_pos_emb.get_rotary_seq_len(
inference_params, self.encoder, encoder_input, self.config, packed_seq_params
)
rotary_pos_emb = self.rotary_pos_emb(rotary_seq_len)
# Run decoder.
decoder_hidden_states = self.decoder(
hidden_states=decoder_input,
attention_mask=decoder_attn_mask,
context=encoder_hidden_states,
context_mask=encoder_decoder_attn_mask,
inference_params=inference_params,
rotary_pos_emb=rotary_pos_emb,
)
if self.post_process:
lm_logits = self.lm_head(
decoder_hidden_states, self.shared_embedding_or_output_weight()
)
if lm_labels is None:
# [s b h] => [b s h]
return lm_logits.transpose(0, 1).contiguous()
else:
# [b s] => [s b]
lm_loss = self.compute_language_model_loss(lm_labels, lm_logits)
return lm_loss
else:
return decoder_hidden_states
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
# This is usually handled in schedules.py but some inference code still
# gives us non-lists or None
if not isinstance(input_tensor, list):
input_tensor = [input_tensor]
if self.add_encoder and self.add_decoder:
assert (
len(input_tensor) == 1
), 'input_tensor should only be length 1 for stage with both encoder and decoder'
self.encoder.set_input_tensor(input_tensor[0])
elif self.add_encoder:
assert (
len(input_tensor) == 1
), 'input_tensor should only be length 1 for stage with only encoder'
self.encoder.set_input_tensor(input_tensor[0])
elif self.add_decoder:
if len(input_tensor) == 2:
self.decoder.set_input_tensor(input_tensor[0])
self.encoder_hidden_state = input_tensor[1]
elif len(input_tensor) == 1:
self.decoder.set_input_tensor(None)
self.encoder_hidden_state = input_tensor[0]
else:
raise Exception('input_tensor must have either length 1 or 2')
else:
raise Exception('Stage must have at least either encoder or decoder')
def shared_embedding_or_output_weight(self) -> Tensor:
"""Function to share the input embeddings and output logit weights."""
if self.pre_process:
return self.embedding.word_embeddings.weight
elif self.post_process:
return self.lm_head.output_layer.weight
return None
def sharded_state_dict(
self,
prefix: str = '',
sharded_offsets: Tuple[Tuple[int, int, int]] = (),
metadata: Optional[dict] = None,
) -> ShardedStateDict:
"""Sharded state dict implementation handling duplication of encoder and decoder layers.
Some layers (output, embedding) are shared between the encoder and decoder.
This method sets the replica_id for them to ensure there is only one
layer instance with replica_id (0, 0, 0).
Args:
prefix (str): Module name prefix.
sharded_offsets (tuple): PP related offsets, expected to be empty at this module level.
metadata (Optional[Dict]): metadata controlling sharded state dict creation.
Returns:
ShardedStateDict: sharded state dict for the T5Model
"""
sharded_sd = super().sharded_state_dict(prefix, sharded_offsets, metadata)
if not parallel_state.is_inside_encoder():
for k, sh_ten in sharded_sd.items():
if not k.startswith(f'{prefix}decoder'):
# Bump replica_id of all the layers shared with the encoder (output, embedding)
sh_ten.replica_id = (sh_ten.replica_id[0] + 1, *sh_ten.replica_id[1:])
return sharded_sd
def t5_extended_attention_mask(attention_mask_list: List[Tensor]) -> List[Tensor]:
"""Creates the extended attention mask
Converts the attention mask of dimension [batch size, seq_len, seq_len]
to [batch size, 1, seq_len, seq_len]
Args:
attention_mask_list (List[Tensor]): The input attention masks
Returns:
List[Tensor]: The extended binary attention masks
"""
def attn_mask_postprocess(attn_mask):
# [b, 1, s, s]
extended_attention_mask = attn_mask.unsqueeze(1)
return extended_attention_mask
return [
(attn_mask_postprocess(attn_mask) if attn_mask is not None else None)
for attn_mask in attention_mask_list
]
def t5_position_ids(token_ids: Tensor) -> Tensor:
"""Calculate position ids from token ids
Args:
token_ids (Tensor): input tokens
Returns:
Tensor: position ids
"""
seq_length = token_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=token_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(token_ids)
return position_ids
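# Illustrative usage sketch (not part of the original file): shows the shapes
# produced by the two helpers above. The batch size, sequence length, and
# vocab size are arbitrary assumptions chosen for demonstration only.
if __name__ == "__main__":
    _tokens = torch.randint(0, 32000, (2, 8))                 # [b, s]
    _pos_ids = t5_position_ids(_tokens)                       # [b, s], each row is 0..s-1
    _mask = torch.ones(2, 8, 8, dtype=torch.bool)             # [b, s, s]
    (_ext_mask,) = t5_extended_attention_mask([_mask])        # [b, 1, s, s]
    assert _pos_ids.shape == (2, 8) and _ext_mask.shape == (2, 1, 8, 8)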
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add
from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear
from megatron.core.transformer.attention import (
CrossAttention,
CrossAttentionSubmodules,
SelfAttention,
SelfAttentionSubmodules,
)
from megatron.core.transformer.dot_product_attention import DotProductAttention
from megatron.core.transformer.enums import AttnMaskType
from megatron.core.transformer.identity_op import IdentityOp
from megatron.core.transformer.mlp import MLP, MLPSubmodules
from megatron.core.transformer.spec_utils import ModuleSpec
from megatron.core.transformer.transformer_block import TransformerBlockSubmodules
from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules
try:
from megatron.core.extensions.transformer_engine import (
TEColumnParallelLinear,
TEDotProductAttention,
TELayerNormColumnParallelLinear,
TENorm,
TERowParallelLinear,
)
HAVE_TE = True
except ImportError:
HAVE_TE = False
try:
import apex # pylint: disable=unused-import
from megatron.core.fusions.fused_layer_norm import FusedLayerNorm
HAVE_APEX = True
LNImpl = FusedLayerNorm
except ImportError:
import warnings
from megatron.core.transformer.torch_norm import WrappedTorchNorm
warnings.warn('Apex is not installed. Falling back to Torch Norm')
LNImpl = WrappedTorchNorm
def encoder_model_with_transformer_engine_default_spec() -> ModuleSpec:
"""T5 encoder TE spec (uses Transformer Engine components)."""
return ModuleSpec(
module=TransformerLayer,
submodules=TransformerLayerSubmodules(
self_attention=ModuleSpec(
module=SelfAttention,
params={"attn_mask_type": AttnMaskType.padding},
submodules=SelfAttentionSubmodules(
linear_qkv=TELayerNormColumnParallelLinear,
core_attention=TEDotProductAttention,
linear_proj=TERowParallelLinear,
q_layernorm=IdentityOp,
k_layernorm=IdentityOp,
),
),
self_attn_bda=get_bias_dropout_add,
mlp=ModuleSpec(
module=MLP,
submodules=MLPSubmodules(
linear_fc1=TELayerNormColumnParallelLinear, linear_fc2=TERowParallelLinear
),
),
mlp_bda=get_bias_dropout_add,
),
)
def decoder_model_with_transformer_engine_default_spec() -> ModuleSpec:
"""T5 decoder TE spec (uses Transformer Engine components)."""
return ModuleSpec(
module=TransformerLayer,
submodules=TransformerLayerSubmodules(
self_attention=ModuleSpec(
module=SelfAttention,
params={"attn_mask_type": AttnMaskType.causal},
submodules=SelfAttentionSubmodules(
linear_qkv=TELayerNormColumnParallelLinear,
core_attention=TEDotProductAttention,
linear_proj=TERowParallelLinear,
q_layernorm=IdentityOp,
k_layernorm=IdentityOp,
),
),
self_attn_bda=get_bias_dropout_add,
pre_cross_attn_layernorm=TENorm,
cross_attention=ModuleSpec(
module=CrossAttention,
params={"attn_mask_type": AttnMaskType.padding},
submodules=CrossAttentionSubmodules(
linear_q=TEColumnParallelLinear,
linear_kv=TEColumnParallelLinear,
core_attention=TEDotProductAttention,
linear_proj=TERowParallelLinear,
),
),
cross_attn_bda=get_bias_dropout_add,
mlp=ModuleSpec(
module=MLP,
submodules=MLPSubmodules(
linear_fc1=TELayerNormColumnParallelLinear, linear_fc2=TERowParallelLinear
),
),
mlp_bda=get_bias_dropout_add,
),
)
def encoder_model_with_local_spec() -> ModuleSpec:
"""T5 encoder local spec (uses Megatron-Core components)."""
return ModuleSpec(
module=TransformerLayer,
submodules=TransformerLayerSubmodules(
input_layernorm=LNImpl,
self_attention=ModuleSpec(
module=SelfAttention,
params={"attn_mask_type": AttnMaskType.arbitrary},
submodules=SelfAttentionSubmodules(
linear_qkv=ColumnParallelLinear,
core_attention=DotProductAttention,
linear_proj=RowParallelLinear,
q_layernorm=IdentityOp,
k_layernorm=IdentityOp,
),
),
self_attn_bda=get_bias_dropout_add,
pre_mlp_layernorm=LNImpl,
mlp=ModuleSpec(
module=MLP,
submodules=MLPSubmodules(
linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear
),
),
mlp_bda=get_bias_dropout_add,
sharded_state_dict_keys_map={
'input_layernorm.': 'self_attention.linear_qkv.layer_norm_',
'pre_mlp_layernorm.': 'mlp.linear_fc1.layer_norm_',
},
),
)
def decoder_model_with_local_spec() -> ModuleSpec:
"""T5 decoder local spec (uses Megatron-Core components)."""
return ModuleSpec(
module=TransformerLayer,
submodules=TransformerLayerSubmodules(
input_layernorm=LNImpl,
self_attention=ModuleSpec(
module=SelfAttention,
params={"attn_mask_type": AttnMaskType.causal},
submodules=SelfAttentionSubmodules(
linear_qkv=ColumnParallelLinear,
core_attention=DotProductAttention,
linear_proj=RowParallelLinear,
q_layernorm=IdentityOp,
k_layernorm=IdentityOp,
),
),
self_attn_bda=get_bias_dropout_add,
pre_cross_attn_layernorm=LNImpl,
cross_attention=ModuleSpec(
module=CrossAttention,
params={"attn_mask_type": AttnMaskType.arbitrary},
submodules=CrossAttentionSubmodules(
linear_q=ColumnParallelLinear,
linear_kv=ColumnParallelLinear,
core_attention=DotProductAttention,
linear_proj=RowParallelLinear,
),
),
cross_attn_bda=get_bias_dropout_add,
pre_mlp_layernorm=LNImpl,
mlp=ModuleSpec(
module=MLP,
submodules=MLPSubmodules(
linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear
),
),
mlp_bda=get_bias_dropout_add,
sharded_state_dict_keys_map={
'input_layernorm.': 'self_attention.linear_qkv.layer_norm_',
'pre_mlp_layernorm.': 'mlp.linear_fc1.layer_norm_',
},
),
)
def get_t5_encoder_with_transformer_engine_block_spec(
num_layers: int,
) -> TransformerBlockSubmodules:
"""T5 encoder block spec for Transformer Engine
Args:
config (TransformerConfig): config, containing number of layers for encoder
"""
layer_spec = encoder_model_with_transformer_engine_default_spec()
block_spec = TransformerBlockSubmodules([layer_spec] * num_layers, layer_norm=TENorm)
return block_spec
def get_t5_decoder_with_transformer_engine_block_spec(
num_layers: int,
) -> TransformerBlockSubmodules:
"""T5 decoder block spec for Transformer Engine
Args:
config (TransformerConfig): config, containing number of layers for decoder
"""
layer_spec = decoder_model_with_transformer_engine_default_spec()
block_spec = TransformerBlockSubmodules([layer_spec] * num_layers, layer_norm=TENorm)
return block_spec
def get_t5_encoder_with_local_block_spec(num_layers: int) -> TransformerBlockSubmodules:
"""T5 encoder block spec for local (uses Megatron-Core components)
Args:
num_layers (int): number of encoder layers
"""
layer_spec = encoder_model_with_local_spec()
block_spec = TransformerBlockSubmodules([layer_spec] * num_layers, layer_norm=TENorm)
return block_spec
def get_t5_decoder_with_local_block_spec(num_layers: int) -> TransformerBlockSubmodules:
"""T5 decoder block spec for local (uses Megatron-Core components)
Args:
num_layers (int): number of decoder layers
"""
layer_spec = decoder_model_with_local_spec()
block_spec = TransformerBlockSubmodules([layer_spec] * num_layers, layer_norm=TENorm)
return block_spec
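# Illustrative usage sketch (not part of the original file): building block
# specs for a hypothetical 12-layer T5 encoder/decoder with the local helpers
# above; the layer count is an arbitrary assumption.
if __name__ == "__main__":
    _enc_block_spec = get_t5_encoder_with_local_block_spec(num_layers=12)
    _dec_block_spec = get_t5_decoder_with_local_block_spec(num_layers=12)
    # each block spec repeats the corresponding per-layer spec num_layers times
    assert len(_enc_block_spec.layer_specs) == len(_dec_block_spec.layer_specs) == 12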
File mode changed from 100644 to 100755
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add
from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear
from megatron.core.transformer.attention import SelfAttention, SelfAttentionSubmodules
from megatron.core.transformer.dot_product_attention import DotProductAttention
from megatron.core.transformer.enums import AttnMaskType
from megatron.core.transformer.identity_op import IdentityOp
from megatron.core.transformer.mlp import MLP, MLPSubmodules
from megatron.core.transformer.spec_utils import ModuleSpec
from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules
try:
from megatron.core.extensions.transformer_engine import (
TEDotProductAttention,
TELayerNormColumnParallelLinear,
TERowParallelLinear,
)
HAVE_TE = True
except ImportError:
HAVE_TE = False
try:
import apex # pylint: disable=unused-import
from megatron.core.fusions.fused_layer_norm import FusedLayerNorm
HAVE_APEX = True
LNImpl = FusedLayerNorm
except ImportError:
import warnings
from megatron.core.transformer.torch_norm import WrappedTorchNorm
warnings.warn('Apex is not installed. Falling back to Torch Norm')
LNImpl = WrappedTorchNorm
# Use this spec to use lower level Transformer Engine modules (required for fp8 training)
bert_layer_with_transformer_engine_spec = ModuleSpec(
module=TransformerLayer,
submodules=TransformerLayerSubmodules(
self_attention=ModuleSpec(
module=SelfAttention,
params={"attn_mask_type": AttnMaskType.padding},
submodules=SelfAttentionSubmodules(
linear_qkv=TELayerNormColumnParallelLinear,
core_attention=TEDotProductAttention,
linear_proj=TERowParallelLinear,
q_layernorm=IdentityOp,
k_layernorm=IdentityOp,
),
),
self_attn_bda=get_bias_dropout_add,
mlp=ModuleSpec(
module=MLP,
submodules=MLPSubmodules(
linear_fc1=TELayerNormColumnParallelLinear, linear_fc2=TERowParallelLinear
),
),
mlp_bda=get_bias_dropout_add,
),
)
# Use this spec for an implementation using only modules in megatron core
bert_layer_local_spec = ModuleSpec(
module=TransformerLayer,
submodules=TransformerLayerSubmodules(
input_layernorm=LNImpl,
self_attention=ModuleSpec(
module=SelfAttention,
params={"attn_mask_type": AttnMaskType.padding},
submodules=SelfAttentionSubmodules(
linear_qkv=ColumnParallelLinear,
core_attention=DotProductAttention,
linear_proj=RowParallelLinear,
q_layernorm=IdentityOp,
k_layernorm=IdentityOp,
),
),
self_attn_bda=get_bias_dropout_add,
pre_mlp_layernorm=LNImpl,
mlp=ModuleSpec(
module=MLP,
submodules=MLPSubmodules(linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear),
),
mlp_bda=get_bias_dropout_add,
sharded_state_dict_keys_map={
'input_layernorm.': 'self_attention.linear_qkv.layer_norm_',
'pre_mlp_layernorm.': 'mlp.linear_fc1.layer_norm_',
},
),
)
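# Illustrative sketch (not part of the original file): a typical way to pick a
# layer spec depending on Transformer Engine availability, mirroring the two
# module-level specs above. The variable name is a hypothetical choice.
default_bert_layer_spec = (
    bert_layer_with_transformer_engine_spec if HAVE_TE else bert_layer_local_spec
)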
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
import torch
from torch import Tensor
from megatron.core.fusions.fused_layer_norm import HAVE_FUSED_LAYER_NORM, FusedLayerNorm
from megatron.core.transformer.module import MegatronModule
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.transformer.utils import get_linear_layer
if HAVE_FUSED_LAYER_NORM:
LNImpl = FusedLayerNorm
else:
import warnings
warnings.warn('Apex is not installed. Falling back to Torch Norm')
from megatron.core.transformer.torch_norm import WrappedTorchNorm as LNImpl
class BertLMHead(MegatronModule):
"""Masked LM head for Bert.
Args:
hidden_size: hidden size
config (TransformerConfig): TransformerConfig object
"""
def __init__(self, hidden_size: int, config: TransformerConfig):
super().__init__(config=config)
# TODO: Should we switch this to TE?
self.dense = get_linear_layer(
hidden_size, hidden_size, config.init_method, config.perform_initialization
)
setattr(self.dense.weight, 'sequence_parallel', config.sequence_parallel)
setattr(self.dense.bias, 'sequence_parallel', config.sequence_parallel)
self.layer_norm = LNImpl(
config=config, hidden_size=hidden_size, eps=config.layernorm_epsilon
)
self.gelu = torch.nn.functional.gelu
def forward(self, hidden_states: Tensor) -> Tensor:
"""forward pass"""
hidden_states = self.dense(hidden_states)
hidden_states = self.gelu(hidden_states)
hidden_states = self.layer_norm(hidden_states)
return hidden_states
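# Illustrative torch-only sketch (not part of the original file): the head
# applies dense -> gelu -> layer_norm to [s, b, h] hidden states. Plain torch
# modules stand in for the config-driven layers; sizes are arbitrary assumptions.
if __name__ == "__main__":
    _s, _b, _h = 16, 2, 64
    _dense = torch.nn.Linear(_h, _h)
    _norm = torch.nn.LayerNorm(_h)
    _hidden = torch.randn(_s, _b, _h)
    _out = _norm(torch.nn.functional.gelu(_dense(_hidden)))
    assert _out.shape == (_s, _b, _h)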
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import warnings
from typing import Literal, Optional
import torch
from torch import Tensor
from megatron.core import parallel_state, tensor_parallel
from megatron.core.config_logger import has_config_logger_enabled, log_config_to_disk
from megatron.core.models.bert.bert_lm_head import BertLMHead
from megatron.core.models.bert.pooler import Pooler
from megatron.core.models.common.embeddings.language_model_embedding import LanguageModelEmbedding
from megatron.core.models.common.embeddings.rotary_pos_embedding import RotaryEmbedding
from megatron.core.models.common.language_module.language_module import LanguageModule
from megatron.core.transformer.dot_product_attention import (
DotProductAttention as MCoreDotProductAttention,
)
from megatron.core.transformer.enums import AttnBackend, AttnMaskType, ModelType
from megatron.core.transformer.spec_utils import ModuleSpec
from megatron.core.transformer.transformer_block import TransformerBlock
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.transformer.utils import get_linear_layer
from megatron.core.utils import get_te_version as _get_te_version
from megatron.core.utils import is_te_min_version
def get_te_version():
"""Included for backwards compatibility."""
warnings.warn("`get_te_version` will be deprecated in a future release")
return _get_te_version()
class BertModel(LanguageModule):
"""Transformer language model.
Args:
config (TransformerConfig): transformer config
num_tokentypes (int) : Set to 2 when args.bert_binary_head is True, and 0 otherwise.
Defaults to 0.
transformer_layer_spec (ModuleSpec): Specifies module to use for transformer layers
vocab_size (int): vocabulary size
max_sequence_length (int): maximum size of sequence. This is used for positional embedding
pre_process (bool): Include embedding layer (used with pipeline parallelism)
post_process (bool): Include an output layer (used with pipeline parallelism)
parallel_output (bool): Do not gather the outputs, keep them split across tensor parallel
ranks
share_embeddings_and_output_weights (bool): When True, input embeddings and output logit
weights are shared. Defaults to False.
position_embedding_type (string): Position embedding type.
Options ['learned_absolute', 'rope']. Default is 'learned_absolute'.
rotary_percent (float): Percent of rotary dimension to use for rotary position embeddings.
Defaults to 1.0 (100%). Ignored unless position_embedding_type is 'rope'.
"""
def __init__(
self,
config: TransformerConfig,
num_tokentypes: int,
transformer_layer_spec: ModuleSpec,
vocab_size: int,
max_sequence_length: int,
pre_process: bool = True,
post_process: bool = True,
fp16_lm_cross_entropy: bool = False,
parallel_output: bool = True,
share_embeddings_and_output_weights: bool = False,
position_embedding_type: Literal['learned_absolute', 'rope'] = 'learned_absolute',
rotary_percent: float = 1.0,
seq_len_interpolation_factor: Optional[float] = None,
add_binary_head=True,
return_embeddings=False,
):
super(BertModel, self).__init__(config=config)
if has_config_logger_enabled(config):
log_config_to_disk(config, locals(), prefix=type(self).__name__)
if return_embeddings:
assert self.post_process and self.add_binary_head
self.config: TransformerConfig = config
self.transformer_layer_spec: ModuleSpec = transformer_layer_spec
self.vocab_size = vocab_size
self.max_sequence_length = max_sequence_length
self.pre_process = pre_process
self.post_process = post_process
self.fp16_lm_cross_entropy = fp16_lm_cross_entropy
self.parallel_output = parallel_output
self.share_embeddings_and_output_weights = share_embeddings_and_output_weights
self.position_embedding_type = position_embedding_type
self.add_binary_head = add_binary_head
self.return_embeddings = return_embeddings
# megatron core pipelining currently depends on model type
self.model_type = ModelType.encoder_or_decoder
self.attn_mask_dimensions = self._sanity_check_attention_and_get_attn_mask_dimension()
# Embeddings.
if self.pre_process:
self.embedding = LanguageModelEmbedding(
config=self.config,
vocab_size=self.vocab_size,
max_sequence_length=self.max_sequence_length,
position_embedding_type=position_embedding_type,
num_tokentypes=num_tokentypes,
)
if self.position_embedding_type == 'rope':
self.rotary_pos_emb = RotaryEmbedding(
kv_channels=self.config.kv_channels,
rotary_percent=rotary_percent,
rotary_interleaved=self.config.rotary_interleaved,
seq_len_interpolation_factor=seq_len_interpolation_factor,
use_cpu_initialization=self.config.use_cpu_initialization,
)
# Transformer.
self.encoder = TransformerBlock(
config=self.config,
spec=self.transformer_layer_spec,
pre_process=self.pre_process,
post_process=self.post_process,
)
# Output
if post_process:
# TODO: Make sure you are passing in the mpu_vocab_size properly
self.lm_head = BertLMHead(config.hidden_size, config)
self.output_layer = tensor_parallel.ColumnParallelLinear(
config.hidden_size,
self.vocab_size,
config=config,
init_method=config.init_method,
bias=True,
skip_bias_add=False,
gather_output=not self.parallel_output,
skip_weight_param_allocation=pre_process and share_embeddings_and_output_weights,
)
self.binary_head = None
if self.add_binary_head:
# TODO: Should we switch this to TE?
self.binary_head = get_linear_layer(
config.hidden_size, 2, config.init_method, config.perform_initialization
)
self.pooler = Pooler(
config.hidden_size, config.init_method, config, config.sequence_parallel
)
if self.pre_process or self.post_process:
self.setup_embeddings_and_output_layer()
# pylint: disable=line-too-long
def _sanity_check_attention_and_get_attn_mask_dimension(self) -> str:
"""We do some checks and return attention mask dimensions for self attention
Transformer engine library underwent a lot of change. So we need to change dimensions of
the attention mask depending on the TE version. We also santiy check some arguments.
1. If we use local version of attention dimension of the mask is [b,1,s,s]
2. If we use transformer engine > 1.10 we support all 3 backends with padding mask and [b,1,s,s]
3. If we use transformer engine >= 1.7 but less than 1.10
a ) Flash and Fused attention uses padding mask with [b,1,1,s]
b ) Unfused attention works with arbitrary mask with [b,1,s,s]
4. If we use transformer engine < 1.7
Flash and fused attention is not supported. Unfused attention will work with padding mask [b,1,s,s]
Default if you dont set any NVTE_ATTN flag will it will just use the fused path for transformer engine version >= 1.7 and unfused path for other
Args:
transformer_layer_spec (ModuleSpec): The transformer layer spec
Returns:
str: A string showing the format of the attn mask dimensions
"""
attention_backend = self.config.attention_backend
attn_mask_dimensions = None
# For local layer spec we just use b1ss
if (
self.transformer_layer_spec.submodules.self_attention.submodules.core_attention
== MCoreDotProductAttention
):
assert attention_backend in [
AttnBackend.local,
AttnBackend.auto,
], f'Expected AttnBackend to be local or auto while using MCore self attention, but found {attention_backend}. Set --attn-backend to local or do not use the MCore SelfAttention submodule in layer specs'
attn_mask_dimensions = "b1ss"
else:
attn_mask_type = self.transformer_layer_spec.submodules.self_attention.params[
'attn_mask_type'
]
# For TE >= 1.10 (We always use padding mask and use b11s)
if is_te_min_version("1.10.0"):
attn_mask_dimensions = "b11s"
if attn_mask_type != AttnMaskType.padding:
warnings.warn(
f'For TE versions >= 1.10, flash/fused/unfused support padding mask. Setting attention mask from {attn_mask_type} to padding'
)
self.transformer_layer_spec.submodules.self_attention.params[
'attn_mask_type'
] = AttnMaskType.padding
# For 1.7 <= TE < 1.10 the flash and fused paths use a padding mask with b11s and the unfused path uses an arbitrary mask with b1ss
elif is_te_min_version("1.7.0"):
if attention_backend in [AttnBackend.flash, AttnBackend.fused, AttnBackend.auto]:
attn_mask_dimensions = "b11s"
else:
if attn_mask_type != AttnMaskType.arbitrary:
warnings.warn(
f'For TE versions >= 1.7 but < 1.10, the unfused path supports only the arbitrary mask. Setting attention mask from {attn_mask_type} to arbitrary'
)
self.transformer_layer_spec.submodules.self_attention.params[
'attn_mask_type'
] = AttnMaskType.arbitrary
attn_mask_dimensions = "b1ss"
# For TE < 1.7 we only support unfused attention with b1ss and padding mask
else:
attn_mask_dimensions = "b1ss"
assert not (attention_backend in [AttnBackend.flash, AttnBackend.fused]), (
"Flash and fused attention is not supported with transformer engine version "
"< 1.7. Set --attention-backend to unfused or leave it to be default (auto) or upgrade transformer engine >= 1.7"
)
return attn_mask_dimensions
def bert_extended_attention_mask(self, attention_mask: Tensor) -> Tensor:
"""Creates the extended attention mask
Converts the attention mask of dimension [batch size, seq len] to
[batch size, 1, seq len, seq len] or [batch size, 1, 1, seq len] and makes it binary
Args:
attention_mask (Tensor): The input attention mask
Returns:
Tensor: The extended binary attention mask
"""
# We create a 3D attention mask from a 2D tensor mask.
if self.attn_mask_dimensions == "b1ss":
# [b, 1, s]
attention_mask_b1s = attention_mask.unsqueeze(1)
# [b, s, 1]
attention_mask_bs1 = attention_mask.unsqueeze(2)
# [b, s, s]
attention_mask_bss = attention_mask_b1s * attention_mask_bs1
# [b, 1, s, s]
extended_attention_mask = attention_mask_bss.unsqueeze(1)
else:
# [b, 1, 1, s]
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(1)
# Convert attention mask to binary:
extended_attention_mask = extended_attention_mask < 0.5
return extended_attention_mask
def bert_position_ids(self, token_ids):
"""Position ids for bert model"""
# Create position ids
seq_length = token_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=token_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(token_ids)
return position_ids
def set_input_tensor(self, input_tensor: Tensor) -> None:
"""Sets input tensor to the model.
See megatron.model.transformer.set_input_tensor()
Args:
input_tensor (Tensor): Sets the input tensor for the model.
"""
# This is usually handled in schedules.py but some inference code still
# gives us non-lists or None
if not isinstance(input_tensor, list):
input_tensor = [input_tensor]
assert len(input_tensor) == 1, 'input_tensor should only be length 1 for gpt/bert'
self.encoder.set_input_tensor(input_tensor[0])
def forward(
self,
input_ids: Tensor,
attention_mask: Tensor,
tokentype_ids: Tensor = None,
lm_labels: Tensor = None,
inference_params=None,
):
"""Forward function of BERT model
Forward function of the BERT Model This function passes the input tensors
through the embedding layer, and then the encoder and finally into the post
processing layer (optional).
It either returns the Loss values if labels are given or the final hidden units
"""
extended_attention_mask = self.bert_extended_attention_mask(attention_mask)
if parallel_state.is_pipeline_first_stage():
input_ids = input_ids
position_ids = self.bert_position_ids(input_ids)
else:
position_ids = None
input_ids = None
# Encoder embedding.
if self.pre_process:
encoder_input = self.embedding(
input_ids=input_ids, position_ids=position_ids, tokentype_ids=tokentype_ids
)
else:
# intermediate stage of pipeline
# encoder will get hidden_states from encoder.input_tensor
encoder_input = None
# Rotary positional embeddings (Why not move this into BERT/GPTEmbedding?)
rotary_pos_emb = None
if self.position_embedding_type == 'rope':
rotary_seq_len = self.rotary_pos_emb.get_rotary_seq_len(
inference_params, self.encoder, encoder_input, self.config
)
rotary_pos_emb = self.rotary_pos_emb(rotary_seq_len)
# Run encoder.
hidden_states = self.encoder(
hidden_states=encoder_input,
attention_mask=extended_attention_mask,
inference_params=inference_params,
rotary_pos_emb=rotary_pos_emb,
)
if not self.post_process:
return hidden_states
if self.add_binary_head:
pooled_output = self.pooler(hidden_states, 0)
if self.return_embeddings:
embeddings = torch.transpose(hidden_states, 0, 1)
masks = torch.sum(attention_mask, dim=1)
# Collect masked embeddings.
output = torch.zeros(
size=(embeddings.shape[0], embeddings.shape[2]),
dtype=torch.float32,
device=torch.cuda.current_device(),
)
for i, (embedding, mask) in enumerate(zip(embeddings, masks)):
output[i, :] = torch.mean(embedding[1 : mask - 1], dim=0)
return output
# logits and loss
output_weight = None
if self.share_embeddings_and_output_weights:
output_weight = self.shared_embedding_or_output_weight()
hidden_states_after_lm_head = self.lm_head(hidden_states=hidden_states)
logits, _ = self.output_layer(hidden_states_after_lm_head, weight=output_weight)
binary_logits = None
if self.binary_head is not None:
binary_logits = self.binary_head(pooled_output)
if lm_labels is None:
# [s b h] => [b s h]
return logits.transpose(0, 1).contiguous(), binary_logits
loss = self.compute_language_model_loss(lm_labels, logits)
return loss, binary_logits
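# Illustrative torch-only sketch (not part of the original file) of the two
# extended-mask layouts returned by bert_extended_attention_mask: "b1ss" takes
# the outer product of the padding mask and yields [b, 1, s, s], while "b11s"
# simply broadcasts to [b, 1, 1, s]. In both cases True marks masked positions.
if __name__ == "__main__":
    _pad = torch.tensor([[1, 1, 1, 0]])                                  # [b, s]; 1 = keep, 0 = pad
    _b1ss = (_pad.unsqueeze(1) * _pad.unsqueeze(2)).unsqueeze(1) < 0.5   # [b, 1, s, s]
    _b11s = _pad.unsqueeze(1).unsqueeze(1) < 0.5                         # [b, 1, 1, s]
    assert _b1ss.shape == (1, 1, 4, 4) and _b11s.shape == (1, 1, 1, 4)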
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
import torch
from torch import Tensor
from megatron.core import tensor_parallel
from megatron.core.transformer.module import MegatronModule
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.transformer.utils import get_linear_layer
class Pooler(MegatronModule):
"""Pooler layer.
Pool hidden states of a specific token (for example start of the
sequence) and add a linear transformation followed by a tanh.
Args:
hidden_size (int): The hidden size
init_method (callable): Weight initialization method for the linear layer. Bias is set to zero.
config (TransformerConfig): The transformer configuration
sequence_parallel (bool): Whether sequence parallelism is used. Defaults to False
"""
def __init__(
self,
hidden_size: int,
init_method: callable,
config: TransformerConfig,
sequence_parallel: bool = False,
):
super(Pooler, self).__init__(config)
# TODO: Should we switch this to TE?
self.dense = get_linear_layer(
hidden_size, hidden_size, init_method, config.perform_initialization
)
self.sequence_parallel = sequence_parallel
def forward(self, hidden_states: Tensor, sequence_index=0):
# hidden_states: [s, b, h]
# sequence_index: index of the token to pool.
# gather data along sequence dimensions
# same pooler is run on all tensor parallel nodes
if self.sequence_parallel:
hidden_states = tensor_parallel.gather_from_sequence_parallel_region(
hidden_states, tensor_parallel_output_grad=False
)
pooled = hidden_states[sequence_index, :, :]
pooled = self.dense(pooled)
pooled = torch.tanh(pooled)
return pooled
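# Illustrative torch-only sketch (not part of the original file): the pooler
# selects one sequence position from [s, b, h] hidden states and applies a
# linear layer followed by tanh. Sizes are arbitrary assumptions.
if __name__ == "__main__":
    _s, _b, _h = 8, 2, 32
    _hidden = torch.randn(_s, _b, _h)
    _dense = torch.nn.Linear(_h, _h)
    _pooled = torch.tanh(_dense(_hidden[0, :, :]))   # pool sequence position 0
    assert _pooled.shape == (_b, _h)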
File mode changed from 100644 to 100755
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
from .rope_utils import apply_rotary_pos_emb
from .rotary_pos_embedding import RotaryEmbedding
from .yarn_rotary_pos_embedding import YarnRotaryEmbedding, _yarn_get_mscale
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
from typing import Literal
import torch
from torch import Tensor
from megatron.core import tensor_parallel
from megatron.core.transformer.module import MegatronModule
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.utils import (
make_sharded_tensor_for_checkpoint,
make_tp_sharded_tensor_for_checkpoint,
)
class GPTEmbedding(MegatronModule):
class LanguageModelEmbedding(MegatronModule):
"""Language model embeddings.
Arguments:
Args:
config (TransformerConfig): config object with all necessary configs for TransformerBlock
vocab_size (int): vocabulary size
max_sequence_length (int): maximum size of sequence. This
is used for positional embedding
add_position_embedding (bool): Add a position embedding.
embedding_dropout_prob float): dropout probability for embeddings
embedding_dropout_prob (float): dropout probability for embeddings
num_tokentypes (int): Set to 0 without binary head, and 2 with a binary head. Defaults to 0.
scatter_to_sequence_parallel (bool): Set to False to disable scatter of embedding
across sequence parallel region. Defaults to True.
"""
def __init__(
@@ -28,20 +30,31 @@ class GPTEmbedding(MegatronModule):
config: TransformerConfig,
vocab_size: int,
max_sequence_length: int,
add_position_embedding: bool,
position_embedding_type: Literal['learned_absolute', 'rope', 'none'] = 'learned_absolute',
num_tokentypes: int = 0,
scatter_to_sequence_parallel: bool = True,
):
super().__init__(config=config)
self.config: TransformerConfig = config
self.vocab_size: int = vocab_size
self.max_sequence_length: int = max_sequence_length
self.add_position_embedding: bool = add_position_embedding
self.add_position_embedding: bool = position_embedding_type == 'learned_absolute'
self.num_tokentypes = num_tokentypes
self.scatter_to_sequence_parallel = scatter_to_sequence_parallel
self.reduce_scatter_embeddings = (
(not self.add_position_embedding)
and self.num_tokentypes <= 0
and self.config.sequence_parallel
and self.scatter_to_sequence_parallel
)
# Word embeddings (parallel).
self.word_embeddings = tensor_parallel.VocabParallelEmbedding(
num_embeddings=self.vocab_size,
embedding_dim=self.config.hidden_size,
init_method=self.config.init_method,
reduce_scatter_embeddings=self.reduce_scatter_embeddings,
config=self.config,
)
@@ -55,6 +68,16 @@ class GPTEmbedding(MegatronModule):
if self.config.perform_initialization:
self.config.init_method(self.position_embeddings.weight)
if self.num_tokentypes > 0:
self.tokentype_embeddings = torch.nn.Embedding(
self.num_tokentypes, self.config.hidden_size
)
# Initialize the token-type embeddings.
if self.config.perform_initialization:
self.config.init_method(self.tokentype_embeddings.weight)
else:
self.tokentype_embeddings = None
# Embeddings dropout
self.embedding_dropout = torch.nn.Dropout(self.config.hidden_dropout)
@@ -64,9 +87,22 @@ class GPTEmbedding(MegatronModule):
self.word_embeddings.weight.shared = True
self.position_embeddings.weight.data.fill_(0)
self.position_embeddings.weight.shared = True
def forward(self, input_ids, position_ids):
# Embeddings.
if self.num_tokentypes > 0:
self.tokentype_embeddings.weight.data.fill_(0)
self.tokentype_embeddings.weight.shared = True
def forward(self, input_ids: Tensor, position_ids: Tensor, tokentype_ids: int = None) -> Tensor:
"""Forward pass of the embedding module.
Args:
input_ids (Tensor): The input tokens
position_ids (Tensor): The position id's used to calculate position embeddings
tokentype_ids (int): The token type ids. Used when args.bert_binary_head is
set to True. Defaults to None
Returns:
Tensor: The output embeddings
"""
word_embeddings = self.word_embeddings(input_ids)
if self.add_position_embedding:
position_embeddings = self.position_embeddings(position_ids)
@@ -74,8 +110,17 @@ class GPTEmbedding(MegatronModule):
else:
embeddings = word_embeddings
# Data format change to avoid explicit tranposes : [b s h] --> [s b h].
embeddings = embeddings.transpose(0, 1).contiguous()
if not self.reduce_scatter_embeddings:
# Data format change to avoid explicit tranposes : [b s h] --> [s b h].
embeddings = embeddings.transpose(0, 1).contiguous()
if tokentype_ids is not None:
assert self.tokentype_embeddings is not None
# [b s h] -> [s b h] (So that it can be added with embeddings)
tokentype_embedding = self.tokentype_embeddings(tokentype_ids).permute(1, 0, 2)
embeddings = embeddings + tokentype_embedding
else:
assert self.tokentype_embeddings is None
# If the input flag for fp32 residual connection is set, convert for float.
if self.config.fp32_residual_connection:
@@ -83,41 +128,16 @@ class GPTEmbedding(MegatronModule):
# Dropout.
if self.config.sequence_parallel:
embeddings = tensor_parallel.scatter_to_sequence_parallel_region(embeddings)
if not self.reduce_scatter_embeddings and self.scatter_to_sequence_parallel:
embeddings = tensor_parallel.scatter_to_sequence_parallel_region(embeddings)
# `scatter_to_sequence_parallel_region` returns a view, which prevents
# the original tensor from being garbage collected. Clone to facilitate GC.
# Has a small runtime cost (~0.5%).
if self.config.clone_scatter_output_in_embedding and self.scatter_to_sequence_parallel:
embeddings = embeddings.clone()
with tensor_parallel.get_cuda_rng_tracker().fork():
embeddings = self.embedding_dropout(embeddings)
else:
embeddings = self.embedding_dropout(embeddings)
return embeddings
def sharded_state_dict(self, prefix=''):
sharded_state_dict = {}
word_embeddings_prefix = f'{prefix}word_embeddings.'
word_embeddings_state_dict = self.word_embeddings.state_dict(
prefix=word_embeddings_prefix, keep_vars=True
)
sharded_word_embeddings_key = f'{word_embeddings_prefix}weight'
sharded_word_embeddings_tensor = make_tp_sharded_tensor_for_checkpoint(
tensor=word_embeddings_state_dict[sharded_word_embeddings_key],
key=sharded_word_embeddings_key,
allow_shape_mismatch=True,
)
sharded_state_dict[sharded_word_embeddings_key] = sharded_word_embeddings_tensor
if self.add_position_embedding:
position_embeddings_prefix = f'{prefix}position_embeddings.'
position_embeddings_state_dict = self.position_embeddings.state_dict(
prefix=position_embeddings_prefix, keep_vars=True
)
sharded_position_embeddings_key = f'{position_embeddings_prefix}weight'
sharded_position_embeddings_tensor = make_sharded_tensor_for_checkpoint(
tensor=position_embeddings_state_dict[sharded_position_embeddings_key],
key=sharded_position_embeddings_key,
)
sharded_state_dict[sharded_position_embeddings_key] = sharded_position_embeddings_tensor
return sharded_state_dict
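# Illustrative torch-only sketch (not part of the original file): the embedding
# path above sums word, position, and (optional) token-type embeddings and moves
# from [b, s, h] to [s, b, h]. Sizes are arbitrary assumptions.
if __name__ == "__main__":
    _b, _s, _h, _v = 2, 8, 32, 1000
    _word = torch.nn.Embedding(_v, _h)
    _pos = torch.nn.Embedding(_s, _h)
    _ids = torch.randint(0, _v, (_b, _s))
    _pos_ids = torch.arange(_s).unsqueeze(0).expand_as(_ids)
    _emb = (_word(_ids) + _pos(_pos_ids)).transpose(0, 1).contiguous()   # [s, b, h]
    assert _emb.shape == (_s, _b, _h)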
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from megatron.core.transformer.transformer_config import TransformerConfig
import logging
import torch
from torch import Tensor
from megatron.core import parallel_state
from megatron.core.utils import is_te_min_version
logger = logging.getLogger(__name__)
try:
from megatron.core.extensions.transformer_engine import (
fused_apply_rotary_pos_emb,
fused_apply_rotary_pos_emb_thd,
)
HAVE_APPLY_ROPE_FUSION = True
except ImportError:
try:
from apex.transformer.functional import (
fused_apply_rotary_pos_emb,
fused_apply_rotary_pos_emb_thd,
)
HAVE_APPLY_ROPE_FUSION = True
except ImportError:
HAVE_APPLY_ROPE_FUSION = False
try:
from flash_attn.layers.rotary import apply_rotary_emb as apply_rotary_emb_flash
except ImportError:
apply_rotary_emb_flash = None
__all__ = ['apply_rotary_emb_flash']
def get_pos_emb_on_this_cp_rank(pos_emb: Tensor, seq_dim: int) -> Tensor:
"""Get the position embedding on the current context parallel rank.
Args:
pos_emb (Tensor): Positional embedding tensor
seq_dim (int): Sequence dimension
"""
cp_size = parallel_state.get_context_parallel_world_size()
cp_rank = parallel_state.get_context_parallel_rank()
cp_idx = torch.tensor(
[cp_rank, (2 * cp_size - cp_rank - 1)], device="cpu", pin_memory=True
).cuda(non_blocking=True)
pos_emb = pos_emb.view(
*pos_emb.shape[:seq_dim], 2 * cp_size, -1, *pos_emb.shape[(seq_dim + 1) :]
)
pos_emb = pos_emb.index_select(seq_dim, cp_idx)
pos_emb = pos_emb.view(*pos_emb.shape[:seq_dim], -1, *pos_emb.shape[(seq_dim + 2) :])
return pos_emb
def _rotate_half(x: Tensor, rotary_interleaved: bool) -> Tensor:
"""Change sign so the last dimension becomes [-odd, +even]
Args:
x (Tensor): Input tensor
Returns:
Tensor: Tensor rotated half
"""
if not rotary_interleaved:
x1, x2 = torch.chunk(x, 2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
else:
x1 = x[:, :, :, ::2]
x2 = x[:, :, :, 1::2]
x_new = torch.stack((-x2, x1), dim=-1)
return x_new.view(x_new.shape[0], x_new.shape[1], x_new.shape[2], -1)
def _apply_rotary_pos_emb_bshd(
t: Tensor,
freqs: Tensor,
rotary_interleaved: bool = False,
multi_latent_attention: bool = False,
mscale: float = 1.0,
) -> Tensor:
"""Apply rotary positional embedding to input tensor T.
check https://kexue.fm/archives/8265 for detailed formulas
Args:
t (Tensor): Input tensor T is of shape [seq_length, ... , dim]
freqs (Tensor): Rotary Positional embedding tensor freq is of shape [seq_length, ..., dim]
Returns:
Tensor: The input tensor after applying RoPE
"""
rot_dim = freqs.shape[-1]
# ideally t_pass is empty so that the rotary pos embedding is applied to the whole tensor t
t, t_pass = t[..., :rot_dim], t[..., rot_dim:]
if multi_latent_attention:
x1 = t[..., 0::2]
x2 = t[..., 1::2]
t = torch.cat((x1, x2), dim=-1)
# first part is cosine component
# second part is sine component, need to change signs with _rotate_half method
cos_ = (torch.cos(freqs) * mscale).to(t.dtype)
sin_ = (torch.sin(freqs) * mscale).to(t.dtype)
t = (t * cos_) + (_rotate_half(t, rotary_interleaved) * sin_)
return torch.cat((t, t_pass), dim=-1)
def _get_thd_freqs_on_this_cp_rank(cp_rank: int, cp_size: int, x: Tensor, freqs: Tensor) -> Tensor:
if cp_size > 1:
cp_seg = x.size(0) // 2
full_seqlen = cp_size * x.size(0)
return torch.cat(
[
freqs[cp_rank * cp_seg : (cp_rank + 1) * cp_seg],
freqs[full_seqlen - (cp_rank + 1) * cp_seg : full_seqlen - cp_rank * cp_seg],
]
)
else:
return freqs[: x.size(0)]
def _apply_rotary_pos_emb_thd(
t: Tensor,
cu_seqlens: Tensor,
freqs: Tensor,
rotary_interleaved: bool = False,
multi_latent_attention: bool = False,
mscale: float = 1.0,
) -> Tensor:
"""A baseline implementation of applying RoPE for `thd` format.
Args:
t (Tensor): Input tensor T is of shape [t, h, d]
cu_seqlens(Tensor): Cumulative sum of sequence lengths in a batch for `t`,
with shape [b + 1] and dtype torch.int32.
freqs (Tensor): Rotary Positional embedding tensor freq is of shape [max_s, 1, 1, d]
Returns:
Tensor: Shape [t, h, d]. The input tensor after applying RoPE.
"""
cp_size = parallel_state.get_context_parallel_world_size()
cp_rank = parallel_state.get_context_parallel_rank()
cu_seqlens = cu_seqlens // cp_size
seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
return torch.cat(
[
_apply_rotary_pos_emb_bshd(
x.unsqueeze(1),
_get_thd_freqs_on_this_cp_rank(cp_rank, cp_size, x, freqs),
rotary_interleaved=rotary_interleaved,
multi_latent_attention=multi_latent_attention,
mscale=mscale,
)
for x in torch.split(t, seqlens)
]
).squeeze(1)
def apply_rotary_pos_emb(
t: Tensor,
freqs: Tensor,
config: TransformerConfig,
cu_seqlens: Optional[Tensor] = None,
mscale: float = 1.0,
):
"""
Reroute to the appropriate apply_rotary_pos_emb function depending on
fused/unfused kernels, or bshd (conventional) / thd (packed seq) format
"""
if config.apply_rope_fusion:
if cu_seqlens is None:
return fused_apply_rotary_pos_emb(t, freqs)
else:
cp_size = parallel_state.get_context_parallel_world_size()
if cp_size > 1:
if not is_te_min_version("1.11.0", check_equality=False):
raise ValueError("Only TE >= 1.12 supports RoPE fusion for THD format with CP.")
return fused_apply_rotary_pos_emb_thd(
t,
cu_seqlens,
freqs,
cp_size=cp_size,
cp_rank=parallel_state.get_context_parallel_rank(),
)
else:
return fused_apply_rotary_pos_emb_thd(t, cu_seqlens, freqs)
else:
if cu_seqlens is None:
return _apply_rotary_pos_emb_bshd(
t,
freqs,
rotary_interleaved=config.rotary_interleaved,
multi_latent_attention=config.multi_latent_attention,
mscale=mscale,
)
else:
return _apply_rotary_pos_emb_thd(
t,
cu_seqlens,
freqs,
rotary_interleaved=config.rotary_interleaved,
multi_latent_attention=config.multi_latent_attention,
mscale=mscale,
)
def apply_rotary_pos_emb_with_cos_sin(
t: Tensor, cos: Tensor, sin: Tensor, rotary_interleaved: bool = False
) -> Tensor:
"""
This function applies rotary positional embedding to the target tensor t
using precomputed cos and sin of size (seq_len, d_rot / 2)
"""
cos = cos.to(t.dtype)
sin = sin.to(t.dtype)
if apply_rotary_emb_flash is None:
# Combine cos and sin into freqs
freqs = torch.stack([cos, sin], dim=-1).flatten(start_dim=-2)
# Expand freqs to match t's shape
while freqs.dim() < t.dim():
freqs = freqs.unsqueeze(1)
freqs = freqs.expand(t.shape[:-1] + (-1,))
y = _apply_rotary_pos_emb_bshd(
t,
freqs,
rotary_interleaved=rotary_interleaved,
multi_latent_attention=False,
mscale=1.0,
)
else:
# Use Flash Attention's optimized kernel for rotary embedding
t = t.permute(1, 0, 2, 3)
y = apply_rotary_emb_flash(t, cos, sin, rotary_interleaved)
y = y.permute(1, 0, 2, 3)
return y
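# Illustrative usage sketch (not part of the original file): applying the
# unfused bshd RoPE path above to a [s, b, n_heads, head_dim] tensor. Shapes
# and the rotary base (10000) are arbitrary assumptions; freqs follows the
# [s, 1, 1, rot_dim] layout expected by _apply_rotary_pos_emb_bshd.
if __name__ == "__main__":
    _s, _b, _np, _hn = 8, 2, 4, 16
    _t = torch.randn(_s, _b, _np, _hn)
    _inv_freq = 1.0 / (10000 ** (torch.arange(0, _hn, 2, dtype=torch.float32) / _hn))
    _freqs = torch.outer(torch.arange(_s, dtype=torch.float32), _inv_freq)  # [s, hn/2]
    _emb = torch.cat((_freqs, _freqs), dim=-1)[:, None, None, :]            # [s, 1, 1, hn]
    _out = _apply_rotary_pos_emb_bshd(_t, _emb, rotary_interleaved=False)
    assert _out.shape == _t.shape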
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.transformer.transformer_block import TransformerBlock
from megatron.core.inference_params import InferenceParams
from megatron.core.packed_seq_params import PackedSeqParams
import logging
import math
from functools import lru_cache
import torch
from torch import Tensor, nn
from megatron.core import parallel_state
from megatron.core.models.common.embeddings.rope_utils import ( # for backward compatibility; pylint: disable=unused-import
_apply_rotary_pos_emb_bshd,
_apply_rotary_pos_emb_thd,
_rotate_half,
apply_rotary_pos_emb,
get_pos_emb_on_this_cp_rank,
)
logger = logging.getLogger(__name__)
__all__ = ['RotaryEmbedding']
class RotaryEmbedding(nn.Module):
"""Rotary Embedding for language model.
Args:
kv_channels (int): Projection weights dimension in multi-head attention. Obtained
from transformer config
rotary_percent (float): Percent of rotary dimension to use for rotary position
embeddings.
rotary_interleaved (bool, optional): If True, interleaved rotary position embeddings.
Defaults to False.
seq_len_interpolation_factor (float, optional): scale of linearly interpolating RoPE
for longer sequences. The value must be a float larger than 1.0. Defaults to None
rotary_base (int, optional): Base period for rotary position embeddings. Defaults to
10000.
rope_scaling (bool, optional): Apply rope scaling as used in llama 3.1
use_cpu_initialization (bool, optional): If False, initialize the inv_freq directly
on the GPU. Defaults to False
"""
def __init__(
self,
kv_channels: int,
rotary_percent: float,
rotary_interleaved: bool = False,
seq_len_interpolation_factor: float = None,
rotary_base: int = 10000,
rope_scaling: bool = False,
use_cpu_initialization: bool = False,
) -> None:
super().__init__()
dim = kv_channels
if rotary_percent < 1.0:
dim = int(dim * rotary_percent)
self.rotary_interleaved = rotary_interleaved
self.seq_len_interpolation_factor = seq_len_interpolation_factor
device = 'cpu' if use_cpu_initialization else torch.cuda.current_device()
self.inv_freq = 1.0 / (
rotary_base ** (torch.arange(0, dim, 2, dtype=torch.float32, device=device) / dim)
)
if rope_scaling:
self.inv_freq = self._apply_scaling(self.inv_freq)
def _apply_scaling(
self,
freqs,
factor=8,
low_freq_factor=1,
high_freq_factor=4,
original_max_position_embeddings=8192,
):
# This implementation is adapted from:
# https://github.com/huggingface/transformers/blob/2a5a6ad18aa22e98429bb5ecb880660328030ea0/src/transformers/modeling_rope_utils.py#L303-L343
factor = factor # `8` in the original implementation
low_freq_factor = low_freq_factor # `1` in the original implementation
high_freq_factor = high_freq_factor # `4` in the original implementation
old_context_len = original_max_position_embeddings # `8192` in the original implementation
low_freq_wavelen = old_context_len / low_freq_factor
high_freq_wavelen = old_context_len / high_freq_factor
wavelen = 2 * math.pi / freqs
# wavelen < high_freq_wavelen: do nothing
# wavelen > low_freq_wavelen: divide by factor
inv_freq_llama = torch.where(wavelen > low_freq_wavelen, freqs / factor, freqs)
# otherwise: interpolate between the two, using a smooth factor
smooth_factor = (old_context_len / wavelen - low_freq_factor) / (
high_freq_factor - low_freq_factor
)
smoothed_inv_freq = (
1 - smooth_factor
) * inv_freq_llama / factor + smooth_factor * inv_freq_llama
is_medium_freq = ~(wavelen < high_freq_wavelen) * ~(wavelen > low_freq_wavelen)
inv_freq_llama = torch.where(is_medium_freq, smoothed_inv_freq, inv_freq_llama)
return inv_freq_llama
def get_freqs_non_repeated(self, max_seq_len: int, offset: int = 0) -> Tensor:
"""Generates matrix of frequencies based on positions in the sequence,
used to create positional encodings"""
seq = (
torch.arange(max_seq_len, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
+ offset
)
if self.seq_len_interpolation_factor is not None:
seq *= 1 / self.seq_len_interpolation_factor
freqs = torch.outer(seq, self.inv_freq) # [seq len, dim]
return freqs
    def get_cos_sin(self, max_seq_len: int, offset: int = 0) -> tuple[Tensor, Tensor]:
"""Cosine and sine values for RoPE are precomputed for all positions up to the maximum
sequence length"""
freqs = self.get_freqs_non_repeated(max_seq_len, offset)
cos = torch.cos(freqs)
sin = torch.sin(freqs)
return cos, sin
@lru_cache(maxsize=32)
def forward(self, max_seq_len: int, offset: int = 0, packed_seq: bool = False) -> Tensor:
"""Forward pass of RoPE embedding.
Args:
max_seq_len (int): Maximum size of sequence
offset (int, optional): RoPE offset. Defaults to 0.
packed_seq (bool, optional): Whether to use packed sequence. Defaults to False.
Returns:
Tensor: Embeddings after applying RoPE.
"""
if self.inv_freq.device.type == 'cpu':
# move `inv_freq` to GPU once at the first micro-batch forward pass
self.inv_freq = self.inv_freq.to(device=torch.cuda.current_device())
freqs = self.get_freqs_non_repeated(max_seq_len, offset)
# first part even vector components, second part odd vector components,
# 2 * dim in dimension size
if not self.rotary_interleaved:
emb = torch.cat((freqs, freqs), dim=-1)
else:
emb = torch.stack((freqs.view(-1, 1), freqs.view(-1, 1)), dim=-1).view(
freqs.shape[0], -1
)
# emb [seq_length, .., dim]
emb = emb[:, None, None, :]
if parallel_state.get_context_parallel_world_size() > 1 and not packed_seq:
            # slice rotary_pos_emb along the sequence dimension and select the partition of
            # the current CP rank
emb = get_pos_emb_on_this_cp_rank(emb, 0)
return emb
def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
state_dict.pop(f'{prefix}inv_freq', None)
return super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
def get_rotary_seq_len(
self,
inference_params: InferenceParams,
transformer: TransformerBlock,
transformer_input: Tensor,
transformer_config: TransformerConfig,
packed_seq_params: PackedSeqParams,
) -> float:
"""Function to get the rotary sequence length.
Args:
inference_params : Used during Inference time
transformer (TransformerBlock): The transformer block (decoder/encoder) used
by the model
transformer_input (Tensor): Input tensor to the transformer
transformer_config (TransformerConfig): Transformer config used by the model
packed_seq_params (PackedSeqParams): Packed sequence params
Returns:
float: The rotary sequence length
"""
if packed_seq_params is not None:
            # max_seqlen is the maximum sequence length in the packed sequence before it is
            # divided by the TP and CP sizes.
return max(packed_seq_params.max_seqlen_q, packed_seq_params.max_seqlen_kv)
elif inference_params is not None:
rotary_seq_len = inference_params.max_sequence_length
else:
if transformer.input_tensor is not None:
rotary_seq_len = transformer.input_tensor.size(0)
else:
rotary_seq_len = transformer_input.size(0)
if transformer_config.sequence_parallel:
rotary_seq_len *= transformer_config.tensor_model_parallel_size
rotary_seq_len *= transformer_config.context_parallel_size
return rotary_seq_len
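# --- Illustrative sketch, not part of the original file ---
# What RotaryEmbedding.forward computes for the default (non-interleaved) case, rewritten
# with plain torch so it runs on CPU without initializing Megatron's parallel_state.
# dim=64, max_seq_len=16 and rotary_base=10000 are arbitrary example values.
def _rotary_embedding_shape_example():
    import torch

    dim, max_seq_len, rotary_base = 64, 16, 10000
    inv_freq = 1.0 / (rotary_base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
    freqs = torch.outer(torch.arange(max_seq_len, dtype=torch.float32), inv_freq)  # [16, 32]
    # duplicate the frequencies so cos/sin cover the full rotary dimension
    emb = torch.cat((freqs, freqs), dim=-1)[:, None, None, :]                      # [16, 1, 1, 64]
    assert emb.shape == (max_seq_len, 1, 1, dim)
    return emb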
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
from __future__ import annotations
import logging
import math
from functools import lru_cache
import torch
from torch import Tensor
from megatron.core import parallel_state
from megatron.core.models.common.embeddings.rope_utils import get_pos_emb_on_this_cp_rank
from megatron.core.models.common.embeddings.rotary_pos_embedding import RotaryEmbedding
logger = logging.getLogger(__name__)
class YarnRotaryEmbedding(RotaryEmbedding):
"""Yarn Rotary Embedding for language model.
Args:
kv_channels (int): Projection weights dimension in multi-head attention. Obtained from
transformer config
rotary_percent (float): Percent of rotary dimension to use for rotary position embeddings.
rotary_interleaved (bool, optional): If True, interleaved rotary position embeddings.
Defaults to False.
seq_len_interpolation_factor (float, optional): scale of linearly interpolating RoPE for
longer sequences. The value must be a float larger than 1.0. Defaults to None
rotary_base (float, optional): Base period for rotary position embeddings. Defaults to
10000.
use_cpu_initialization (bool, optional): If False, initialize the inv_freq directly on
the GPU. Defaults to False
scaling_factor (float, optional): Scaling factor for Yarn RoPE. Defaults to 1.0.
original_max_position_embeddings (int, optional): Original maximum position embeddings
length. Defaults to 4096.
beta_fast (float, optional): Fast beta value for Yarn RoPE. Defaults to 32.
beta_slow (float, optional): Slow beta value for Yarn RoPE. Defaults to 1.
mscale (float, optional): Mscale value for Yarn RoPE. Defaults to 1.
mscale_all_dim (float, optional): Mscale all dim value for Yarn RoPE. Defaults to 0.
"""
def __init__(
self,
kv_channels: int,
rotary_percent: float = 1.0,
rotary_interleaved: bool = False,
seq_len_interpolation_factor: float = None,
rotary_base: float = 10000.0,
use_cpu_initialization: bool = False,
scaling_factor: float = 1.0,
original_max_position_embeddings: int = 4096,
beta_fast: float = 32.0,
beta_slow: float = 1.0,
mscale: float = 1.0,
mscale_all_dim: float = 0.0,
):
self.dim = kv_channels
self.rotary_base = rotary_base
self.scaling_factor = scaling_factor
self.original_max_position_embeddings = original_max_position_embeddings
self.beta_fast = beta_fast
self.beta_slow = beta_slow
self.mscale = mscale
self.mscale_all_dim = mscale_all_dim
device = 'cpu' if use_cpu_initialization else torch.cuda.current_device()
self.inv_freq_extra = 1.0 / (
self.rotary_base
** (torch.arange(0, self.dim, 2, dtype=torch.float32, device=device) / self.dim)
)
self.inv_freq_inter = 1.0 / (
self.scaling_factor
* self.rotary_base
** (torch.arange(0, self.dim, 2, dtype=torch.float32, device=device) / self.dim)
)
        # Pass the remaining arguments by keyword so `use_cpu_initialization` does not get
        # bound to the parent's `rope_scaling` parameter.
        super().__init__(
            kv_channels,
            rotary_percent,
            rotary_interleaved=rotary_interleaved,
            seq_len_interpolation_factor=seq_len_interpolation_factor,
            rotary_base=rotary_base,
            use_cpu_initialization=use_cpu_initialization,
        )
@lru_cache(maxsize=32)
    def forward(self, max_seq_len: int, offset: int = 0) -> tuple[Tensor, float]:
"""Forward pass of Yarn Rotary Embedding.
Args:
max_seq_len (int): Maximum size of sequence
offset (int, optional): RoPE offset. Defaults to 0.
Returns:
            tuple[Tensor, float]: Embeddings after applying Yarn RoPE and the corresponding mscale value.
"""
assert (
not self.rotary_interleaved
), "Yarn RoPE does not support interleaved rotary embeddings"
if self.inv_freq_extra.device.type == 'cpu':
# move `inv_freq_extra` to GPU once at the first micro-batch forward pass
self.inv_freq_extra = self.inv_freq_extra.to(device=torch.cuda.current_device())
if self.inv_freq_inter.device.type == 'cpu':
# move `inv_freq_inter` to GPU once at the first micro-batch forward pass
self.inv_freq_inter = self.inv_freq_inter.to(device=torch.cuda.current_device())
low, high = _yarn_find_correction_range(
self.beta_fast,
self.beta_slow,
self.dim,
self.rotary_base,
self.original_max_position_embeddings,
)
inv_freq_mask = 1.0 - _yarn_linear_ramp_mask(low, high, self.dim // 2).to(
device=self.inv_freq_extra.device, dtype=torch.float32
)
inv_freq = self.inv_freq_inter * (1 - inv_freq_mask) + self.inv_freq_extra * inv_freq_mask
seq = (
torch.arange(
max_seq_len, device=self.inv_freq_extra.device, dtype=self.inv_freq_extra.dtype
)
+ offset
)
freqs = torch.outer(seq, inv_freq)
_mscale = float(
_yarn_get_mscale(self.scaling_factor, self.mscale)
/ _yarn_get_mscale(self.scaling_factor, self.mscale_all_dim)
)
emb = torch.cat((freqs, freqs), dim=-1)
# emb [seq_length, .., dim]
emb = emb[:, None, None, :]
if parallel_state.get_context_parallel_world_size() > 1:
            # slice rotary_pos_emb along the sequence dimension
            # and select the partition of the current CP rank
emb = get_pos_emb_on_this_cp_rank(emb, 0)
return emb, _mscale
# Inverse dim formula to find dim based on number of rotations
def _yarn_find_correction_dim(
num_rotations: float, dim: int, rotary_base: float = 10000, max_position_embeddings: int = 2048
) -> float:
return (dim * math.log(max_position_embeddings / (num_rotations * 2 * math.pi))) / (
2 * math.log(rotary_base)
)
# Find dim range bounds based on rotations
def _yarn_find_correction_range(
low_rot: float,
high_rot: float,
dim: int,
rotary_base: float = 10000,
max_position_embeddings: int = 2048,
) -> tuple[int, int]:
low = math.floor(_yarn_find_correction_dim(low_rot, dim, rotary_base, max_position_embeddings))
high = math.ceil(_yarn_find_correction_dim(high_rot, dim, rotary_base, max_position_embeddings))
return max(low, 0), min(high, dim - 1) # Clamp values just in case
def _yarn_linear_ramp_mask(min: float, max: float, dim: int) -> Tensor:
if min == max:
max += 0.001 # Prevent singularity
linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min)
ramp_func = torch.clamp(linear_func, 0, 1)
return ramp_func
def _yarn_get_mscale(scale: float = 1, mscale: float = 1) -> float:
if scale <= 1:
return 1.0
return 0.1 * mscale * math.log(scale) + 1.0
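# --- Illustrative sketch, not part of the original file ---
# How YarnRotaryEmbedding.forward combines the helpers above: the correction range picks the
# dimensions to blend, the linear ramp mask interpolates between the "extrapolation" and
# "interpolation" inverse frequencies, and _yarn_get_mscale gives the attention scaling.
# The values (dim=64, scaling_factor=4.0, beta_fast=32, beta_slow=1) are arbitrary examples.
def _yarn_blending_example():
    import torch

    dim, rotary_base, orig_max_pos = 64, 10000.0, 4096
    scaling_factor, beta_fast, beta_slow = 4.0, 32.0, 1.0

    low, high = _yarn_find_correction_range(beta_fast, beta_slow, dim, rotary_base, orig_max_pos)
    mask = 1.0 - _yarn_linear_ramp_mask(low, high, dim // 2)
    inv_freq_extra = 1.0 / (rotary_base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
    inv_freq_inter = inv_freq_extra / scaling_factor
    inv_freq = inv_freq_inter * (1 - mask) + inv_freq_extra * mask
    attn_scale = _yarn_get_mscale(scaling_factor, mscale=1.0)  # 0.1 * ln(4) + 1 ≈ 1.14
    return inv_freq, attn_scale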
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
import logging
import os
from typing import Optional, Tuple
import torch
from torch import Tensor
from megatron.core import parallel_state, tensor_parallel
from megatron.core.dist_checkpointing.mapping import ShardedStateDict
from megatron.core.fusions.fused_cross_entropy import fused_vocab_parallel_cross_entropy
from megatron.core.transformer.enums import AttnBackend
from megatron.core.transformer.module import MegatronModule
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.utils import make_tp_sharded_tensor_for_checkpoint
class LanguageModule(MegatronModule):
"""Base language module that has common helper functions used across GPT, BERT etc.
Args:
config (TransformerConfig): Input transformer config for the model
"""
def __init__(self, config: TransformerConfig) -> None:
super().__init__(config=config)
self._set_attention_backend()
# pylint: disable=line-too-long
def _set_attention_backend(self):
"""Set attention backend
        Transformer Engine works on an opt-out basis: by default all three attention backend flags are set to 1. If the user chooses a particular attention backend, we set the other two to 0. If the user chooses local, we set all 3 TE env variables to 0.
"""
def check_and_set_env_variable(
env_variable_name: str, expected_value: int, attn_type: AttnBackend
) -> None:
current_value = os.getenv(env_variable_name)
assert current_value is None or current_value == str(
expected_value
            ), f'{env_variable_name} is set to {current_value}, but {expected_value} was expected for attention backend type {attn_type.name}. Unset NVTE_FLASH_ATTN, NVTE_FUSED_ATTN and NVTE_UNFUSED_ATTN, and use the --attention-backend argument to choose between (flash/fused/unfused/auto/local). The default is auto.'
os.environ[env_variable_name] = str(expected_value)
if self.config.attention_backend == AttnBackend.local:
check_and_set_env_variable("NVTE_FLASH_ATTN", 0, AttnBackend.flash)
check_and_set_env_variable("NVTE_FUSED_ATTN", 0, AttnBackend.flash)
check_and_set_env_variable("NVTE_UNFUSED_ATTN", 0, AttnBackend.flash)
elif self.config.attention_backend == AttnBackend.flash:
check_and_set_env_variable("NVTE_FLASH_ATTN", 1, AttnBackend.flash)
check_and_set_env_variable("NVTE_FUSED_ATTN", 0, AttnBackend.flash)
check_and_set_env_variable("NVTE_UNFUSED_ATTN", 0, AttnBackend.flash)
elif self.config.attention_backend == AttnBackend.fused:
check_and_set_env_variable("NVTE_FLASH_ATTN", 0, AttnBackend.fused)
check_and_set_env_variable("NVTE_FUSED_ATTN", 1, AttnBackend.fused)
check_and_set_env_variable("NVTE_UNFUSED_ATTN", 0, AttnBackend.fused)
elif self.config.attention_backend == AttnBackend.unfused:
check_and_set_env_variable("NVTE_FLASH_ATTN", 0, AttnBackend.unfused)
check_and_set_env_variable("NVTE_FUSED_ATTN", 0, AttnBackend.unfused)
check_and_set_env_variable("NVTE_UNFUSED_ATTN", 1, AttnBackend.unfused)
elif self.config.attention_backend == AttnBackend.auto:
check_and_set_env_variable("NVTE_FLASH_ATTN", 1, AttnBackend.auto)
check_and_set_env_variable("NVTE_FUSED_ATTN", 1, AttnBackend.auto)
check_and_set_env_variable("NVTE_UNFUSED_ATTN", 1, AttnBackend.auto)
def compute_language_model_loss(self, labels: Tensor, logits: Tensor) -> Tensor:
"""Computes the language model loss (Cross entropy across vocabulary)
Args:
labels (Tensor): The labels of dimension [batch size, seq length]
logits (Tensor): The final logits returned by the output layer of the transformer model
Returns:
Tensor: Loss tensor of dimensions [batch size, sequence_length]
"""
# [b s] => [s b]
labels = labels.transpose(0, 1).contiguous()
if self.config.cross_entropy_loss_fusion:
loss = fused_vocab_parallel_cross_entropy(logits, labels)
else:
loss = tensor_parallel.vocab_parallel_cross_entropy(logits, labels)
# [s b] => [b, s]
loss = loss.transpose(0, 1).contiguous()
return loss
def setup_embeddings_and_output_layer(self) -> None:
"""Sets up embedding layer in first stage and output layer in last stage.
        This function initializes word embeddings in the final stage when we are
using pipeline parallelism and sharing word embeddings, and sets up param
attributes on the embedding and output layers.
"""
# Set `is_embedding_or_output_parameter` attribute.
if self.pre_process:
self.embedding.word_embeddings.weight.is_embedding_or_output_parameter = True
if self.post_process and self.output_layer.weight is not None:
self.output_layer.weight.is_embedding_or_output_parameter = True
if not self.share_embeddings_and_output_weights:
return
if parallel_state.get_pipeline_model_parallel_world_size() == 1:
# Zero out wgrad if sharing embeddings between two layers on same
# pipeline stage to make sure grad accumulation into main_grad is
# correct and does not include garbage values (e.g., from torch.empty).
self.shared_embedding_or_output_weight().zero_out_wgrad = True
return
if parallel_state.is_pipeline_first_stage() and self.pre_process and not self.post_process:
self.shared_embedding_or_output_weight().shared_embedding = True
if self.post_process and not self.pre_process:
assert not parallel_state.is_pipeline_first_stage()
# set word_embeddings weights to 0 here, then copy first
# stage's weights using all_reduce below.
self.output_layer.weight.data.fill_(0)
self.output_layer.weight.shared = True
self.output_layer.weight.shared_embedding = True
# Parameters are shared between the word embeddings layers, and the
# heads at the end of the model. In a pipelined setup with more than
# one stage, the initial embedding layer and the head are on different
# workers, so we do the following:
# 1. Create a second copy of word_embeddings on the last stage, with
# initial parameters of 0.0.
# 2. Do an all-reduce between the first and last stage to ensure that
# the two copies of word_embeddings start off with the same
# parameter values.
        # 3. In the training loop, all-reduce the grads of the two word_embeddings
        #    layers to ensure that every applied weight update is the same on both
        #    stages.
# Ensure that first and last stages have the same initial parameter
# values.
if torch.distributed.is_initialized():
if parallel_state.is_rank_in_embedding_group():
weight = self.shared_embedding_or_output_weight()
weight.data = weight.data.cuda()
torch.distributed.all_reduce(
weight.data, group=parallel_state.get_embedding_group()
)
elif not getattr(LanguageModule, "embedding_warning_printed", False):
logging.getLogger(__name__).warning(
"Distributed processes aren't initialized, so the output layer "
"is not initialized with weights from the word embeddings. "
"If you are just manipulating a model this is fine, but "
"this needs to be handled manually. If you are training "
"something is definitely wrong."
)
LanguageModule.embedding_warning_printed = True
def shared_embedding_or_output_weight(self) -> Tensor:
"""Gets the emedding weight or output logit weights when share embedding and output weights set to True.
Returns:
Tensor: During pre processing it returns the input embeddings weight while during post processing it returns the final output layers weight
"""
if self.pre_process:
return self.embedding.word_embeddings.weight
elif self.post_process:
return self.output_layer.weight
return None
def sharded_state_dict(
self,
prefix: str = '',
sharded_offsets: Tuple[Tuple[int, int, int]] = (),
metadata: Optional[dict] = None,
) -> ShardedStateDict:
"""Sharded state dict implementation that handles the output layer weights tying.
Args:
prefix (str): Module name prefix.
sharded_offsets (tuple): PP related offsets, expected to be empty at this module level.
metadata (Optional[Dict]): metadata controlling sharded state dict creation.
Returns:
ShardedStateDict: sharded state dict for the LanguageModel
"""
assert not sharded_offsets, "Unexpected sharded offsets"
sharded_state_dict = super().sharded_state_dict(prefix, sharded_offsets, metadata)
first_stage_word_emb_key = f'{prefix}embedding.word_embeddings.weight'
output_layer_weight_key = f'{prefix}output_layer.weight'
output_layer_bias_key = f'{prefix}output_layer.bias'
if self.share_embeddings_and_output_weights:
self.tie_embeddings_and_output_weights_state_dict(
sharded_state_dict, output_layer_weight_key, first_stage_word_emb_key
)
elif self.post_process:
# Make sure the output layer follows the embeddings padding logic
sharded_state_dict[output_layer_weight_key].allow_shape_mismatch = True
# Regardless of sharing the output weights with embeddings, we must handle the bias padding
if self.post_process and output_layer_bias_key in sharded_state_dict:
sharded_state_dict[output_layer_bias_key].allow_shape_mismatch = True
return sharded_state_dict
def tie_embeddings_and_output_weights_state_dict(
self,
sharded_state_dict: ShardedStateDict,
output_layer_weight_key: str,
first_stage_word_emb_key: str,
) -> None:
"""Ties the embedding and output weights in a given sharded state dict.
Args:
sharded_state_dict (ShardedStateDict): state dict with the weight to tie
output_layer_weight_key (str): key of the output layer weight in the state dict.
This entry will be replaced with a tied version
first_stage_word_emb_key (str): this must be the same as the
ShardedTensor.key of the first stage word embeddings.
Returns: None, acts in-place
"""
if not self.post_process:
# No output layer
assert output_layer_weight_key not in sharded_state_dict, sharded_state_dict.keys()
return
if self.pre_process:
# Output layer is equivalent to the embedding already
return
        # Replace the default output layer with one sharing its weights with the embedding
del sharded_state_dict[output_layer_weight_key]
tensor = self.shared_embedding_or_output_weight()
last_stage_word_emb_replica_id = (
1, # copy of first stage embedding
0,
parallel_state.get_data_parallel_rank(with_context_parallel=True),
)
sharded_state_dict[output_layer_weight_key] = make_tp_sharded_tensor_for_checkpoint(
tensor=tensor,
key=first_stage_word_emb_key,
replica_id=last_stage_word_emb_replica_id,
allow_shape_mismatch=True,
)
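# --- Illustrative sketch, not part of the original file ---
# The [b, s] <-> [s, b] transpose convention used by compute_language_model_loss, with
# torch.nn.functional.cross_entropy standing in for (fused_)vocab_parallel_cross_entropy so
# the snippet runs on a single device with no tensor parallelism. Shapes are example values.
def _language_model_loss_shape_example():
    import torch
    import torch.nn.functional as F

    b, s, vocab = 2, 5, 11
    labels = torch.randint(0, vocab, (b, s))          # [b, s], as provided by callers
    logits = torch.randn(s, b, vocab)                 # [s, b, v], as produced by the decoder
    labels_sb = labels.transpose(0, 1).contiguous()   # [b, s] -> [s, b]
    loss_sb = F.cross_entropy(
        logits.reshape(-1, vocab), labels_sb.reshape(-1), reduction='none'
    ).view(s, b)
    loss = loss_sb.transpose(0, 1).contiguous()       # [s, b] -> [b, s]
    assert loss.shape == (b, s)
    return loss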
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import importlib.util
import torch
from torch import einsum, nn
__all__ = ['RotaryEmbedding', 'apply_rotary_pos_emb']
class RotaryEmbedding(nn.Module):
def __init__(self, dim, seq_len_interpolation_factor=None):
super().__init__()
self.seq_len_interpolation_factor = seq_len_interpolation_factor
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq, persistent=False)
def forward(self, max_seq_len, offset=0):
seq = torch.arange(max_seq_len, device=self.inv_freq.device) + offset
if self.seq_len_interpolation_factor is not None:
seq = seq.type_as(self.inv_freq)
seq *= 1 / self.seq_len_interpolation_factor
freqs = einsum('i , j -> i j', seq.type_as(self.inv_freq), self.inv_freq)
# first part even vector components, second part odd vector components,
# 2 * dim in dimension size
emb = torch.cat((freqs, freqs), dim=-1)
# emb [seq_length, .., dim]
return emb[:, None, None, :]
def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
state_dict.pop(f'{prefix}inv_freq', None)
return super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
def _rotate_half(x):
"""
change sign so the last dimension becomes [-odd, +even]
"""
x1, x2 = torch.chunk(x, 2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(t, freqs):
"""
input tensor t is of shape [seq_length, ..., dim]
    rotary positional embedding tensor freqs is of shape [seq_length, ..., dim]
check https://kexue.fm/archives/8265 for detailed formulas
"""
rot_dim = freqs.shape[-1]
# ideally t_pass is empty so rotary pos embedding is applied to all tensor t
t, t_pass = t[..., :rot_dim], t[..., rot_dim:]
# first part is cosine component
# second part is sine component, need to change signs with _rotate_half method
t = (t * freqs.cos()) + (_rotate_half(t) * freqs.sin())
return torch.cat((t, t_pass), dim=-1)
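# --- Illustrative sketch, not part of the original file ---
# Applying the legacy helpers above on CPU. RotaryEmbedding(dim=8) produces frequencies of
# shape [seq, 1, 1, 8]; apply_rotary_pos_emb then rotates the matching leading channels of a
# query/key tensor laid out as [seq, batch, heads, head_dim]. All sizes are example values.
def _legacy_rope_usage_example():
    rope = RotaryEmbedding(dim=8)
    freqs = rope(max_seq_len=4)               # [4, 1, 1, 8]
    q = torch.randn(4, 1, 2, 8)               # [seq, batch, heads, head_dim]
    q_rot = apply_rotary_pos_emb(q, freqs)    # rotary applied to all 8 channels
    assert q_rot.shape == q.shape
    return q_rot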
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
"""Megatron Vision Module."""
from megatron.core.transformer.module import MegatronModule
from megatron.core.transformer.transformer_config import TransformerConfig
# Note: This is only a stub at the moment. This will be expanded in follow-up changes.
class VisionModule(MegatronModule):
"""Base vision module that has common helper functions used across CLIP, ViT, etc.
Args:
config (TransformerConfig): Input transformer config for the model
"""
def __init__(self, config: TransformerConfig) -> None:
super().__init__(config=config)
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
from .gpt_model import GPTModel