Commit 688448db authored by silencealiang
Update code
parent a02a5490
Pipeline #2503 passed with stage
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
import argparse
import os
import torch
def convert(output_path, tensor_parallel_size, use_te, version):
device = "cuda"
model = torch.hub.load('NVlabs/RADIO', 'radio_model', version=version, progress=True)
state_dict = model.state_dict()
new_state_dicts = [{"model": dict()} for _ in range(tensor_parallel_size)]
# Indices for mapping PyTorch multi-head attention weights to Megatron's layout.
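# RADIO stores the fused qkv weight as [q(all heads); k(all heads); v(all heads)] along dim 0,
# whereas Megatron's linear_qkv expects the rows grouped per head as [q_h0; k_h0; v_h0; q_h1; k_h1; v_h1; ...].
# The index tensor built below gathers rows in that per-head order (kv_channels rows per head and per projection).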
kv_channels = 80
hidden_dim = 1280
num_heads = 16
indices = []
for i in range(num_heads):
lb = i * kv_channels
ub = (i + 1) * kv_channels
indices.append(torch.arange(lb, ub, dtype=torch.int))
indices.append(torch.arange(hidden_dim + lb, hidden_dim + ub, dtype=torch.int))
indices.append(torch.arange(2 * hidden_dim + lb, 2 * hidden_dim + ub, dtype=torch.int))
indices = torch.cat(indices)
for name, tensor in state_dict.items():
# Map parameter names to ones used in megatron.
new_name = ""
new_tensor = tensor
if new_tensor.dtype == torch.float16:
new_tensor = new_tensor.to(torch.float32)
# This is used for chunking some tensors to target tensor parallel size.
chunk_dim = None
if "summary_idxs" in name:
continue
elif "patch_generator" in name:
if "embedder" in name:
new_name = "embedder.weight"
chunk_dim = 0
elif "cls_token" in name:
new_name = "class_token"
elif "pos_embed" in name:
new_name = "position_embeddings"
elif "input_conditioner" in name:
continue
elif "blocks" in name:
layer_idx = name.split(".")[2]
base = f"decoder.layers.{layer_idx}"
if "attn.qkv.weight" in name:
new_name = f"{base}.self_attention.linear_qkv.weight"
new_tensor = new_tensor[indices]
chunk_dim = 0
elif "attn.qkv.bias" in name:
new_name = f"{base}.self_attention.linear_qkv.bias"
new_tensor = new_tensor[indices]
chunk_dim = 0
elif "attn.proj.weight" in name:
new_name = f"{base}.self_attention.linear_proj.weight"
chunk_dim = 1
elif "attn.proj.bias" in name:
new_name = f"{base}.self_attention.linear_proj.bias"
elif "norm1.weight" in name:
new_name = f"{base}.input_layernorm.weight"
if use_te:
new_name = f"{base}.self_attention.linear_qkv.layer_norm_weight"
elif "norm1.bias" in name:
new_name = f"{base}.input_layernorm.bias"
if use_te:
new_name = f"{base}.self_attention.linear_qkv.layer_norm_bias"
elif "mlp.fc1.weight" in name:
new_name = f"{base}.mlp.linear_fc1.weight"
chunk_dim = 0
elif "mlp.fc1.bias" in name:
new_name = f"{base}.mlp.linear_fc1.bias"
chunk_dim = 0
elif "mlp.fc2.weight" in name:
new_name = f"{base}.mlp.linear_fc2.weight"
chunk_dim = 1
elif "mlp.fc2.bias" in name:
new_name = f"{base}.mlp.linear_fc2.bias"
elif "norm2.weight" in name:
new_name = f"{base}.pre_mlp_layernorm.weight"
if use_te:
new_name = f"{base}.mlp.linear_fc1.layer_norm_weight"
elif "norm2.bias" in name:
new_name = f"{base}.pre_mlp_layernorm.bias"
if use_te:
new_name = f"{base}.mlp.linear_fc1.layer_norm_bias"
assert new_name != "", f"unexpected layer name {name}"
if chunk_dim is None:
new_tensors = [new_tensor for _ in range(tensor_parallel_size)]
else:
new_tensors = torch.chunk(new_tensor, tensor_parallel_size, dim=chunk_dim)
for i in range(tensor_parallel_size):
# chunk() creates a view of a bigger tensor. clone() is used here to avoid excessive storage.
new_state_dicts[i]["model"][new_name] = new_tensors[i].clone()
# TE sets _extra_state (for FP8 purposes), so set an empty one here for compatibility.
extra_state_layers = ("linear_qkv", "linear_proj", "linear_fc1", "linear_fc2")
is_extra_state_layer = any([l in new_name for l in extra_state_layers])
if use_te and is_extra_state_layer:
layer = new_name.split(".")[-2]
if layer in extra_state_layers:
extra_state_name = (
new_name[: new_name.rfind(".") + 1] + "_extra_state"
) # Replace the weight name.
new_state_dicts[i]["model"][extra_state_name] = None
for i in range(tensor_parallel_size):
output_dir_tp = os.path.join(output_path, "iter_0000001", f"mp_rank_0{i}")
os.makedirs(output_dir_tp)
output_path_tp = os.path.join(output_dir_tp, "model_optim_rng.pt")
torch.save(new_state_dicts[i], output_path_tp)
with open(os.path.join(output_path, "latest_checkpointed_iteration.txt"), "w") as f:
f.write("1")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="""
Convert RADIO weights to megatron format.
Example usage:
python radio_converter.py --output /some/output/folder --tensor-parallel-size 4
""",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"--output", type=str, required=True, help="output directory for megatron state dict file(s)"
)
parser.add_argument(
"--tensor-parallel-size", type=int, default=1, help="model tensor parallel size"
)
parser.add_argument("--use-te", action="store_true", help="Use Transformer Engine")
parser.add_argument("--version", type=str, default="radio_v2.5-h", help="Version of radio to load for conversion")
args = parser.parse_args()
convert(args.output, args.tensor_parallel_size, args.use_te, args.version)
print("done.")
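For reference, a minimal sketch (not part of this file) of how the converted checkpoint could be inspected after running the converter above; the output path and tensor parallel size are placeholders matching the --output and --tensor-parallel-size arguments.

import os
import torch

output_path = "/some/output/folder"  # placeholder: the value passed via --output
tensor_parallel_size = 4             # placeholder: the value passed via --tensor-parallel-size

# convert() writes one shard per tensor-parallel rank under iter_0000001/mp_rank_0{i}.
for i in range(tensor_parallel_size):
    shard_path = os.path.join(output_path, "iter_0000001", f"mp_rank_0{i}", "model_optim_rng.pt")
    shard = torch.load(shard_path, map_location="cpu")
    # Each shard is a {"model": {...}} dict keyed by Megatron-style parameter names.
    print(i, len(shard["model"]), "tensors")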
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
from megatron.core.models.multimodal.llava_model import IMAGE_TOKEN
def add_multimodal_extra_args(parser):
"""Extra arguments."""
group = parser.add_argument_group(title='multimodal arguments')
group.add_argument('--dataset-config', type=str, default=None)
group.add_argument("--prompt-path", type=str, default=None)
group.add_argument('--freeze-LM', action='store_true', default=False)
group.add_argument('--freeze-ViT', action='store_true', default=False)
group.add_argument('--language-model-type', type=str, required=True)
group.add_argument('--vision-model-type', type=str, default="clip")
group.add_argument("--disable-vision-class-token", action="store_true", default=False)
group.add_argument(
"--allow-missing-vision-projection-checkpoint", action="store_true", default=False
)
group.add_argument("--use-te", action="store_true", default=False)
group.add_argument(
"--dataloader-save", type=str, default=None, help="Energon dataloader state save path"
)
group.add_argument(
"--use-tiling", action="store_true", default=False, help="Use input image tiling"
)
group.add_argument("--max-num-tiles", type=int, default=1, help="Maximum number of image tiles")
group.add_argument(
"--use-thumbnail", action="store_true", default=False, help="Add image thumbnail as a tile"
)
group.add_argument(
"--dataloader-seq-length",
type=int,
help="Make dataloader to produce sequences of specific length.",
)
group.add_argument(
"--num-frames",
type=int,
default=1,
help="Number of frames to regularly sample from the video as input to the model.",
)
group.add_argument(
"--online-evaluation-config", type=str, help="Config file for online evaluation."
)
group.add_argument(
"--special-tokens",
nargs="*",
default=[IMAGE_TOKEN],
help="Special tokens used in the multimodal model",
)
group.add_argument(
"--tokenizer-prompt-format",
type=str,
choices=["mistral", "llama3", "chatml", "nvlm-yi-34b", "qwen2p0", "qwen2p5"],
required=True,
help="Prompt format to use with the tokenizer.",
)
group.add_argument("--pixel-shuffle", action="store_true", default=False)
group.add_argument(
"--image-tag-type",
type=str,
choices=["nvlm", "internvl", ""],
default="", # Default: Image tag not used.
help="Surround image tokens with tags.",
)
group.add_argument("--use-tile-tags", action="store_true", default=False, help="Use tile tags")
group.add_argument(
"--packing-buffer-size",
type=int,
default=None, # Packing is disabled by default.
help="Enable sample packing by setting the buffer size to > 0",
)
group.add_argument(
"--packing-seq-length", type=int, default=0, help="Packing sequence length. Must be > 0 if using packing."
)
group.add_argument(
"--recompute-vision", action="store_true", default=False, help="Enable activation checkpointing in the vision model"
)
return parser
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
from megatron.core.models.multimodal.llava_model import IMAGE_TOKEN
def add_multimodal_extra_args(parser):
"""Extra arguments."""
group = parser.add_argument_group(title='multimodal arguments')
group.add_argument('--dataset-config', type=str, default=None)
group.add_argument("--prompt-path", type=str, default=None)
group.add_argument('--freeze-LM', action='store_true', default=False)
group.add_argument('--freeze-ViT', action='store_true', default=False)
group.add_argument('--language-model-type', type=str, required=True)
group.add_argument('--language-huggingface-model-name-or-path', type=str)
group.add_argument('--vision-model-type', type=str, default="clip")
group.add_argument('--vision-huggingface-model-name-or-path', type=str)
group.add_argument("--disable-vision-class-token", action="store_true", default=False)
group.add_argument(
"--allow-missing-vision-projection-checkpoint", action="store_true", default=False
)
group.add_argument("--use-te", action="store_true", default=False)
group.add_argument(
"--dataloader-save", type=str, default=None, help="Energon dataloader state save path"
)
group.add_argument(
"--use-tiling", action="store_true", default=False, help="Use input image tiling"
)
group.add_argument("--max-num-tiles", type=int, default=1, help="Maximum number of image tiles")
group.add_argument(
"--use-thumbnail", action="store_true", default=False, help="Add image thumbnail as a tile"
)
group.add_argument(
"--dataloader-seq-length",
type=int,
help="Make dataloader to produce sequences of specific length.",
)
group.add_argument(
"--num-frames",
type=int,
default=1,
help="Number of frames to regularly sample from the video as input to the model.",
)
group.add_argument(
"--online-evaluation-config", type=str, help="Config file for online evaluation."
)
group.add_argument(
"--special-tokens",
nargs="*",
default=[IMAGE_TOKEN],
help="Special tokens used in the multimodal model",
)
group.add_argument(
"--tokenizer-prompt-format",
type=str,
choices=["mistral", "llama3", "llama3p1", "chatml", "nvlm-yi-34b", "qwen2p0", "qwen2p5"],
required=True,
help="Prompt format to use with the tokenizer.",
)
group.add_argument("--pixel-shuffle", action="store_true", default=False)
group.add_argument(
"--image-tag-type",
type=str,
choices=["nvlm", "internvl", ""],
default="", # Default: Image tag not used.
help="Surround image tokens with tags.",
)
group.add_argument("--use-tile-tags", action="store_true", default=False, help="Use tile tags")
group.add_argument(
"--packing-buffer-size",
type=int,
default=None, # Packing is disabled by default.
help="Enable sample packing by setting the buffer size to > 0",
)
group.add_argument(
"--packing-seq-length", type=int, default=0, help="Packing sequence length. Must be > 0 if using packing."
)
group.add_argument(
"--recompute-vision", action="store_true", default=False, help="Enable activation checkpointing in the vision model"
)
group.add_argument(
"--use-loss-scaling", action="store_true", default=False, help="Scale loss based on conversation turn length (in tokens)."
)
group.add_argument(
"--use-area-weighted-aspect-ratio", action="store_true", default=False,
help=(
"When --use-tiling is True, find the aspect ratio to use based on the original "
"image aspect ratio and the area covered by the tiles.")
)
return parser
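As a quick, hedged illustration (not part of this commit) of how the helper above is meant to be used: it extends an existing argparse parser, so a standalone sketch only needs the two required flags. The import path is an assumption and megatron must be importable for the IMAGE_TOKEN default.

import argparse

from examples.multimodal.multimodal_args import add_multimodal_extra_args  # assumed module path

parser = argparse.ArgumentParser()
parser = add_multimodal_extra_args(parser)
args = parser.parse_args(
    ["--language-model-type", "qwen2.5_7B", "--tokenizer-prompt-format", "qwen2p5"]
)
print(args.vision_model_type)  # "clip" unless --vision-model-type is given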
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
"""
NOTE: NVLM uses InternViT with tensor parallel (TP) size = 8.
Since InternViT has 25 attention heads and Megatron currently requires the number of attention heads
to be divisible by the TP size, we add 7 dummy zero attention heads to have 32 attention heads.
This workaround requires some changes to how we compute RMSNorm, Attention etc.
Additionally, InternViT introduces some unique features like Layer Scaling.
Those code changes are gathered here.
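Concretely, with TP size 8 the 32 padded heads are split 4 per rank: ranks 0-5 hold only real heads,
rank 6 holds 1 real head plus 3 dummy heads, and rank 7 holds only dummy heads. The RMSNorm and
attention overrides below rely on this split (valid_ranks = 6, 128 channels per head) to ignore the dummy heads.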
"""
from functools import partial
from typing import Dict
import torch
from megatron.core.dist_checkpointing.mapping import ShardedStateDict
from megatron.core.extensions.transformer_engine import (
TEColumnParallelLinear,
TEDotProductAttention,
TERowParallelLinear,
)
from megatron.core.parallel_state import (
get_tensor_model_parallel_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
)
from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear
from megatron.core.transformer.attention import SelfAttention, SelfAttentionSubmodules
from megatron.core.transformer.dot_product_attention import DotProductAttention
from megatron.core.transformer.enums import AttnMaskType
from megatron.core.transformer.mlp import MLP, MLPSubmodules
from megatron.core.transformer.module import MegatronModule
from megatron.core.transformer.spec_utils import ModuleSpec, build_module
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules
from megatron.core.transformer.utils import make_sharded_tensors_for_checkpoint
class InternViTRMSNorm(MegatronModule):
def __init__(
self,
config,
hidden_size: int,
eps: float = 1e-6,
sequence_parallel: bool = False,
compute_var: bool = False,
):
"""Custom RMSNorm for InternViT.
Args:
config (TransformerConfig): Config.
hidden_size (int): Input hidden size.
eps (float): epsilon to use for the norm, default to 1e-6
sequence_parallel (bool): Set to true if sequence parallelism is being used,
this marks the weights as needing to be allreduced.
compute_var (bool): Indicator to compute statistic manually.
"""
super().__init__(config=config)
self.config = config
self.eps = eps
self.weight = torch.nn.Parameter(torch.ones(hidden_size))
self._compute_var = compute_var
assert not sequence_parallel, "Sequence parallelism is not supported with InternViT."
setattr(self.weight, 'sequence_parallel', sequence_parallel)
def _norm(self, x, var):
if var is None:
var = x.pow(2).mean(-1, keepdim=True)
return x * torch.rsqrt(var + self.eps)
def forward(self, x):
"""Run RMSNorm with an option to compute custom statistic."""
var = None
if self._compute_var:
unpadded_hidden_size = self.config.hidden_size # 3200
max_dim = x.shape[-1] # 128
x = x.reshape(x.size(0), x.size(1), -1)
var = self._gather_var(x.float().pow(2), max_dim) / unpadded_hidden_size
output = self._norm(x.float(), var).type_as(x)
output = output * self.weight
if self._compute_var:
output = output.reshape(output.size(0), output.size(1), -1, max_dim)
return output
def _gather_var(self, input_, max_dim, valid_ranks=6):
"""Compute statistic across the non-dummy heads."""
world_size = get_tensor_model_parallel_world_size()
assert world_size == 8, "tested only with TP=8"
# Size and dimension.
last_dim = input_.dim() - 1
rank = get_tensor_model_parallel_rank()
if rank < valid_ranks: # Ranks 0-5 have 24 non-dummy attention heads.
var = input_.sum(-1, keepdim=True)
elif rank == valid_ranks: # Rank 6 has 1 non-dummy attention head.
var = input_[..., :max_dim].sum(-1, keepdim=True)
else:
var = input_.sum(-1, keepdim=True) * 0.0 # Zero-out the dummy heads.
tensor_list = [torch.empty_like(var) for _ in range(world_size)]
tensor_list[rank] = var
torch.distributed.all_gather(tensor_list, var, group=get_tensor_model_parallel_group())
output = torch.cat(tensor_list, dim=last_dim).contiguous()
return output.sum(-1, keepdim=True)
def sharded_state_dict(self, prefix='', sharded_offsets=(), metadata={}):
# In InternViTSelfAttention, the q_layernorm and k_layernorm weights
# are tensor-parallel, so they must be converted to sharded tensors.
if 'q_layernorm' in prefix or 'k_layernorm' in prefix:
state_dict = self.state_dict(prefix='', keep_vars=True)
return make_sharded_tensors_for_checkpoint(
state_dict, prefix, {'weight': 0}, sharded_offsets
)
else:
return super().sharded_state_dict(prefix, sharded_offsets, metadata)
def get_mlp_module_spec(use_te: bool = True) -> ModuleSpec:
# Dense MLP w/ or w/o TE modules.
return ModuleSpec(
module=MLP,
submodules=MLPSubmodules(
linear_fc1=TEColumnParallelLinear if use_te else ColumnParallelLinear,
linear_fc2=TERowParallelLinear if use_te else RowParallelLinear,
),
)
# Handle InternViT's layer scaling.
def _bias_dropout_add_func_internvit(ls, x_with_bias, residual, prob, training):
x, bias = x_with_bias # unpack
residual = residual if residual.dtype == x.dtype else residual.to(x.dtype)
if bias is not None:
x = x + bias
out = torch.nn.functional.dropout(x, p=prob, training=training)
out = residual + out * ls
return out
else:
out = torch.nn.functional.dropout(x, p=prob, training=training)
out = residual + out * ls
return out
def bias_dropout_add_unfused_internvit(ls, training):
"""Bias-dropout-add as in Megatron but with added LayerScaling handling."""
def _bias_dropout_add(x_with_bias, residual, prob):
return _bias_dropout_add_func_internvit(ls, x_with_bias, residual, prob, training)
return _bias_dropout_add
def get_bias_dropout_add_internvit(ls, training, fused):
"""Bias-dropout-add as in Megatron but with added LayerScaling handling."""
assert not fused, "Fused bias-dropout-add not implemented for InternViT."
return bias_dropout_add_unfused_internvit(ls, training)
# Add InternViT specialties to our default TransformerLayer.
class InternViTTransformerLayer(TransformerLayer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ls1 = torch.nn.Parameter(torch.ones(self.config.hidden_size))
self.ls2 = torch.nn.Parameter(torch.ones(self.config.hidden_size))
self.self_attn_bda = partial(self.self_attn_bda, self.ls1)
self.mlp_bda = partial(self.mlp_bda, self.ls2)
# Override a few things that are special in InternViT and not supported by the SelfAttention class.
class InternViTSelfAttention(SelfAttention):
def __init__(
self, config: TransformerConfig, submodules: SelfAttentionSubmodules, *args, **kwargs
):
super().__init__(config=config, submodules=submodules, *args, **kwargs)
# Need to override linear_qkv, q_layernorm and k_layernorm.
qkv_bias = False
self.linear_qkv = build_module(
submodules.linear_qkv,
self.config.hidden_size,
self.query_projection_size + 2 * self.kv_projection_size,
config=self.config,
init_method=self.config.init_method,
gather_output=False,
bias=qkv_bias,
skip_bias_add=False,
is_expert=False,
tp_comm_buffer_name='qkv',
)
qk_layernorm_hidden_size = (
self.hidden_size_per_attention_head * self.num_attention_heads_per_partition
) # 512 for internvit
self.q_layernorm = build_module(
submodules.q_layernorm,
hidden_size=qk_layernorm_hidden_size,
config=self.config,
eps=self.config.layernorm_epsilon,
compute_var=True,
)
self.k_layernorm = build_module(
submodules.k_layernorm,
hidden_size=qk_layernorm_hidden_size,
config=self.config,
eps=self.config.layernorm_epsilon,
compute_var=True,
)
class InternViTTEDotProductAttention(TEDotProductAttention):
"""Adjusted Attention for InternViT"""
def forward(self, *args, **kwargs):
"""Regular TEDotProductAttention + zero-out dummy attention heads."""
out = super().forward(*args, **kwargs)
# This makes sure the dummy attention heads are zeroed out.
mask = torch.ones_like(out, dtype=out.dtype, device=out.device)
rank = get_tensor_model_parallel_rank()
max_dim = out.shape[-1] # 128
valid_ranks = 6
if rank == valid_ranks:
mask[..., max_dim:] *= 0.0
elif rank > valid_ranks:
mask *= 0.0
out *= mask
return out
def get_internvit_layer_spec(use_te) -> ModuleSpec:
mlp = get_mlp_module_spec(use_te) # no norm
return ModuleSpec(
module=InternViTTransformerLayer,
submodules=TransformerLayerSubmodules(
input_layernorm=InternViTRMSNorm,
self_attention=ModuleSpec(
module=InternViTSelfAttention,
params={"attn_mask_type": AttnMaskType.no_mask},
submodules=SelfAttentionSubmodules(
linear_qkv=TEColumnParallelLinear if use_te else ColumnParallelLinear,
core_attention=TEDotProductAttention if use_te else DotProductAttention,
linear_proj=TERowParallelLinear if use_te else RowParallelLinear,
q_layernorm=InternViTRMSNorm,
k_layernorm=InternViTRMSNorm,
),
),
self_attn_bda=get_bias_dropout_add_internvit,
pre_mlp_layernorm=InternViTRMSNorm,
mlp=mlp,
mlp_bda=get_bias_dropout_add_internvit,
),
)
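A small self-contained illustration (toy tensors, dropout disabled) of the layer-scaled residual update that _bias_dropout_add_func_internvit implements above, i.e. out = residual + dropout(x + bias) * ls:

import torch

ls = torch.full((4,), 0.1)  # stands in for ls1/ls2 from InternViTTransformerLayer
x, bias, residual = torch.ones(2, 4), torch.zeros(4), torch.zeros(2, 4)
out = residual + torch.nn.functional.dropout(x + bias, p=0.0, training=False) * ls
print(out)  # every element equals 0.1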
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
"""
NOTE: NVLM uses InternViT with tensor parallel (TP) size = 8.
Since InternViT has 25 attention heads and Megatron currently requires the number of attention heads
to be divisible by the TP size, we add 7 dummy zero attention heads to have 32 attention heads.
This workaround requires some changes to how we compute RMSNorm, Attention etc.
Additionally, InternViT introduces some unique features like Layer Scaling.
Those code changes are gathered here.
"""
from functools import partial
import torch
from megatron.core.utils import divide
from megatron.core.extensions.transformer_engine import (
TEColumnParallelLinear,
TEDotProductAttention,
TERowParallelLinear,
)
from megatron.core.parallel_state import (
get_tensor_model_parallel_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
)
from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear
from megatron.core.transformer.attention import SelfAttention, SelfAttentionSubmodules
from megatron.core.transformer.dot_product_attention import DotProductAttention
from megatron.core.transformer.enums import AttnMaskType
from megatron.core.transformer.mlp import MLP, MLPSubmodules
from megatron.core.transformer.module import MegatronModule
from megatron.core.transformer.spec_utils import ModuleSpec, build_module
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules
from megatron.core.transformer.utils import make_sharded_tensors_for_checkpoint
class InternViTRMSNorm(MegatronModule):
def __init__(
self,
config,
hidden_size: int,
eps: float = 1e-6,
sequence_parallel: bool = False,
compute_var: bool = False,
):
"""Custom RMSNorm for InternViT.
Args:
config (TransformerConfig): Config.
hidden_size (int): Input hidden size.
eps (float): epsilon to use for the norm, default to 1e-6
sequence_parallel (bool): Set to true if sequence parallelism is being used,
this marks the weights as needing to be allreduced.
compute_var (bool): Indicator to compute statistic manually.
"""
super().__init__(config=config)
self.config = config
self.eps = eps
self.weight = torch.nn.Parameter(torch.ones(hidden_size))
self._compute_var = compute_var
assert not sequence_parallel, "Sequence parallelism is not supported with InternViT."
setattr(self.weight, 'sequence_parallel', sequence_parallel)
def _norm(self, x, var):
if var is None:
var = x.pow(2).mean(-1, keepdim=True)
return x * torch.rsqrt(var + self.eps)
def forward(self, x):
"""Run RMSNorm with an option to compute custom statistic."""
var = None
if self._compute_var:
unpadded_hidden_size = self.config.hidden_size # 3200
max_dim = x.shape[-1] # 128
x = x.reshape(x.size(0), x.size(1), -1)
var = self._gather_var(x.float().pow(2), max_dim) / unpadded_hidden_size
output = self._norm(x.float(), var).type_as(x)
output = output * self.weight
if self._compute_var:
output = output.reshape(output.size(0), output.size(1), -1, max_dim)
return output
def _gather_var(self, input_, max_dim):
"""Compute statistic across the non-dummy heads."""
world_size = get_tensor_model_parallel_world_size()
# Size and dimension.
last_dim = input_.dim() - 1
rank = get_tensor_model_parallel_rank()
num_attention_heads_per_partition = divide(self.config.num_attention_heads, world_size)
valid_ranks = 24 // num_attention_heads_per_partition
residual_heads = 25 % num_attention_heads_per_partition
if residual_heads == 0:
residual_heads = num_attention_heads_per_partition
max_dim = max_dim * residual_heads
if rank < valid_ranks: # Ranks without any dummy attention heads.
var = input_.sum(-1, keepdim=True)
elif rank == valid_ranks: # The only rank that may mix real and dummy heads; its first 'residual_heads' heads are real.
var = input_[..., :max_dim].sum(-1, keepdim=True)
else:
var = input_.sum(-1, keepdim=True) * 0.0 # All heads in these ranks are dummy heads: Zero-out.
tensor_list = [torch.empty_like(var) for _ in range(world_size)]
tensor_list[rank] = var
torch.distributed.all_gather(tensor_list, var, group=get_tensor_model_parallel_group())
output = torch.cat(tensor_list, dim=last_dim).contiguous()
return output.sum(-1, keepdim=True)
def sharded_state_dict(self, prefix='', sharded_offsets=(), metadata={}):
# In InternViTSelfAttention, the q_layernorm and k_layernorm weights
# are tensor-parallel, so they must be converted to sharded tensors.
if 'q_layernorm' in prefix or 'k_layernorm' in prefix:
state_dict = self.state_dict(prefix='', keep_vars=True)
return make_sharded_tensors_for_checkpoint(
state_dict, prefix, {'weight': 0}, sharded_offsets
)
else:
return super().sharded_state_dict(prefix, sharded_offsets, metadata)
def get_mlp_module_spec(use_te: bool = True) -> ModuleSpec:
# Dense MLP w/ or w/o TE modules.
return ModuleSpec(
module=MLP,
submodules=MLPSubmodules(
linear_fc1=TEColumnParallelLinear if use_te else ColumnParallelLinear,
linear_fc2=TERowParallelLinear if use_te else RowParallelLinear,
),
)
# Handle InternViT's layer scaling.
def _bias_dropout_add_func_internvit(ls, x_with_bias, residual, prob, training):
x, bias = x_with_bias # unpack
residual = residual if residual.dtype == x.dtype else residual.to(x.dtype)
if bias is not None:
x = x + bias
out = torch.nn.functional.dropout(x, p=prob, training=training)
out = residual + out * ls
return out
else:
out = torch.nn.functional.dropout(x, p=prob, training=training)
out = residual + out * ls
return out
def bias_dropout_add_unfused_internvit(ls, training):
"""Bias-dropout-add as in Megatron but with added LayerScaling handling."""
def _bias_dropout_add(x_with_bias, residual, prob):
return _bias_dropout_add_func_internvit(ls, x_with_bias, residual, prob, training)
return _bias_dropout_add
def get_bias_dropout_add_internvit(ls, training, fused):
"""Bias-dropout-add as in Megatron but with added LayerScaling handling."""
assert not fused, "Fused bias-dropout-add not implemented for InternViT."
return bias_dropout_add_unfused_internvit(ls, training)
# Add InternViT specialties to our default TransformerLayer.
class InternViTTransformerLayer(TransformerLayer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ls1 = torch.nn.Parameter(torch.ones(self.config.hidden_size))
self.ls2 = torch.nn.Parameter(torch.ones(self.config.hidden_size))
self.self_attn_bda = partial(self.self_attn_bda, self.ls1)
self.mlp_bda = partial(self.mlp_bda, self.ls2)
# Override a few things that are special in InternViT and not supported by the SelfAttention class.
class InternViTSelfAttention(SelfAttention):
def __init__(
self, config: TransformerConfig, submodules: SelfAttentionSubmodules, *args, **kwargs
):
super().__init__(config=config, submodules=submodules, *args, **kwargs)
# Need to override linear_qkv, q_layernorm and k_layernorm.
qkv_bias = False
self.linear_qkv = build_module(
submodules.linear_qkv,
self.config.hidden_size,
self.query_projection_size + 2 * self.kv_projection_size,
config=self.config,
init_method=self.config.init_method,
gather_output=False,
bias=qkv_bias,
skip_bias_add=False,
is_expert=False,
tp_comm_buffer_name='qkv',
)
qk_layernorm_hidden_size = (
self.hidden_size_per_attention_head * self.num_attention_heads_per_partition
) # 512 for internvit
self.q_layernorm = build_module(
submodules.q_layernorm,
hidden_size=qk_layernorm_hidden_size,
config=self.config,
eps=self.config.layernorm_epsilon,
compute_var=True,
)
self.k_layernorm = build_module(
submodules.k_layernorm,
hidden_size=qk_layernorm_hidden_size,
config=self.config,
eps=self.config.layernorm_epsilon,
compute_var=True,
)
class InternViTTEDotProductAttention(TEDotProductAttention):
"""Adjusted Attention for InternViT"""
def forward(self, *args, **kwargs):
"""Regular TEDotProductAttention + zero-out dummy attention heads."""
out = super().forward(*args, **kwargs)
# This makes sure the dummy attention heads are zeroed out.
mask = torch.ones_like(out, dtype=out.dtype, device=out.device)
rank = get_tensor_model_parallel_rank()
max_dim = out.shape[-1] # 128
valid_ranks = 6
if rank == valid_ranks:
mask[..., max_dim:] *= 0.0
elif rank > valid_ranks:
mask *= 0.0
out *= mask
return out
def get_internvit_layer_spec(use_te) -> ModuleSpec:
mlp = get_mlp_module_spec(use_te) # no norm
return ModuleSpec(
module=InternViTTransformerLayer,
submodules=TransformerLayerSubmodules(
input_layernorm=InternViTRMSNorm,
self_attention=ModuleSpec(
module=InternViTSelfAttention,
params={"attn_mask_type": AttnMaskType.no_mask},
submodules=SelfAttentionSubmodules(
linear_qkv=TEColumnParallelLinear if use_te else ColumnParallelLinear,
core_attention=TEDotProductAttention if use_te else DotProductAttention,
linear_proj=TERowParallelLinear if use_te else RowParallelLinear,
q_layernorm=InternViTRMSNorm,
k_layernorm=InternViTRMSNorm,
),
),
self_attn_bda=get_bias_dropout_add_internvit,
pre_mlp_layernorm=InternViTRMSNorm,
mlp=mlp,
mlp_bda=get_bias_dropout_add_internvit,
),
)
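The generalized _gather_var above derives the rank split from the config instead of hardcoding valid_ranks = 6; a short worked example, assuming the NVLM setting from the note (25 real heads padded to 32, tensor parallel size 8), reproduces the same numbers:

# Assumption: 25 real attention heads padded to 32, tensor parallel size 8, as described in the note above.
num_padded_heads, tp_size = 32, 8
heads_per_rank = num_padded_heads // tp_size             # 4 heads on every TP rank
valid_ranks = 24 // heads_per_rank                       # 6: ranks 0-5 carry only real heads
residual_heads = 25 % heads_per_rank or heads_per_rank   # 1 real head left on rank `valid_ranks`
print(heads_per_rank, valid_ranks, residual_heads)       # 4 6 1 -> ranks beyond 6 hold only dummy heads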
File mode changed from 100644 to 100755
#!/bin/bash
# Your SBATCH commands here if using SLURM.
# Please launch this script from megatron-lm root.
# Train a multimodal model.
export NCCL_IB_SL=1
export CUDA_DEVICE_MAX_CONNECTIONS=1
export TOKENIZERS_PARALLELISM="false"
DEBUG=0
if [[ $BATCH -eq 0 ]]; then
DATETIME=`date +'%y-%m-%d-%H-%M-%S'`
MODEL_NAME="mcore-nous-yi34b-internvit-mlp-${DATETIME}"
else
MODEL_NAME="mcore-nous-yi34b-internvit-mlp"
fi
WORKSPACE="<some dir>"
SOURCE=`pwd`
OUTPUT_BASE="${WORKSPACE}/output"
OUTPUT="${OUTPUT_BASE}/${MODEL_NAME}"
FINETUNE_DIR=${OUTPUT}/checkpoints
LOGS_DIR="${OUTPUT}/logs"
TENSORBOARD_DIR="${OUTPUT}/tensorboard"
LOAD_NAME="combined-yi-34b-internvit-tp8-mcore"
CHECKPOINT_DIR="${WORKSPACE}/${LOAD_NAME}"
DATA_TRAIN="${SOURCE}/examples/multimodal/nvlm/pretrain_blend.yaml"
if [[ $DEBUG -eq 1 ]]; then
MBZ=1
BZ=1
NW=0
LI=1
AD=0.0
HD=0.0
EXTRA_ARGS=""
ALLOW_NONDETERMINISTIC=1
else
MBZ=1
BZ=2048
NW=8
LI=5
AD=0.1
HD=0.1
EXTRA_ARGS=""
ALLOW_NONDETERMINISTIC=1
fi
SEQ_LEN=256 # Image embeddings sequence length.
DECODER_SEQ_LEN=512 # Language model sequence length.
MAX_POS_EMBED=512
OPTIONS=" \
--swiglu \
--use-distributed-optimizer \
--num-workers ${NW} \
--num-layers 60 \
--hidden-size 7168 \
--normalization RMSNorm \
--num-attention-heads 56 \
--exit-duration-in-mins 230 \
--group-query-attention \
--num-query-groups 8 \
--ffn-hidden-size 20480 \
--seq-length ${SEQ_LEN} \
--decoder-seq-length ${DECODER_SEQ_LEN} \
--max-position-embeddings ${MAX_POS_EMBED} \
--tokenizer-type MultimodalTokenizer \
--tokenizer-model NousResearch/Nous-Hermes-2-Yi-34B \
--tokenizer-prompt-format nvlm-yi-34b \
--vocab-size 64000 \
--make-vocab-size-divisible-by 1 \
--position-embedding-type rope \
--rotary-percent 1.0 \
--rotary-base 5000000 \
--disable-bias-linear \
--tensor-model-parallel-size 8 \
--language-model-type yi-34b \
--vision-model-type internvit \
--micro-batch-size ${MBZ} \
--global-batch-size ${BZ} \
--train-samples 122880000 \
--lr-decay-samples 25600000 \
--lr-warmup-samples 83200 \
--lr 1e-4 \
--min-lr 2.5e-5 \
--lr-decay-style cosine \
--clip-grad 10.0 \
--weight-decay 0.1 \
--adam-beta1 0.9 \
--adam-beta2 0.95 \
--init-method-std 0.014 \
--attention-dropout ${AD} \
--hidden-dropout ${HD} \
--eod-mask-loss \
--bf16 \
--tensorboard-dir=${TENSORBOARD_DIR} \
--freeze-LM \
--freeze-ViT \
--img-h 448 \
--img-w 448 \
--patch-dim 14 \
--data-path ${DATA_TRAIN} \
--dataloader-type external \
--split 100,0,0 \
--prompt-path ${SOURCE}/examples/multimodal/nvlm/nvlm_prompts.json \
--log-interval ${LI} \
--save-interval 2000 \
--eval-interval 500 \
--eval-iters 10 \
--log-params-norm \
--log-num-zeros-in-grad \
${EXTRA_ARGS} \
--save ${FINETUNE_DIR} \
--load ${FINETUNE_DIR} \
--dataloader-save ${FINETUNE_DIR}/dataloader \
--pretrained-checkpoint ${CHECKPOINT_DIR} \
--allow-missing-vision-projection-checkpoint \
--disable-vision-class-token \
--use-te \
--use-checkpoint-args \
--ckpt-format torch \
--pixel-shuffle \
--image-tag-type nvlm
"
export NVTE_ALLOW_NONDETERMINISTIC_ALGO=${ALLOW_NONDETERMINISTIC}
export NVTE_APPLY_QK_LAYER_SCALING=0
# Interactive or batch mode
if [[ $BATCH -eq 0 ]]; then
torchrun --nproc_per_node 8 examples/multimodal/train.py ${OPTIONS}
else
run_cmd="python -u ${SOURCE}/examples/multimodal/train.py ${OPTIONS}"
DATETIME=`date +'date_%y-%m-%d_time_%H-%M-%S'`
srun -l --verbose \
--container-image <path to docker image> \
--container-mounts "<some mount>" \
--output=${LOGS_DIR}/%x_%j_$DATETIME.log \
sh -c "${run_cmd}"
set +x
fi
#!/bin/bash
# Your SBATCH commands here if using SLURM.
# Please launch this script from megatron-lm root.
# Train a multimodal model.
export NCCL_IB_SL=1
export CUDA_DEVICE_MAX_CONNECTIONS=1
export TOKENIZERS_PARALLELISM="false"
DEBUG=0
if [[ $BATCH -eq 0 ]]; then
DATETIME=`date +'%y-%m-%d-%H-%M-%S'`
MODEL_NAME="mcore-nous-yi34b-internvit-mlp-${DATETIME}"
else
MODEL_NAME="mcore-nous-yi34b-internvit-mlp"
fi
WORKSPACE="<some dir>"
SOURCE=`pwd`
OUTPUT_BASE="${WORKSPACE}/output"
OUTPUT="${OUTPUT_BASE}/${MODEL_NAME}"
FINETUNE_DIR=${OUTPUT}/checkpoints
LOGS_DIR="${OUTPUT}/logs"
TENSORBOARD_DIR="${OUTPUT}/tensorboard"
LOAD_NAME="combined-yi-34b-internvit-tp8-mcore"
CHECKPOINT_DIR="${WORKSPACE}/${LOAD_NAME}"
DATA_TRAIN="${SOURCE}/examples/multimodal/nvlm/pretrain_blend.yaml"
if [[ $DEBUG -eq 1 ]]; then
MBZ=1
BZ=1
NW=0
LI=1
AD=0.0
HD=0.0
EXTRA_ARGS=""
ALLOW_NONDETERMINISTIC=1
else
MBZ=1
BZ=2048
NW=8
LI=5
AD=0.1
HD=0.1
EXTRA_ARGS=""
ALLOW_NONDETERMINISTIC=1
fi
SEQ_LEN=256 # Image embeddings sequence length.
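# Note: with 448x448 inputs and patch size 14 the vision model produces (448/14)^2 = 1024 patch embeddings;
# --pixel-shuffle reduces the count by 4x, giving the 256 image embeddings assumed here.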
DECODER_SEQ_LEN=512 # Language model sequence length.
MAX_POS_EMBED=512
OPTIONS=" \
--swiglu \
--use-distributed-optimizer \
--num-workers ${NW} \
--num-layers 60 \
--hidden-size 7168 \
--normalization RMSNorm \
--num-attention-heads 56 \
--exit-duration-in-mins 230 \
--group-query-attention \
--num-query-groups 8 \
--ffn-hidden-size 20480 \
--seq-length ${SEQ_LEN} \
--decoder-seq-length ${DECODER_SEQ_LEN} \
--max-position-embeddings ${MAX_POS_EMBED} \
--tokenizer-type MultimodalTokenizer \
--tokenizer-model NousResearch/Nous-Hermes-2-Yi-34B \
--tokenizer-prompt-format nvlm-yi-34b \
--vocab-size 64000 \
--make-vocab-size-divisible-by 1 \
--position-embedding-type rope \
--rotary-percent 1.0 \
--rotary-base 5000000 \
--disable-bias-linear \
--tensor-model-parallel-size 8 \
--language-model-type yi-34b \
--vision-model-type internvit \
--micro-batch-size ${MBZ} \
--global-batch-size ${BZ} \
--train-samples 122880000 \
--lr-decay-samples 25600000 \
--lr-warmup-samples 83200 \
--lr 1e-4 \
--min-lr 2.5e-5 \
--lr-decay-style cosine \
--clip-grad 10.0 \
--weight-decay 0.1 \
--adam-beta1 0.9 \
--adam-beta2 0.95 \
--init-method-std 0.014 \
--attention-dropout ${AD} \
--hidden-dropout ${HD} \
--untie-embeddings-and-output-weights \
--eod-mask-loss \
--bf16 \
--tensorboard-dir=${TENSORBOARD_DIR} \
--freeze-LM \
--freeze-ViT \
--img-h 448 \
--img-w 448 \
--patch-dim 14 \
--data-path ${DATA_TRAIN} \
--dataloader-type external \
--split 100,0,0 \
--prompt-path ${SOURCE}/examples/multimodal/nvlm/nvlm_prompts.json \
--log-interval ${LI} \
--save-interval 2000 \
--eval-interval 500 \
--eval-iters 10 \
--log-params-norm \
--log-num-zeros-in-grad \
${EXTRA_ARGS} \
--save ${FINETUNE_DIR} \
--load ${FINETUNE_DIR} \
--dataloader-save ${FINETUNE_DIR}/dataloader \
--pretrained-checkpoint ${CHECKPOINT_DIR} \
--allow-missing-vision-projection-checkpoint \
--disable-vision-class-token \
--use-te \
--use-checkpoint-args \
--ckpt-format torch \
--pixel-shuffle \
--image-tag-type nvlm
"
export NVTE_ALLOW_NONDETERMINISTIC_ALGO=${ALLOW_NONDETERMINISTIC}
export NVTE_APPLY_QK_LAYER_SCALING=0
# Interactive or batch mode
if [[ $BATCH -eq 0 ]]; then
torchrun --nproc_per_node 8 examples/multimodal/train.py ${OPTIONS}
else
run_cmd="python -u ${SOURCE}/examples/multimodal/train.py ${OPTIONS}"
DATETIME=`date +'date_%y-%m-%d_time_%H-%M-%S'`
srun -l --verbose \
--container-image <path to docker image> \
--container-mounts "<some mount>" \
--output=${LOGS_DIR}/%x_%j_$DATETIME.log \
sh -c "${run_cmd}"
set +x
fi
#!/bin/bash
export NCCL_IB_SL=1
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NVTE_APPLY_QK_LAYER_SCALING=0
export TOKENIZERS_PARALLELISM="false"
INPUT_IMAGE_PATH="placeholder"
GROUNDTRUTH_PATH="placeholder"
USE_TILING=0
USE_PIXEL_SHUFFLE_ONLY=0
while [[ $# -gt 0 ]]; do
case $1 in
--input-image-path)
INPUT_IMAGE_PATH="$2"
shift
shift
;;
-o|--output-path)
OUTPUT_PATH="$2"
shift
shift
;;
-m|--model-path)
MODEL_PATH="$2"
shift
shift
;;
--task)
TASK="$2"
shift
shift
;;
-g|--gt-path)
GROUNDTRUTH_PATH="$2"
shift
shift
;;
--use-tiling)
USE_TILING=1
shift
;;
--use-pixel-shuffle-only)
USE_PIXEL_SHUFFLE_ONLY=1
shift
;;
-*|--*)
echo "Invalid option $1"
exit 1
;;
esac
done
# Please modify these as needed.
NUM_PARTITIONS=0
START=0
END=0
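# NUM_PARTITIONS / START / END split the evaluation data across multiple generation runs (passed below as
# --num-partitions / --partition-id); with the defaults above the loop runs a single partition, ID 0.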
SEQ_LEN=1024 # Image embeddings sequence length.
DECODER_SEQ_LEN=8192 # Language model sequence length.
MAX_POS_EMBED=8192
# Additional arguments.
EXTRA_ARGS=""
if [[ $USE_TILING -eq 1 ]]; then
EXTRA_ARGS+=" --pixel-shuffle --use-tiling --max-num-tiles 6 --use-thumbnail --use-tile-tags"
SEQ_LEN=261 # Image embeddings sequence length (256 image embeddings + 5 tile tag embeddings).
fi
if [[ $USE_PIXEL_SHUFFLE_ONLY -eq 1 ]]; then
EXTRA_ARGS+=" --pixel-shuffle"
SEQ_LEN=256
fi
for PARTITION_ID in $( eval echo {$START..$END} )
do
torchrun --nproc_per_node 8 examples/multimodal/run_text_generation.py \
--attention-softmax-in-fp32 \
--no-masked-softmax-fusion \
--swiglu \
--num-layers 80 \
--hidden-size 8192 \
--normalization RMSNorm \
--norm-epsilon 1e-06 \
--num-attention-heads 64 \
--exit-on-missing-checkpoint \
--group-query-attention \
--num-query-groups 8 \
--ffn-hidden-size 29568 \
--load ${MODEL_PATH} \
--seq-length ${SEQ_LEN} \
--decoder-seq-length ${DECODER_SEQ_LEN} \
--max-position-embeddings ${MAX_POS_EMBED} \
--tokenizer-type MultimodalTokenizer \
--tokenizer-model Qwen/Qwen2-72B-Instruct \
--tokenizer-prompt-format qwen2p0 \
--position-embedding-type rope \
--rotary-percent 1.0 \
--rotary-base 1000000 \
--disable-bias-linear \
--add-qkv-bias \
--tensor-model-parallel-size 8 \
--pipeline-model-parallel-size 1 \
--language-model-type qwen2.0_72B \
--vision-model-type internvit \
--micro-batch-size 1 \
--attention-dropout 0.0 \
--hidden-dropout 0.0 \
--bf16 \
--freeze-LM \
--freeze-ViT \
--img-h 448 \
--img-w 448 \
--patch-dim 14 \
--use-te \
--transformer-impl transformer_engine \
--use-checkpoint-args \
--out-seq-length 16 \
--temperature 1.0 \
--seed 1234 \
--top_k 1 \
--no-load-rng \
--no-load-optim \
--num-partitions ${NUM_PARTITIONS} \
--partition-id ${PARTITION_ID} \
--output-path ${OUTPUT_PATH} \
--gt-path ${GROUNDTRUTH_PATH} \
--disable-vision-class-token \
--input-image-path ${INPUT_IMAGE_PATH} \
${EXTRA_ARGS} \
--task ${TASK} \
--image-tag-type nvlm \
--ckpt-format torch
done
#!/bin/bash
export NCCL_IB_SL=1
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NVTE_APPLY_QK_LAYER_SCALING=0
export TOKENIZERS_PARALLELISM="false"
INPUT_IMAGE_PATH="placeholder"
GROUNDTRUTH_PATH="placeholder"
while [[ $# -gt 0 ]]; do
case $1 in
--input-image-path)
INPUT_IMAGE_PATH="$2"
shift
shift
;;
--input-metadata-path)
INPUT_METADATA_PATH="$2"
shift
shift
;;
--num-frames)
NUM_FRAMES="$2"
shift
shift
;;
-g|--groundtruth-path)
GROUNDTRUTH_PATH="$2"
shift
shift
;;
-o|--output-path)
OUTPUT_PATH="$2"
shift
shift
;;
-m|--model-path)
MODEL_PATH="$2"
shift
shift
;;
--task)
TASK="$2"
shift
shift
;;
-g|--gt-path)
GROUNDTRUTH_PATH="$2"
shift
shift
;;
-*|--*)
echo "Invalid option $1"
exit 1
;;
esac
done
# Please modify these as needed.
NUM_PARTITIONS=0
START=0
END=0
SEQ_LEN=256
DECODER_SEQ_LEN=16384
EXTRA_ARGS=" --pixel-shuffle"
for PARTITION_ID in $( eval echo {$START..$END} )
do
torchrun --nproc_per_node 8 examples/multimodal/run_text_generation.py \
--attention-softmax-in-fp32 \
--transformer-impl transformer_engine \
--use-te \
--use-checkpoint-args \
--normalization RMSNorm \
--norm-epsilon 1e-06 \
--language-model-type=qwen2.5_7B \
--untie-embeddings-and-output-weights \
--disable-bias-linear \
--position-embedding-type rope \
--rotary-percent 1.0 \
--rotary-base 1000000 \
--swiglu \
--attention-dropout 0.0 \
--hidden-dropout 0.0 \
--tensor-model-parallel-size 4 \
--pipeline-model-parallel-size 1 \
--group-query-attention \
--num-query-groups 4 \
--num-layers 28 \
--hidden-size 3584 \
--ffn-hidden-size 18944 \
--add-qkv-bias \
--num-attention-heads 28 \
--max-position-embeddings 32768 \
--no-masked-softmax-fusion \
--load ${MODEL_PATH} \
--tokenizer-type MultimodalTokenizer \
--tokenizer-model Qwen/Qwen2.5-7B-Instruct \
--tokenizer-prompt-format qwen2p5 \
--bf16 \
--micro-batch-size 1 \
--seq-length ${SEQ_LEN} \
--decoder-seq-length ${DECODER_SEQ_LEN} \
--out-seq-length 128 \
--temperature 1.0 \
--img-h 448 \
--img-w 448 \
--patch-dim 14 \
--seed 153 \
--top_k 1 \
--no-load-rng \
--no-load-optim \
--input-image-path ${INPUT_IMAGE_PATH} \
--num-partitions ${NUM_PARTITIONS} \
--partition-id ${PARTITION_ID} \
--output-path ${OUTPUT_PATH} \
--gt-path ${GROUNDTRUTH_PATH} \
--task ${TASK} \
${EXTRA_ARGS} \
--special-tokens "<image>" "<img>" "</img>" \
--vision-model-type internvit \
--num-frames ${NUM_FRAMES} \
--ckpt-format torch
done
#!/bin/bash
export NCCL_IB_SL=1
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NVTE_APPLY_QK_LAYER_SCALING=0
export TOKENIZERS_PARALLELISM="false"
INPUT_IMAGE_PATH="placeholder"
GROUNDTRUTH_PATH="placeholder"
while [[ $# -gt 0 ]]; do
case $1 in
-i|--input-image-path)
INPUT_IMAGE_PATH="$2"
shift
shift
;;
-o|--output-path)
OUTPUT_PATH="$2"
shift
shift
;;
-m|--model-path)
MODEL_PATH="$2"
shift
shift
;;
-t|--task)
TASK="$2"
shift
shift
;;
-g|--gt-path)
GROUNDTRUTH_PATH="$2"
shift
shift
;;
-*|--*)
echo "Invalid option $1"
exit 1
;;
esac
done
# Please modify these as needed.
NUM_PARTITIONS=0
START=0
END=0
SEQ_LEN=256
DECODER_SEQ_LEN=8192
EXTRA_ARGS=" --pixel-shuffle --use-tiling --max-num-tiles 12 --use-thumbnail"
for PARTITION_ID in $( eval echo {$START..$END} )
do
torchrun --nproc_per_node 8 examples/multimodal/run_text_generation.py \
--attention-softmax-in-fp32 \
--transformer-impl transformer_engine \
--use-te \
--use-checkpoint-args \
--normalization RMSNorm \
--norm-epsilon 1e-06 \
--language-model-type=qwen2.5_7B \
--untie-embeddings-and-output-weights \
--disable-bias-linear \
--position-embedding-type rope \
--rotary-percent 1.0 \
--rotary-base 1000000 \
--swiglu \
--attention-dropout 0.0 \
--hidden-dropout 0.0 \
--tensor-model-parallel-size 4 \
--pipeline-model-parallel-size 1 \
--group-query-attention \
--num-query-groups 4 \
--num-layers 28 \
--hidden-size 3584 \
--ffn-hidden-size 18944 \
--add-qkv-bias \
--num-attention-heads 28 \
--max-position-embeddings 32768 \
--no-masked-softmax-fusion \
--load ${MODEL_PATH} \
--tokenizer-type MultimodalTokenizer \
--tokenizer-model Qwen/Qwen2.5-7B-Instruct \
--tokenizer-prompt-format qwen2p5 \
--bf16 \
--micro-batch-size 1 \
--seq-length ${SEQ_LEN} \
--decoder-seq-length ${DECODER_SEQ_LEN} \
--out-seq-length 128 \
--temperature 1.0 \
--img-h 448 \
--img-w 448 \
--patch-dim 14 \
--seed 153 \
--top_k 1 \
--no-load-rng \
--no-load-optim \
--input-image-path ${INPUT_IMAGE_PATH} \
--num-partitions ${NUM_PARTITIONS} \
--partition-id ${PARTITION_ID} \
--output-path ${OUTPUT_PATH} \
--gt-path ${GROUNDTRUTH_PATH} \
--task ${TASK} \
${EXTRA_ARGS} \
--special-tokens "<image>" "<img>" "</img>" \
--vision-model-type siglip \
--ckpt-format torch
done
#!/bin/bash
# Your SBATCH commands here if using SLURM.
# Please launch this script from megatron-lm root.
# Train a multimodal model.
export NCCL_IB_SL=1
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NCCL_ALGO=^NVLS
export TOKENIZERS_PARALLELISM="false"
DEBUG=0
if [[ $BATCH -eq 0 ]]; then
DATETIME=`date +'%y-%m-%d-%H-%M-%S'`
MODEL_NAME="mcore-nous-yi34b-internvit-mlp-sft-${DATETIME}"
else
MODEL_NAME="mcore-nous-yi34b-internvit-mlp-sft"
fi
WORKSPACE="<some dir>"
SOURCE=`pwd`
OUTPUT_BASE="${WORKSPACE}/output"
OUTPUT="${OUTPUT_BASE}/${MODEL_NAME}"
FINETUNE_DIR=${OUTPUT}/checkpoints
LOGS_DIR="${OUTPUT}/logs"
TENSORBOARD_DIR="${OUTPUT}/tensorboard"
LOAD_NAME="mcore-nous-yi34b-internvit-mlp" # From pretraining
CHECKPOINT_DIR="${WORKSPACE}/output/${LOAD_NAME}/checkpoints"
DATA_TRAIN="${SOURCE}/examples/multimodal/nvlm/sft_blend.yaml"
if [[ $DEBUG -eq 1 ]]; then
MBZ=1
BZ=1
NW=0
LI=1
AD=0.0
HD=0.0
ALLOW_NONDETERMINISTIC=1
# Can run out of GPU memory in interactive mode without this.
# This is just for interactive testing purposes. Do not use for proper training.
EXTRA_ARGS=" --freeze-LM"
else
MBZ=1
BZ=128
NW=2
LI=5
AD=0.0
HD=0.0
ALLOW_NONDETERMINISTIC=1
EXTRA_ARGS=""
fi
SEQ_LEN=261 # Image embeddings sequence length (256 image embeddings + 5 tile tag embeddings).
DECODER_SEQ_LEN=3200 # Language model sequence length.
MAX_POS_EMBED=3200
OPTIONS=" \
--swiglu \
--use-distributed-optimizer \
--num-workers ${NW} \
--num-layers 60 \
--hidden-size 7168 \
--normalization RMSNorm \
--num-attention-heads 56 \
--exit-duration-in-mins 230 \
--group-query-attention \
--num-query-groups 8 \
--ffn-hidden-size 20480 \
--seq-length ${SEQ_LEN} \
--decoder-seq-length ${DECODER_SEQ_LEN} \
--max-position-embeddings ${MAX_POS_EMBED} \
--tokenizer-type MultimodalTokenizer \
--tokenizer-model NousResearch/Nous-Hermes-2-Yi-34B \
--tokenizer-prompt-format nvlm-yi-34b \
--vocab-size 64000 \
--make-vocab-size-divisible-by 1 \
--position-embedding-type rope \
--rotary-percent 1.0 \
--rotary-base 5000000 \
--disable-bias-linear \
--tensor-model-parallel-size 8 \
--language-model-type yi-34b \
--vision-model-type internvit \
--micro-batch-size ${MBZ} \
--global-batch-size ${BZ} \
--train-samples 30000000 \
--lr-decay-samples 25600000 \
--lr-warmup-samples 83200 \
--lr 2e-6 \
--min-lr 2.5e-7 \
--lr-decay-style cosine \
--split 100,0,0 \
--clip-grad 10 \
--weight-decay 0.1 \
--adam-beta1 0.9 \
--adam-beta2 0.95 \
--init-method-std 0.014 \
--attention-dropout ${AD} \
--hidden-dropout ${HD} \
--eod-mask-loss \
--bf16 \
--tensorboard-dir=${TENSORBOARD_DIR} \
--freeze-ViT \
--img-h 448 \
--img-w 448 \
--patch-dim 14 \
--data-path ${DATA_TRAIN} \
--dataloader-type external \
--dataloader-save ${FINETUNE_DIR}/dataloader \
--prompt-path ${SOURCE}/examples/multimodal/nvlm/nvlm_prompts.json \
--log-interval ${LI} \
--load ${FINETUNE_DIR} \
--save ${FINETUNE_DIR} \
--pretrained-checkpoint ${CHECKPOINT_DIR} \
--save-interval 5000 \
--eval-interval 500 \
--eval-iters 10 \
--log-params-norm \
--log-num-zeros-in-grad \
${EXTRA_ARGS} \
--disable-vision-class-token \
--use-te \
--ckpt-format torch \
--pixel-shuffle \
--use-tiling \
--max-num-tiles 6 \
--use-thumbnail \
--use-tile-tags \
--image-tag-type nvlm
"
export NVTE_ALLOW_NONDETERMINISTIC_ALGO=${ALLOW_NONDETERMINISTIC}
export NVTE_APPLY_QK_LAYER_SCALING=0
# Interactive or batch mode
if [[ $BATCH -eq 0 ]]; then
torchrun --nproc_per_node 8 examples/multimodal/train.py ${OPTIONS}
else
run_cmd="python -u ${SOURCE}/examples/multimodal/train.py ${OPTIONS}"
DATETIME=`date +'date_%y-%m-%d_time_%H-%M-%S'`
srun -l --verbose \
--container-image <path to docker image> \
--container-mounts "<some mount>" \
--output=${LOGS_DIR}/%x_%j_$DATETIME.log \
sh -c "${run_cmd}"
set +x
fi
#!/bin/bash
# Your SBATCH commands here if using SLURM.
# Please launch this script from megatron-lm root.
# Train a multimodal model.
export NCCL_IB_SL=1
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NCCL_ALGO=^NVLS
export TOKENIZERS_PARALLELISM="false"
DEBUG=0
if [[ $BATCH -eq 0 ]]; then
DATETIME=`date +'%y-%m-%d-%H-%M-%S'`
MODEL_NAME="mcore-nous-yi34b-internvit-mlp-sft-${DATETIME}"
else
MODEL_NAME="mcore-nous-yi34b-internvit-mlp-sft"
fi
WORKSPACE="<some dir>"
SOURCE=`pwd`
OUTPUT_BASE="${WORKSPACE}/output"
OUTPUT="${OUTPUT_BASE}/${MODEL_NAME}"
FINETUNE_DIR=${OUTPUT}/checkpoints
LOGS_DIR="${OUTPUT}/logs"
TENSORBOARD_DIR="${OUTPUT}/tensorboard"
LOAD_NAME="mcore-nous-yi34b-internvit-mlp" # From pretraining
CHECKPOINT_DIR="${WORKSPACE}/output/${LOAD_NAME}/checkpoints"
DATA_TRAIN="${SOURCE}/examples/multimodal/nvlm/sft_blend.yaml"
if [[ $DEBUG -eq 1 ]]; then
MBZ=1
BZ=1
NW=0
LI=1
AD=0.0
HD=0.0
ALLOW_NONDETERMINISTIC=1
# Can run out of GPU memory in interactive mode without this.
# This is just for interactive testing purposes. Do not use for proper training.
EXTRA_ARGS=" --freeze-LM"
else
MBZ=1
BZ=128
NW=2
LI=5
AD=0.0
HD=0.0
ALLOW_NONDETERMINISTIC=1
EXTRA_ARGS=""
fi
SEQ_LEN=261 # Image embeddings sequence length (256 image embeddings + 5 tile tag embeddings).
DECODER_SEQ_LEN=3200 # Language model sequence length.
MAX_POS_EMBED=3200
OPTIONS=" \
--swiglu \
--use-distributed-optimizer \
--num-workers ${NW} \
--num-layers 60 \
--hidden-size 7168 \
--normalization RMSNorm \
--num-attention-heads 56 \
--exit-duration-in-mins 230 \
--group-query-attention \
--num-query-groups 8 \
--ffn-hidden-size 20480 \
--seq-length ${SEQ_LEN} \
--decoder-seq-length ${DECODER_SEQ_LEN} \
--max-position-embeddings ${MAX_POS_EMBED} \
--tokenizer-type MultimodalTokenizer \
--tokenizer-model NousResearch/Nous-Hermes-2-Yi-34B \
--tokenizer-prompt-format nvlm-yi-34b \
--vocab-size 64000 \
--make-vocab-size-divisible-by 1 \
--position-embedding-type rope \
--rotary-percent 1.0 \
--rotary-base 5000000 \
--disable-bias-linear \
--tensor-model-parallel-size 8 \
--language-model-type yi-34b \
--vision-model-type internvit \
--micro-batch-size ${MBZ} \
--global-batch-size ${BZ} \
--train-samples 30000000 \
--lr-decay-samples 25600000 \
--lr-warmup-samples 83200 \
--lr 2e-6 \
--min-lr 2.5e-7 \
--lr-decay-style cosine \
--split 100,0,0 \
--clip-grad 10 \
--weight-decay 0.1 \
--adam-beta1 0.9 \
--adam-beta2 0.95 \
--init-method-std 0.014 \
--attention-dropout ${AD} \
--hidden-dropout ${HD} \
--untie-embeddings-and-output-weights \
--eod-mask-loss \
--bf16 \
--tensorboard-dir=${TENSORBOARD_DIR} \
--freeze-ViT \
--img-h 448 \
--img-w 448 \
--patch-dim 14 \
--data-path ${DATA_TRAIN} \
--dataloader-type external \
--dataloader-save ${FINETUNE_DIR}/dataloader \
--prompt-path ${SOURCE}/examples/multimodal/nvlm/nvlm_prompts.json \
--log-interval ${LI} \
--load ${FINETUNE_DIR} \
--save ${FINETUNE_DIR} \
--pretrained-checkpoint ${CHECKPOINT_DIR} \
--save-interval 5000 \
--eval-interval 500 \
--eval-iters 10 \
--log-params-norm \
--log-num-zeros-in-grad \
${EXTRA_ARGS} \
--disable-vision-class-token \
--use-te \
--ckpt-format torch \
--pixel-shuffle \
--use-tiling \
--max-num-tiles 6 \
--use-thumbnail \
--use-tile-tags \
--image-tag-type nvlm
"
export NVTE_ALLOW_NONDETERMINISTIC_ALGO=${ALLOW_NONDETERMINISTIC}
export NVTE_APPLY_QK_LAYER_SCALING=0
# Interactive or batch mode
if [[ $BATCH -eq 0 ]]; then
torchrun --nproc_per_node 8 examples/multimodal/train.py ${OPTIONS}
else
run_cmd="python -u ${SOURCE}/examples/multimodal/train.py ${OPTIONS}"
DATETIME=`date +'date_%y-%m-%d_time_%H-%M-%S'`
srun -l --verbose \
--container-image <path to docker image> \
--container-mounts "<some mount>" \
--output=${LOGS_DIR}/%x_%j_$DATETIME.log \
sh -c "${run_cmd}"
set +x
fi
File mode changed from 100644 to 100755
#!/bin/bash
# Your SBATCH commands here if using SLURM.
# Please launch this script from megatron-lm root.
# Train a multimodal model.
export NCCL_IB_SL=1
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NCCL_ALGO=^NVLS
export TOKENIZERS_PARALLELISM=false
USER=$SLURM_JOB_USER
# Auto-detect batch or interactive mode.
which srun
BATCH=$((1-$?))
DEBUG=0
if [[ $BATCH -eq 0 ]]; then
DATETIME=`date +'%y-%m-%d-%H-%M-%S'`
MODEL_NAME="qwen2.5-7B-internvit-video-sft-nvlm-${DATETIME}"
else
MODEL_NAME="qwen2.5-7B-internvit-video-sft-nvlm"
DEBUG=0
fi
WORKSPACE="<some dir>"
SOURCE=`pwd`
OUTPUT_BASE="${WORKSPACE}/output"
OUTPUT="${OUTPUT_BASE}/${MODEL_NAME}"
FINETUNE_DIR="${OUTPUT}/checkpoints"
LOGS_DIR="${OUTPUT}/logs"
TENSORBOARD_DIR="${OUTPUT}/tensorboard"
# From pretraining. The pretraining checkpoint should have tensor parallel size 4.
LOAD_NAME="mcore-qwen2p5-7b-internvit-tp4"
CHECKPOINT_DIR="${WORKSPACE}/output/${LOAD_NAME}/checkpoints"
DATA_TRAIN="${SOURCE}/examples/multimodal/nvlm/sft_blend.yaml"
if [[ $DEBUG -eq 1 ]]; then
MBZ=1
BZ=1
NW=0
AD=0.0
HD=0.0
LI=1
# This is just for interactive testing purposes. Do not use for proper training.
EXTRA_ARGS="--freeze-LM"
ALLOW_NONDETERMINISTIC=1
else
MBZ=1
BZ=256
NW=8
AD=0.0
HD=0.0
LI=5
EXTRA_ARGS=""
ALLOW_NONDETERMINISTIC=1
fi
USE_TILING=1
SEQ_LEN=1024
DECODER_SEQ_LEN=16384
MAX_POS_EMBED=32768
TRAIN_SAMPLES=6602173
WARMUP_SAMPLES=198065
if [[ $BATCH -eq 0 ]]; then
# Runs out of GPU memory in interactive mode without this.
EXTRA_ARGS+=" --freeze-LM"
fi
if [[ $USE_TILING -eq 1 ]]; then
EXTRA_ARGS+=" --pixel-shuffle --use-tiling --max-num-tiles 12 --use-thumbnail"
SEQ_LEN=256
fi
OPTIONS=" \
--swiglu \
--use-distributed-optimizer \
--num-workers ${NW} \
--num-layers 28 \
--hidden-size 3584 \
--norm-epsilon 1e-06 \
--normalization RMSNorm \
--num-attention-heads 28 \
--exit-duration-in-mins 110 \
--group-query-attention \
--num-query-groups 4 \
--ffn-hidden-size 18944 \
--add-qkv-bias \
--seq-length ${SEQ_LEN} \
--decoder-seq-length ${DECODER_SEQ_LEN} \
--max-position-embeddings ${MAX_POS_EMBED} \
--dataloader-seq-length ${DECODER_SEQ_LEN} \
--tokenizer-type MultimodalTokenizer \
--tokenizer-model Qwen/Qwen2.5-7B-Instruct \
--tokenizer-prompt-format qwen2p5 \
--pixel-shuffle \
--position-embedding-type rope \
--rotary-percent 1.0 \
--rotary-base 1000000 \
--disable-bias-linear \
--pipeline-model-parallel-size 1 \
--tensor-model-parallel-size 4 \
--language-model-type qwen2.5_7B \
--vision-model-type internvit \
--micro-batch-size ${MBZ} \
--global-batch-size ${BZ} \
--lr 2e-6 \
--min-lr 2.5e-7 \
--train-samples ${TRAIN_SAMPLES} \
--lr-warmup-samples ${WARMUP_SAMPLES} \
--lr-decay-style cosine \
--clip-grad 10 \
--weight-decay 0.1 \
--adam-beta1 0.9 \
--adam-beta2 0.95 \
--init-method-std 0.014 \
--attention-dropout ${AD} \
--hidden-dropout ${HD} \
--eod-mask-loss \
--bf16 \
--tensorboard-dir ${TENSORBOARD_DIR} \
--img-h 448 \
--img-w 448 \
--patch-dim 14 \
--data-path ${DATA_TRAIN} \
--dataloader-type external \
--split 100,0,0 \
--prompt-path ${SOURCE}/examples/multimodal/nvlm/nvlm_prompts.json \
--log-interval ${LI} \
--save-interval 500 \
--eval-interval 500 \
--eval-iters 10 \
--log-params-norm \
--log-num-zeros-in-grad \
${EXTRA_ARGS} \
--save ${FINETUNE_DIR} \
--load ${FINETUNE_DIR} \
--pretrained-checkpoint ${CHECKPOINT_DIR} \
--distributed-timeout-minutes 60 \
--allow-missing-vision-projection-checkpoint \
--dataloader-save ${FINETUNE_DIR}/dataloader \
--disable-vision-class-token \
--use-te \
--ckpt-format torch \
--num-frames 32 \
--use-checkpoint-args \
--image-tag-type internvl \
--recompute-granularity full \
--recompute-method block \
--recompute-num-layers 28 \
--recompute-vision \
"
export NVTE_ALLOW_NONDETERMINISTIC_ALGO=${ALLOW_NONDETERMINISTIC}
export NVTE_APPLY_QK_LAYER_SCALING=0
# Interactive or batch mode
if [[ $BATCH -eq 0 ]]; then
torchrun --nproc_per_node 8 examples/multimodal/train.py ${OPTIONS}
else
run_cmd="python -u ${SOURCE}/examples/multimodal/train.py ${OPTIONS}"
DATETIME=`date +'date_%y-%m-%d_time_%H-%M-%S'`
srun -l --verbose \
--container-image <path to docker image> \
--container-mounts "<some mount>" \
--output=${LOGS_DIR}/%x_%j_$DATETIME.log \
sh -c "${run_cmd}"
set +x
fi
#!/bin/bash
# Pretrain a multimodal model.
export NCCL_IB_SL=1
export CUDA_DEVICE_MAX_CONNECTIONS=1
MODEL_NAME="mcore-llava-mistral-7b-instruct-clip336-pretraining"
# Check that the user has set an output path for model checkpoints.
if [[ -z $WORKSPACE ]]; then
echo "Please set WORKSPACE for storing your model checkpoints."
exit 1
fi
SOURCE=`pwd`
OUTPUT_BASE="${WORKSPACE}/output"
OUTPUT="${OUTPUT_BASE}/${MODEL_NAME}"
FINETUNE_DIR=${OUTPUT}/checkpoints
LOGS_DIR="${OUTPUT}/logs"
TENSORBOARD_DIR="${OUTPUT}/tensorboard"
if [[ -z $LOAD_NAME ]]; then
echo "Please set LOAD_NAME for input model name."
exit 1
fi
CHECKPOINT_DIR="${WORKSPACE}/${LOAD_NAME}/checkpoints"
DATA_TRAIN="${SOURCE}/examples/multimodal/pretrain_dataset.yaml"
DEBUG=0
if [[ $DEBUG -eq 1 ]]; then
BZ=32
NW=2
HD=0.0
LI=1
EXTRA_ARGS=""
NONDETERMINISTIC_ATTN=1
else
BZ=256
NW=2
HD=0.1
LI=10
EXTRA_ARGS=""
NONDETERMINISTIC_ATTN=1
fi
OPTIONS=" \
--apply-layernorm-1p \
--attention-softmax-in-fp32 \
--use-checkpoint-args \
--use-distributed-optimizer \
--transformer-impl transformer_engine \
--use-te \
--normalization RMSNorm \
--group-query-attention \
--num-query-groups 8 \
--no-masked-softmax-fusion \
--num-workers ${NW} \
--exit-duration-in-mins 230 \
--use-flash-attn \
--untie-embeddings-and-output-weights \
--disable-bias-linear \
--position-embedding-type rope \
--rotary-percent 1.0 \
--rotary-base 1000000 \
--swiglu \
--attention-dropout 0.0 \
--hidden-dropout ${HD} \
--tensor-model-parallel-size 4 \
--pipeline-model-parallel-size 1 \
--num-layers 32 \
--hidden-size 4096 \
--num-attention-heads 32 \
--seq-length 576 \
--decoder-seq-length 1024 \
--max-position-embeddings 4096 \
--ffn-hidden-size 14336 \
--train-iters 20000 \
--micro-batch-size 1 \
--global-batch-size ${BZ} \
--lr-decay-iters 20000 \
--lr-warmup-fraction .01 \
--lr 0.00015 \
--min-lr 1.0e-5 \
--lr-decay-style cosine \
--log-interval ${LI} \
--eval-iters 10 \
--eval-interval 1000 \
--tokenizer-type MultimodalTokenizer \
--tokenizer-model mistralai/Mistral-7B-Instruct-v0.3 \
--tokenizer-prompt-format mistral \
--data-path ${DATA_TRAIN} \
--prompt-path ${SOURCE}/examples/multimodal/manual_prompts.json \
--save-interval 1000 \
--save ${FINETUNE_DIR} \
--load ${FINETUNE_DIR} \
--dataloader-save ${FINETUNE_DIR}/dataloader \
--pretrained-checkpoint ${CHECKPOINT_DIR} \
--split 100,0,0 \
--clip-grad 1.0 \
--weight-decay 1e-2 \
--adam-beta1 0.9 \
--adam-beta2 0.95 \
--init-method-std 0.014 \
--log-params-norm \
--log-num-zeros-in-grad \
--bf16 \
--eod-mask-loss \
--freeze-LM \
--freeze-ViT \
--patch-dim 14 \
--img-h 336 \
--img-w 336 \
--dataloader-type external \
--tensorboard-dir ${TENSORBOARD_DIR} \
--language-model-type=mistral_7b \
--disable-vision-class-token \
${EXTRA_ARGS} \
--distributed-timeout-minutes 60 \
--allow-missing-vision-projection-checkpoint \
--ckpt-format torch
"
export NVTE_APPLY_QK_LAYER_SCALING=0
export NVTE_ALLOW_NONDETERMINISTIC_ALGO=${NONDETERMINISTIC_ATTN}
#!/bin/bash
# Pretrain a multimodal model.
export NCCL_IB_SL=1
export CUDA_DEVICE_MAX_CONNECTIONS=1
MODEL_NAME="mcore-llava-mistral-7b-instruct-clip336-pretraining"
# Check that the user has set an output path for model checkpoints.
if [[ -z $WORKSPACE ]]; then
echo "Please set WORKSPACE for storing your model checkpoints."
exit 1
fi
SOURCE=`pwd`
OUTPUT_BASE="${WORKSPACE}/output"
OUTPUT="${OUTPUT_BASE}/${MODEL_NAME}"
FINETUNE_DIR=${OUTPUT}/checkpoints
LOGS_DIR="${OUTPUT}/logs"
TENSORBOARD_DIR="${OUTPUT}/tensorboard"
if [[ -z $LOAD_NAME ]]; then
echo "Please set LOAD_NAME for input model name."
exit 1
fi
CHECKPOINT_DIR="${WORKSPACE}/${LOAD_NAME}/checkpoints"
DATA_TRAIN="${SOURCE}/examples/multimodal/pretrain_dataset.yaml"
DEBUG=0
if [[ $DEBUG -eq 1 ]]; then
BZ=32
NW=2
HD=0.0
LI=1
EXTRA_ARGS=""
NONDETERMINISTIC_ATTN=1
else
BZ=256
NW=2
HD=0.1
LI=10
EXTRA_ARGS=""
NONDETERMINISTIC_ATTN=1
fi
OPTIONS=" \
--apply-layernorm-1p \
--attention-softmax-in-fp32 \
--use-checkpoint-args \
--use-distributed-optimizer \
--transformer-impl transformer_engine \
--use-te \
--normalization RMSNorm \
--group-query-attention \
--num-query-groups 8 \
--no-masked-softmax-fusion \
--num-workers ${NW} \
--exit-duration-in-mins 230 \
--use-flash-attn \
--untie-embeddings-and-output-weights \
--disable-bias-linear \
--position-embedding-type rope \
--rotary-percent 1.0 \
--rotary-base 1000000 \
--swiglu \
--attention-dropout 0.0 \
--hidden-dropout ${HD} \
--tensor-model-parallel-size 4 \
--pipeline-model-parallel-size 1 \
--num-layers 32 \
--hidden-size 4096 \
--num-attention-heads 32 \
--seq-length 576 \
--decoder-seq-length 1024 \
--max-position-embeddings 4096 \
--ffn-hidden-size 14336 \
--train-iters 20000 \
--micro-batch-size 1 \
--global-batch-size ${BZ} \
--lr-decay-iters 20000 \
--lr-warmup-fraction .01 \
--lr 0.00015 \
--min-lr 1.0e-5 \
--lr-decay-style cosine \
--log-interval ${LI} \
--eval-iters 10 \
--eval-interval 1000 \
--tokenizer-type MultimodalTokenizer \
--tokenizer-model mistralai/Mistral-7B-Instruct-v0.3 \
--tokenizer-prompt-format mistral \
--data-path ${DATA_TRAIN} \
--prompt-path ${SOURCE}/examples/multimodal/manual_prompts.json \
--save-interval 1000 \
--save ${FINETUNE_DIR} \
--load ${FINETUNE_DIR} \
--dataloader-save ${FINETUNE_DIR}/dataloader \
--pretrained-checkpoint ${CHECKPOINT_DIR} \
--split 100,0,0 \
--clip-grad 1.0 \
--weight-decay 1e-2 \
--adam-beta1 0.9 \
--adam-beta2 0.95 \
--init-method-std 0.014 \
--log-params-norm \
--log-num-zeros-in-grad \
--bf16 \
--eod-mask-loss \
--freeze-LM \
--freeze-ViT \
--patch-dim 14 \
--img-h 336 \
--img-w 336 \
--dataloader-type external \
--tensorboard-dir ${TENSORBOARD_DIR} \
--language-model-type=mistral_7b \
--disable-vision-class-token \
${EXTRA_ARGS} \
--distributed-timeout-minutes 60 \
--allow-missing-vision-projection-checkpoint \
--ckpt-format torch
"
export NVTE_APPLY_QK_LAYER_SCALING=0
export NVTE_ALLOW_NONDETERMINISTIC_ALGO=${NONDETERMINISTIC_ATTN}
torchrun --nproc_per_node 8 examples/multimodal/train.py ${OPTIONS}
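# Example invocation (illustrative only; values are placeholders):
#   WORKSPACE=<output workspace> LOAD_NAME=<pretrained language model dir> bash <this script>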
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
"""Generate text using a vision language model."""
import json
import logging
import os
import sys
from functools import partial
# Add megatron to the path.
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
)
import torch
import yaml
from config import EvaluationConfig
from evaluation.evaluation_datasets import get_evaluation_dataset
from model import model_provider
from multimodal_args import add_multimodal_extra_args
from megatron.core import parallel_state
from megatron.core.enums import ModelType
from megatron.core.models.multimodal.llava_model import IMAGE_TOKEN
from megatron.core.models.vision.clip_vit_model import get_num_image_embeddings
from megatron.inference.text_generation.api import generate_and_post_process
from megatron.inference.text_generation.forward_step import ForwardStep
from megatron.inference.text_generation.communication import broadcast_int_list
from megatron.training import get_args, get_model, get_tokenizer, print_rank_0
from megatron.training.checkpointing import load_checkpoint
from megatron.training.initialize import initialize_megatron
def add_text_generation_args(parser):
"""Text generation arguments."""
group = parser.add_argument_group(title='Vision language model text generation arguments')
group.add_argument("--temperature", type=float, default=1.0, help='Sampling temperature.')
group.add_argument("--top_p", type=float, default=0.0, help='Top p sampling.')
group.add_argument("--top_k", type=int, default=0, help='Top k sampling.')
group.add_argument(
"--out-seq-length", type=int, default=128, help='Length of the output generated text.'
)
group.add_argument("--output-path", type=str, help='Output file path')
group.add_argument('--input-image-path', type=str, help="Input image directory")
group.add_argument(
'--num-partitions', type=int, default=0, help="Number of partitions for inputs."
)
group.add_argument('--partition-id', type=int, default=0, help="Partition index")
group.add_argument("--gt-path", type=str, help="Optional ground truth file")
group.add_argument(
"--task",
type=str,
choices=[
"captioning",
"TextVQA",
"VQAv2",
"ChartQA",
"MMMU",
"VideoMME",
"OCRBench",
"MathVista",
"AI2D",
],
help="Generation task to run",
)
group.add_argument(
"--num-samples-per-partition", type=int, default=0, help="Number of samples per partition"
)
group.add_argument("--config-path", type=str, help="Evaluation config file to use.")
# Add common multimodal arguments needed for e.g. building the model.
parser = add_multimodal_extra_args(parser)
return parser
def get_evaluation_dataloader(
task,
input_image_path,
gt_path,
img_h,
img_w,
use_tiling,
max_num_tiles,
use_thumbnail,
num_samples_per_partition,
num_partitions,
partition_id,
num_frames,
num_workers,
vision_model_type,
):
"""Build evaluation dataset."""
dataset = get_evaluation_dataset(
task,
input_image_path,
gt_path,
img_h,
img_w,
use_tiling,
max_num_tiles,
use_thumbnail,
num_samples_per_partition,
num_partitions,
partition_id,
num_frames,
vision_model_type,
)
dp_rank = parallel_state.get_data_parallel_rank()
dp_world_size = parallel_state.get_data_parallel_world_size()
sampler = torch.utils.data.DistributedSampler(
dataset, shuffle=False, num_replicas=dp_world_size, rank=dp_rank
)
# TODO: Batched inference is not supported yet.
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=None, num_workers=num_workers, sampler=sampler, pin_memory=True
)
return dataloader
def generate_samples(model, config: EvaluationConfig, print_output):
"""Text generation using a trained vision language model."""
args = get_args()
dataloader = get_evaluation_dataloader(
config.task,
config.input_image_path,
config.gt_path,
args.img_h,
args.img_w,
args.use_tiling,
args.max_num_tiles,
args.use_thumbnail,
config.num_samples_per_partition,
config.num_partitions,
config.partition_id,
args.num_frames,
args.num_workers,
args.vision_model_type,
)
num_img_embeddings_per_tile = get_num_image_embeddings(
args.img_h,
args.img_w,
args.patch_dim,
args.vision_model_type,
args.disable_vision_class_token,
1,
args.pixel_shuffle,
args.use_tile_tags,
)
for idx, (imgs, num_tiles, sample_id, question, answers, metadata) in enumerate(dataloader):
imgs = imgs.to("cuda")
num_tiles = num_tiles.to("cuda")
conv = get_conversation(config.task, question)
forward_step = partial(VLMForwardStep, num_img_embeddings_per_tile, imgs, num_tiles, args.decoder_seq_length)
if is_first_rank():
resp_sentences, _, _, _ = generate_and_post_process(
model,
forward_step=forward_step,
prompts=[conv],
tokens_to_generate=config.out_seq_length,
top_k_sampling=config.top_k,
top_p_sampling=config.top_p,
add_BOS=False,
temperature=config.temperature,
random_seed=args.seed,
detokenize_segments=False,
data_parallel=True,
)
for generation in resp_sentences:
if isinstance(sample_id, torch.Tensor):
sample_id = sample_id.item()
output = {"sample_id": sample_id}
output_name = ""
if config.task == "captioning":
output_name = "caption"
elif config.task in (
"TextVQA",
"VQAv2",
"ChartQA",
"OCRBench",
"MathVista",
"AI2D",
):
output_name = "answer"
elif config.task in ("MMMU"):
output_name = "text"
elif config.task == "VideoMME":
output_name = "response"
output = question
else:
raise NotImplementedError("no output name defined for", config.task)
prompt, generated = get_prompt_and_generated(
generation, args.tokenizer_prompt_format
)
if config.task == "VideoMME":
output["questions"][0][output_name] = generated
else:
output["prompt"] = prompt
output[output_name] = generated
if config.task == "captioning":
output["ground_truth"] = answers
elif config.task in (
"TextVQA",
"VQAv2",
"ChartQA",
"OCRBench",
"MathVista",
"AI2D",
):
if isinstance(answers, str):
answers = [answers]
output["gt_answer"] = answers
if len(metadata) > 0:
output.update(metadata)
elif config.task == "MMMU":
output["prediction"] = generated
output.update(metadata)
else:
raise NotImplementedError("no output processing defined for", config.task)
if print_output:
print(output)
yield output
idx += 1
else:
generate_and_post_process(
model, forward_step=forward_step, detokenize_segments=False, data_parallel=True
)
idx += 1
def get_evaluation_config():
"""Get evaluation config from a config file or command-line arguments."""
args = get_args()
if args.config_path:
with open(args.config_path, "r") as f:
config_dict = yaml.safe_load(f)
config = EvaluationConfig(**config_dict)
else:
config = EvaluationConfig(
task=args.task,
temperature=args.temperature,
top_p=args.top_p,
top_k=args.top_k,
out_seq_length=args.out_seq_length,
output_path=args.output_path,
input_image_path=args.input_image_path,
gt_path=args.gt_path,
num_partitions=args.num_partitions,
partition_id=args.partition_id,
num_samples_per_partition=args.num_samples_per_partition,
)
# Default output path if not defined...
if not config.output_path:
os.makedirs("generated", exist_ok=True)
config.output_path = "generated/" + args.language_model_type
return config
def is_first_rank():
"""First tensor and pipeline parallel rank."""
return (
parallel_state.is_pipeline_first_stage(ignore_virtual=True)
and parallel_state.get_tensor_model_parallel_rank() == 0
)
def get_output_path(config, dp_rank):
"""Generation output path."""
return (
f"{config.output_path}-{config.task}-dprank={dp_rank}-partition={config.partition_id}.jsonl"
)
def generate_and_write_samples(model, config, print_output=True):
"""Generate text and write to an output file."""
dp_rank = parallel_state.get_data_parallel_rank()
if is_first_rank():
output_path = get_output_path(config, dp_rank)
output_file = open(output_path, "w")
print(f"output path: {output_file.name}")
with torch.no_grad():
for output in generate_samples(model, config, print_output):
if is_first_rank():
output_file.write(json.dumps(output) + "\n")
output_file.flush()
if is_first_rank():
output_file.close()
class VLMForwardStep(ForwardStep):
"""Inference forward step for a multimodal model."""
def __init__(
self,
num_img_embeddings_per_tile,
images,
num_tiles,
decoder_seq_length,
model,
max_batch_size,
max_sequence_length,
):
"""Create multimodal forward step."""
total_num_tiles = torch.sum(num_tiles).item()
num_img_embeddings = num_img_embeddings_per_tile * total_num_tiles
super().__init__(model, max_batch_size, max_sequence_length + num_img_embeddings)
self._images = images
self._num_tiles = num_tiles
self._num_img_embeddings = num_img_embeddings
self.decoder_seq_length = decoder_seq_length
self._recv_only_vision_embeds = False
pp_rank = parallel_state.get_pipeline_model_parallel_rank()
# Checks if the previous stage only has a vision encoder, and that the current stage has part of the LM decoder.
# In this case, the current stage should only receive vision embeddings.
if pp_rank > 0:
self._recv_only_vision_embeds = parallel_state.is_inside_encoder(pp_rank - 1) and (not parallel_state.is_inside_decoder(pp_rank - 1)) and parallel_state.is_inside_decoder()
# Checks if the current stage only has a vision encoder
self._encoder_only = parallel_state.is_inside_encoder() and not parallel_state.is_inside_decoder()
def _forward(self, tokens, position_ids, attention_mask):
return self.model(
self._images,
tokens,
position_ids,
attention_mask=None,
inference_params=self.inference_params,
num_image_tiles=self._num_tiles,
runtime_gather_output=True,
)
def __call__(self, tokens, position_ids, attention_mask):
num_image_tokens = (tokens == self.model.module.image_token_index).sum().item()
num_tokens = tokens.size(1)
recv_buffer_seq_length = None
if num_image_tokens > 0:
# When there are image tokens and this stage only receives vision embeddings, adjust the recv buffer seq length to match the image embeddings sequence length.
# If there are image tokens and this stage receives full embeddings, make sure we compensate for expansion of image tokens.
# Note that this will set a recv_buffer_seq_length for the encoder stage; this length is irrelevant since that recv buffer is never allocated.
if self._recv_only_vision_embeds:
recv_buffer_seq_length = self._num_img_embeddings
else:
recv_buffer_seq_length = min(self._num_img_embeddings + num_tokens - num_image_tokens, self.decoder_seq_length)
elif self._recv_only_vision_embeds:
# If this stage only receives vision embeddings and there are no image tokens we won't run the encoder and therefore shouldn't try to recv.
recv_buffer_seq_length = 0
# If the pipeline stage only has a vision encoder, then it only needs to run when there are image tokens
if not (self._encoder_only and num_image_tokens == 0):
output = super().__call__(tokens, position_ids, attention_mask, recv_buffer_seq_length=recv_buffer_seq_length)
else:
output = None
if isinstance(output, tuple):
logits, _ = output
else:
logits = output
# On the first inference iteration, we compute image tokens.
# On every PP stage (although inference params should only matter for the decoder),
# update the sequence length offset by the number of image tokens.
if num_tokens > 1 and num_image_tokens > 0:
if "image_tokens_count" not in self.inference_params.key_value_memory_dict:
self.inference_params.key_value_memory_dict["image_tokens_count"] = self._num_img_embeddings
if self._num_img_embeddings + num_tokens - num_image_tokens > self.decoder_seq_length:
self.inference_params.sequence_len_offset += self.decoder_seq_length - num_tokens
else:
self.inference_params.sequence_len_offset += (
self.inference_params.key_value_memory_dict["image_tokens_count"] - num_image_tokens
)
return logits
def get_conversation(task, question):
"""Get a conversation for a given task and evaluation question."""
conversation = []
# In all cases, the tokenizer adds possible header tokens for the assistant.
if task == "captioning":
conversation = [
{"role": "system", "content": "Answer the questions."},
{
"role": "user",
"content": f"{IMAGE_TOKEN}\nProvide a one-sentence caption for provided image.",
},
]
elif task in ("TextVQA", "VQAv2", "ChartQA"):
conversation = [
{"role": "system", "content": "Answer the questions."},
{
"role": "user",
"content": f"{IMAGE_TOKEN}\n{question}\nAnswer the question using a single word or phrase.",
},
]
elif task in ("OCRBench", "MathVista", "AI2D"):
conversation = [
{"role": "system", "content": "Answer the questions."},
{"role": "user", "content": f"{IMAGE_TOKEN}\n{question}"},
]
elif task == "MMMU":
conversation = [
{"role": "system", "content": "Answer the questions."},
{"role": "user", "content": question},
]
elif task == "VideoMME":
q = (
"Select the best answer to the following multiple-choice "
"question based on the video. Respond with only the letter "
"(A, B, C, or D) of the correct option.\n"
)
q += question["questions"][0]["question"] + "\n"
q += question["questions"][0]["choices"][0] + "\n"
q += question["questions"][0]["choices"][1] + "\n"
q += question["questions"][0]["choices"][2] + "\n"
q += question["questions"][0]["choices"][3] + "\n"
conversation = [
{"role": "system", "content": "Answer the questions."},
{"role": "user", "content": f"{IMAGE_TOKEN}\n{question}"},
]
return conversation
def get_prompt_and_generated(prompt_and_generation, prompt_format):
"""Strip prompt and other unnecessary text from generation."""
if prompt_format == "llama3":
splitted = prompt_and_generation.split("<|start_header_id|>assistant<|end_header_id|>\n\n")
prompt = splitted[0]
generated = splitted[1]
generated = generated.split("<|eot_id|>")[0]
elif prompt_format == "mistral":
splitted = prompt_and_generation.split("[/INST]")
prompt = splitted[0]
generated = splitted[1]
generated = generated.split("</s>")[0]
elif prompt_format == "chatml":
splitted = prompt_and_generation.split("<|im_start|> assistant\n")
prompt = splitted[0]
generated = splitted[1]
generated = generated.split("<|im_end|>")[0]
elif prompt_format in ("nvlm-yi-34b", "qwen2p0", "qwen2p5"):
splitted = prompt_and_generation.split("<|im_start|>assistant\n")
prompt = splitted[0]
generated = splitted[1]
generated = generated.split("<|im_end|>")[0]
else:
raise ValueError(f"Prompt format {prompt_format} is not supported.")
# Remove possible garbage.
generated = generated.strip()
generated = generated.split("\n\n")[0]
generated = generated.split("\n")[0]
return prompt, generated
def main():
"""Vision language model text generation."""
initialize_megatron(extra_args_provider=add_text_generation_args)
if torch.distributed.get_rank() == 0:
logging.getLogger(__name__).warning(
"Models using pipeline parallelism are not supported yet."
)
args = get_args()
def wrapped_model_provider(pre_process, post_process, add_encoder, add_decoder):
return model_provider(pre_process, post_process, add_encoder, add_decoder, parallel_output=False)
# Set up model and load checkpoint.
model = get_model(wrapped_model_provider, model_type=ModelType.encoder_and_decoder, wrap_with_ddp=False)
if args.load is not None:
_ = load_checkpoint(model, None, None)
model = model[0]
model.eval()
config = get_evaluation_config()
generate_and_write_samples(model, config)
if __name__ == "__main__":
main()
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
"""Generate text using a vision language model."""
import json
import logging
import os
import sys
from functools import partial
from typing import List
# Add megatron to the path.
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
)
import torch
import yaml
from config import EvaluationConfig
from evaluation.evaluation_datasets import get_evaluation_dataset
from model import model_provider
from multimodal_args import add_multimodal_extra_args
from megatron.core import parallel_state
from megatron.core.enums import ModelType
from megatron.core.models.multimodal.llava_model import IMAGE_TOKEN
from megatron.core.models.vision.clip_vit_model import get_num_image_embeddings
from megatron.inference.text_generation.api import generate_and_post_process
from megatron.inference.text_generation.forward_step import ForwardStep
from megatron.inference.text_generation.communication import broadcast_int_list
from megatron.core.inference.sampling_params import SamplingParams
from megatron.core.inference.engines.mcore_engine import MCoreEngine
from megatron.core.inference.inference_request import InferenceRequest, VLMInferenceRequest
from megatron.core.inference.text_generation_controllers.vlm_text_generation_controller import (
VLMTextGenerationController,
)
from megatron.core.inference.model_inference_wrappers.inference_wrapper_config import (
InferenceWrapperConfig,
)
from megatron.core.inference.model_inference_wrappers.multimodal.vlm_inference_wrapper import (
VLMInferenceWrapper,
)
from megatron.training import get_args, get_model, get_tokenizer, print_rank_0
from megatron.training.checkpointing import load_checkpoint
from megatron.training.initialize import initialize_megatron
def add_text_generation_args(parser):
"""Text generation arguments."""
group = parser.add_argument_group(title='Vision language model text generation arguments')
group.add_argument("--temperature", type=float, default=1.0, help='Sampling temperature.')
group.add_argument("--top_p", type=float, default=0.0, help='Top p sampling.')
group.add_argument("--top_k", type=int, default=0, help='Top k sampling.')
group.add_argument(
"--out-seq-length", type=int, default=128, help='Length of the output generated text.'
)
group.add_argument("--output-path", type=str, help='Output file path')
group.add_argument('--input-image-path', type=str, help="Input image directory")
group.add_argument(
'--num-partitions', type=int, default=0, help="Number of partitions for inputs."
)
group.add_argument('--partition-id', type=int, default=0, help="Partition index")
group.add_argument("--gt-path", type=str, help="Optional ground truth file")
group.add_argument(
"--task",
type=str,
choices=[
"captioning",
"TextVQA",
"VQAv2",
"ChartQA",
"MMMU",
"VideoMME",
"OCRBench",
"MathVista",
"AI2D",
"InfoVQA",
"SPDocVQA",
],
help="Generation task to run",
)
group.add_argument(
"--num-samples-per-partition", type=int, default=0, help="Number of samples per partition"
)
group.add_argument("--config-path", type=str, help="Evaluation config file to use.")
group.add_argument("--use-mcore-inference", action="store_true", default=False, help="Use the MCore inference API")
# Add common multimodal arguments needed for e.g. building the model.
parser = add_multimodal_extra_args(parser)
return parser
def get_evaluation_dataloader(
task,
input_image_path,
gt_path,
img_h,
img_w,
use_tiling,
max_num_tiles,
use_thumbnail,
num_samples_per_partition,
num_partitions,
partition_id,
num_frames,
num_workers,
vision_model_type,
):
"""Build evaluation dataset."""
dataset = get_evaluation_dataset(
task,
input_image_path,
gt_path,
img_h,
img_w,
use_tiling,
max_num_tiles,
use_thumbnail,
num_samples_per_partition,
num_partitions,
partition_id,
num_frames,
vision_model_type,
)
dp_rank = parallel_state.get_data_parallel_rank()
dp_world_size = parallel_state.get_data_parallel_world_size()
sampler = torch.utils.data.DistributedSampler(
dataset, shuffle=False, num_replicas=dp_world_size, rank=dp_rank
)
# TODO: Batched inference is not supported yet.
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=None, num_workers=num_workers, sampler=sampler, pin_memory=True
)
return dataloader
def generate_samples(model, config: EvaluationConfig, print_output):
"""Text generation using a trained vision language model."""
args = get_args()
dataloader = get_evaluation_dataloader(
config.task,
config.input_image_path,
config.gt_path,
args.img_h,
args.img_w,
args.use_tiling,
args.max_num_tiles,
args.use_thumbnail,
config.num_samples_per_partition,
config.num_partitions,
config.partition_id,
args.num_frames,
args.num_workers,
args.vision_model_type,
)
num_img_embeddings_per_tile = get_num_image_embeddings(
args.img_h,
args.img_w,
args.patch_dim,
args.vision_model_type,
args.disable_vision_class_token,
1,
args.pixel_shuffle,
args.use_tile_tags,
)
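# Optional MCore inference path: wrap the model once, build a VLM text generation
# controller and engine, and reuse them for every sample in the loop below.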
if args.use_mcore_inference:
inference_wrapper_config = InferenceWrapperConfig(
hidden_size=args.hidden_size,
inference_batch_times_seqlen_threshold=args.inference_batch_times_seqlen_threshold,
fp32_residual_connection=args.fp32_residual_connection,
params_dtype=args.params_dtype,
padded_vocab_size=args.padded_vocab_size,
)
inference_wrapped_model = VLMInferenceWrapper(model, inference_wrapper_config)
tokenizer = get_tokenizer()
controller = VLMTextGenerationController(
inference_wrapped_model=inference_wrapped_model, tokenizer=tokenizer
)
inference_engine = MCoreEngine(
controller, max_batch_size=1, random_seed=args.seed
)
sampling_params = SamplingParams(
temperature=config.temperature,
top_k=config.top_k,
top_p=config.top_p,
num_tokens_to_generate=config.out_seq_length,
)
for idx, (imgs, num_tiles, sample_id, question, answers, metadata) in enumerate(dataloader):
imgs = imgs.to("cuda")
num_tiles = num_tiles.to("cuda")
conv = get_conversation(config.task, question)
if not args.use_mcore_inference:
forward_step = partial(VLMForwardStep, num_img_embeddings_per_tile, imgs, num_tiles, args.decoder_seq_length)
if is_first_rank():
if args.use_mcore_inference:
inference_request = VLMInferenceRequest(
request_id=inference_engine.get_new_request_id(),
prompt=conv,
prompt_tokens=controller.tokenize_prompt(conv),
inference_parameters=sampling_params,
num_img_embeddings_per_tile=num_img_embeddings_per_tile,
imgs=imgs,
num_tiles=num_tiles,
decoder_seq_length=args.decoder_seq_length,
)
results: List[InferenceRequest] = inference_engine.generate(
inference_requests=[inference_request]
)
resp_sentences = [
tokenizer.detokenize(result.prompt_tokens) + result.generated_text
for result in results
]
else:
resp_sentences, _, _, _ = generate_and_post_process(
model,
forward_step=forward_step,
prompts=[conv],
tokens_to_generate=config.out_seq_length,
top_k_sampling=config.top_k,
top_p_sampling=config.top_p,
add_BOS=False,
temperature=config.temperature,
random_seed=args.seed,
detokenize_segments=False,
data_parallel=True,
)
for generation in resp_sentences:
if isinstance(sample_id, torch.Tensor):
sample_id = sample_id.item()
output = {"sample_id": sample_id}
output_name = ""
if config.task == "captioning":
output_name = "caption"
elif config.task in (
"TextVQA",
"VQAv2",
"ChartQA",
"OCRBench",
"MathVista",
"AI2D",
"InfoVQA",
"SPDocVQA",
):
output_name = "answer"
elif config.task in ("MMMU"):
output_name = "text"
elif config.task == "VideoMME":
output_name = "response"
output = question
else:
raise NotImplementedError("no output name defined for", config.task)
prompt, generated = get_prompt_and_generated(
generation, args.tokenizer_prompt_format
)
if config.task == "VideoMME":
output["questions"][0][output_name] = generated
else:
output["prompt"] = prompt
output[output_name] = generated
if config.task == "captioning":
output["ground_truth"] = answers
elif config.task in (
"TextVQA",
"VQAv2",
"ChartQA",
"OCRBench",
"MathVista",
"AI2D",
"InfoVQA",
"SPDocVQA",
):
if isinstance(answers, str):
answers = [answers]
output["gt_answer"] = answers
if len(metadata) > 0:
output.update(metadata)
elif config.task == "MMMU":
output["prediction"] = generated
output.update(metadata)
else:
raise NotImplementedError("no output processing defined for", config.task)
if print_output:
print(output)
yield output
idx += 1
else:
if args.use_mcore_inference:
inference_request = VLMInferenceRequest(
request_id=inference_engine.get_new_request_id(),
prompt=conv,
prompt_tokens=controller.tokenize_prompt(conv),
inference_parameters=sampling_params,
num_img_embeddings_per_tile=num_img_embeddings_per_tile,
imgs=imgs,
num_tiles=num_tiles,
decoder_seq_length=args.decoder_seq_length,
)
inference_engine.generate(
inference_requests=[inference_request]
)
else:
generate_and_post_process(
model, forward_step=forward_step, detokenize_segments=False, data_parallel=True
)
idx += 1
def get_evaluation_config():
"""Get evaluation config from a config file or command-line arguments."""
args = get_args()
if args.config_path:
with open(args.config_path, "r") as f:
config_dict = yaml.safe_load(f)
config = EvaluationConfig(**config_dict)
else:
config = EvaluationConfig(
task=args.task,
temperature=args.temperature,
top_p=args.top_p,
top_k=args.top_k,
out_seq_length=args.out_seq_length,
output_path=args.output_path,
input_image_path=args.input_image_path,
gt_path=args.gt_path,
num_partitions=args.num_partitions,
partition_id=args.partition_id,
num_samples_per_partition=args.num_samples_per_partition,
)
# Default output path if not defined...
if not config.output_path:
os.makedirs("generated", exist_ok=True)
config.output_path = "generated/" + args.language_model_type
return config
def is_first_rank():
"""First tensor and pipeline parallel rank."""
return (
parallel_state.is_pipeline_first_stage(ignore_virtual=True)
and parallel_state.get_tensor_model_parallel_rank() == 0
)
def get_output_path(config, dp_rank):
"""Generation output path."""
return (
f"{config.output_path}-{config.task}-dprank={dp_rank}-partition={config.partition_id}.jsonl"
)
def generate_and_write_samples(model, config, print_output=True):
"""Generate text and write to an output file."""
dp_rank = parallel_state.get_data_parallel_rank()
if is_first_rank():
output_path = get_output_path(config, dp_rank)
output_file = open(output_path, "w")
print(f"output path: {output_file.name}")
with torch.no_grad():
for output in generate_samples(model, config, print_output):
if is_first_rank():
output_file.write(json.dumps(output) + "\n")
output_file.flush()
if is_first_rank():
output_file.close()
class VLMForwardStep(ForwardStep):
"""Inference forward step for a multimodal model."""
def __init__(
self,
num_img_embeddings_per_tile,
images,
num_tiles,
decoder_seq_length,
model,
max_batch_size,
max_sequence_length,
):
"""Create multimodal forward step."""
total_num_tiles = torch.sum(num_tiles).item()
num_img_embeddings = num_img_embeddings_per_tile * total_num_tiles
super().__init__(model, max_batch_size, max_sequence_length + num_img_embeddings)
self._images = images
self._num_tiles = num_tiles
self._num_img_embeddings = num_img_embeddings
self.decoder_seq_length = decoder_seq_length
self._recv_only_vision_embeds = False
pp_rank = parallel_state.get_pipeline_model_parallel_rank()
# Checks if the previous stage only has a vision encoder, and that the current stage has part of the LM decoder.
# In this case, the current stage should only receive vision embeddings.
if pp_rank > 0:
self._recv_only_vision_embeds = parallel_state.is_inside_encoder(pp_rank - 1) and (not parallel_state.is_inside_decoder(pp_rank - 1)) and parallel_state.is_inside_decoder()
# Checks if the current stage only has a vision encoder
self._encoder_only = parallel_state.is_inside_encoder() and not parallel_state.is_inside_decoder()
def _forward(self, tokens, position_ids, attention_mask):
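# The attention_mask argument is unused here; masking is handled by the model's own
# attn_mask_type, so None is passed explicitly below.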
return self.model(
self._images,
tokens,
position_ids,
attention_mask=None,
inference_params=self.inference_params,
num_image_tiles=self._num_tiles,
runtime_gather_output=True,
)
def __call__(self, tokens, position_ids, attention_mask):
num_image_tokens = (tokens == self.model.module.image_token_index).sum().item()
num_tokens = tokens.size(1)
recv_buffer_seq_length = None
if num_image_tokens > 0:
# When there are image tokens and this stage only receives vision embeddings, adjust the recv buffer seq length to match the image embeddings sequence length.
# If there are image tokens and this stage receives full embeddings, make sure we compensate for expansion of image tokens.
# Note that this will set a recv_buffer_seq_length for the encoder stage; this length is irrelevant since that recv buffer is never allocated.
if self._recv_only_vision_embeds:
recv_buffer_seq_length = self._num_img_embeddings
else:
recv_buffer_seq_length = min(self._num_img_embeddings + num_tokens - num_image_tokens, self.decoder_seq_length)
elif self._recv_only_vision_embeds:
# If this stage only receives vision embeddings and there are no image tokens we won't run the encoder and therefore shouldn't try to recv.
recv_buffer_seq_length = 0
# If the pipeline stage only has a vision encoder, then it only needs to run when there are image tokens
if not (self._encoder_only and num_image_tokens == 0):
output = super().__call__(tokens, position_ids, attention_mask, recv_buffer_seq_length=recv_buffer_seq_length)
else:
output = None
if isinstance(output, tuple):
logits, _ = output
else:
logits = output
# On the first inference iteration, we compute image tokens.
# On every PP stage (although inference params should only matter for the decoder),
# update the sequence length offset by the number of image tokens.
if num_tokens > 1 and num_image_tokens > 0:
if "image_tokens_count" not in self.inference_params.key_value_memory_dict:
self.inference_params.key_value_memory_dict["image_tokens_count"] = self._num_img_embeddings
if self._num_img_embeddings + num_tokens - num_image_tokens > self.decoder_seq_length:
self.inference_params.sequence_len_offset += self.decoder_seq_length - num_tokens
else:
self.inference_params.sequence_len_offset += (
self.inference_params.key_value_memory_dict["image_tokens_count"] - num_image_tokens
)
return logits
def get_conversation(task, question):
"""Get a conversation for a given task and evaluation question."""
conversation = []
# In all cases, the tokenizer adds possible header tokens for the assistant.
if task == "captioning":
conversation = [
{"role": "system", "content": "Answer the questions."},
{
"role": "user",
"content": f"{IMAGE_TOKEN}\nProvide a one-sentence caption for provided image.",
},
]
elif task in ("TextVQA", "VQAv2", "ChartQA", "InfoVQA", "SPDocVQA"):
conversation = [
{"role": "system", "content": "Answer the questions."},
{
"role": "user",
"content": f"{IMAGE_TOKEN}\n{question}\nAnswer the question using a single word or phrase.",
},
]
elif task in ("OCRBench", "MathVista", "AI2D"):
conversation = [
{"role": "system", "content": "Answer the questions."},
{"role": "user", "content": f"{IMAGE_TOKEN}\n{question}"},
]
elif task == "MMMU":
conversation = [
{"role": "system", "content": "Answer the questions."},
{"role": "user", "content": question},
]
elif task == "VideoMME":
q = (
"Select the best answer to the following multiple-choice "
"question based on the video. Respond with only the letter "
"(A, B, C, or D) of the correct option.\n"
)
q += question["questions"][0]["question"] + "\n"
q += question["questions"][0]["choices"][0] + "\n"
q += question["questions"][0]["choices"][1] + "\n"
q += question["questions"][0]["choices"][2] + "\n"
q += question["questions"][0]["choices"][3] + "\n"
conversation = [
{"role": "system", "content": "Answer the questions."},
{"role": "user", "content": f"{IMAGE_TOKEN}\n{q}"},
]
return conversation
def get_prompt_and_generated(prompt_and_generation, prompt_format):
"""Strip prompt and other unnecessary text from generation."""
if prompt_format in ("llama3", "llama3p1"):
splitted = prompt_and_generation.split("<|start_header_id|>assistant<|end_header_id|>\n\n")
prompt = splitted[0]
generated = splitted[1]
generated = generated.split("<|eot_id|>")[0]
elif prompt_format == "mistral":
splitted = prompt_and_generation.split("[/INST]")
prompt = splitted[0]
generated = splitted[1]
generated = generated.split("</s>")[0]
elif prompt_format == "chatml":
splitted = prompt_and_generation.split("<|im_start|> assistant\n")
prompt = splitted[0]
generated = splitted[1]
generated = generated.split("<|im_end|>")[0]
elif prompt_format in ("nvlm-yi-34b", "qwen2p0", "qwen2p5"):
splitted = prompt_and_generation.split("<|im_start|>assistant\n")
prompt = splitted[0]
generated = splitted[1]
generated = generated.split("<|im_end|>")[0]
else:
raise ValueError(f"Prompt format {prompt_format} is not supported.")
# Remove possible garbage.
generated = generated.strip()
generated = generated.split("\n\n")[0]
generated = generated.split("\n")[0]
return prompt, generated
def main():
"""Vision language model text generation."""
initialize_megatron(extra_args_provider=add_text_generation_args)
if torch.distributed.get_rank() == 0:
logging.getLogger(__name__).warning(
"Models using pipeline parallelism are not supported yet."
)
args = get_args()
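# parallel_output=False makes the output layer return full-vocabulary (gathered) logits
# instead of tensor-parallel shards for generation.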
def wrapped_model_provider(pre_process, post_process, add_encoder, add_decoder):
return model_provider(pre_process, post_process, add_encoder, add_decoder, parallel_output=False)
# Set up model and load checkpoint.
model = get_model(wrapped_model_provider, model_type=ModelType.encoder_and_decoder, wrap_with_ddp=False)
if args.load is not None:
_ = load_checkpoint(model, None, None)
model = model[0]
model.eval()
config = get_evaluation_config()
generate_and_write_samples(model, config)
if __name__ == "__main__":
main()
#!/bin/bash
# Run SFT on a pretrained multimodal model
export NCCL_IB_SL=1
export CUDA_DEVICE_MAX_CONNECTIONS=1
MODEL_NAME="mcore-llava-mistral-7b-instruct-clip336-sft"
# Check that the user has set an output path for model checkpoints.
if [[ -z $WORKSPACE ]]; then
echo "Please set WORKSPACE for storing your model checkpoints."
exit 1
fi
SOURCE=`pwd`
OUTPUT_BASE="${WORKSPACE}/output"
OUTPUT="${OUTPUT_BASE}/${MODEL_NAME}"
FINETUNE_DIR=${OUTPUT}/checkpoints
LOGS_DIR="${OUTPUT}/logs"
TENSORBOARD_DIR="${OUTPUT}/tensorboard"
if [[ -z $LOAD_NAME ]]; then
echo "Please set LOAD_NAME for input model name."
exit 1
fi
if [[ -z $LOAD_ITER ]]; then
echo "Please set LOAD_ITER for pre-trained input model iteration."
exit 1
fi
CHECKPOINT_DIR="${WORKSPACE}/${LOAD_NAME}/checkpoints"
DATA_TRAIN="${SOURCE}/examples/multimodal/sft_dataset.yaml"
DEBUG=0
if [[ $DEBUG -eq 1 ]]; then
BZ=8
NW=1
HD=0.0
LI=1
EXTRA_ARGS=""
NONDETERMINISTIC_ATTN=1
else
BZ=128
NW=2
HD=0.1
LI=10
EXTRA_ARGS=""
NONDETERMINISTIC_ATTN=1
fi
OPTIONS=" \
--apply-layernorm-1p \
--attention-softmax-in-fp32 \
--use-checkpoint-args \
--use-distributed-optimizer \
--transformer-impl transformer_engine \
--use-te \
--normalization RMSNorm \
--group-query-attention \
--num-query-groups 8 \
--no-masked-softmax-fusion \
--num-workers ${NW} \
--exit-duration-in-mins 230 \
--use-flash-attn \
--untie-embeddings-and-output-weights \
--disable-bias-linear \
--position-embedding-type rope \
--rotary-percent 1.0 \
--rotary-base 1000000 \
--swiglu \
--attention-dropout 0.0 \
--hidden-dropout ${HD} \
--tensor-model-parallel-size 4 \
--pipeline-model-parallel-size 1 \
--num-layers 32 \
--hidden-size 4096 \
--num-attention-heads 32 \
--seq-length 576 \
--decoder-seq-length 2048 \
--max-position-embeddings 4096 \
--ffn-hidden-size 14336 \
--train-iters 20000 \
--micro-batch-size 1 \
--global-batch-size ${BZ} \
--lr-decay-iters 20000 \
--lr-warmup-fraction .01 \
--lr 1e-6 \
--min-lr 1e-7 \
--lr-decay-style cosine \
--log-interval ${LI} \
--eval-iters 10 \
--eval-interval 500 \
--tokenizer-type MultimodalTokenizer \
--tokenizer-model mistralai/Mistral-7B-Instruct-v0.3 \
--tokenizer-prompt-format mistral \
--data-path ${DATA_TRAIN} \
--prompt-path ${SOURCE}/examples/multimodal/manual_prompts.json \
--save-interval 500 \
--save ${FINETUNE_DIR} \
--load ${FINETUNE_DIR} \
--pretrained-checkpoint ${CHECKPOINT_DIR} \
--dataloader-save ${FINETUNE_DIR}/dataloader \
--split 100,0,0 \
--clip-grad 0.5 \
--weight-decay 0.1 \
--adam-beta1 0.9 \
--adam-beta2 0.95 \
--init-method-std 0.014 \
--log-params-norm \
--log-num-zeros-in-grad \
--eod-mask-loss \
--freeze-ViT \
--patch-dim 14 \
--img-h 336 \
--img-w 336 \
--dataloader-type external \
--tensorboard-dir ${TENSORBOARD_DIR} \
--language-model-type=mistral_7b \
--disable-vision-class-token \
${EXTRA_ARGS} \
--distributed-timeout-minutes 60 \
--ckpt-format torch
"
export NVTE_APPLY_QK_LAYER_SCALING=0
export NVTE_ALLOW_NONDETERMINISTIC_ALGO=${NONDETERMINISTIC_ATTN}
torchrun --nproc_per_node 8 examples/multimodal/train.py ${OPTIONS}
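# Example invocation (illustrative only; values are placeholders):
#   WORKSPACE=<output workspace> LOAD_NAME=<pretrained model dir> LOAD_ITER=<iteration> bash <this script>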
#!/bin/bash
export NCCL_IB_SL=1
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NVTE_APPLY_QK_LAYER_SCALING=0
INPUT_IMAGE_PATH="placeholder"
GROUNDTRUTH_PATH="placeholder"
NUM_FRAMES=1
while [[ $# -gt 0 ]]; do
case $1 in
-i|--input-image-path)
INPUT_IMAGE_PATH="$2"
shift
shift
;;
--num-frames)
NUM_FRAMES="$2"
shift
shift
;;
-o|--output-path)
OUTPUT_PATH="$2"
shift
shift
;;
-m|--model-path)
MODEL_PATH="$2"
shift
shift
;;
-t|--task)
TASK="$2"
shift
shift
;;
-g|--gt-path)
GROUNDTRUTH_PATH="$2"
shift
shift
;;
-*|--*)
echo "Invalid option $1"
exit 1
;;
esac
done
# Please modify these as needed.
NUM_PARTITIONS=0
START=0
END=0
for PARTITION_ID in $( eval echo {$START..$END} )
do
torchrun --nproc_per_node 8 examples/multimodal/run_text_generation.py \
--apply-layernorm-1p \
--attention-softmax-in-fp32 \
--use-flash-attn \
--transformer-impl transformer_engine \
--use-te \
--use-checkpoint-args \
--normalization RMSNorm \
--language-model-type mistral_7b \
--untie-embeddings-and-output-weights \
--disable-bias-linear \
--position-embedding-type rope \
--rotary-percent 1.0 \
--rotary-base 1000000 \
--swiglu \
--attention-dropout 0.0 \
--hidden-dropout 0.0 \
--tensor-model-parallel-size 4 \
--pipeline-model-parallel-size 1 \
--group-query-attention \
--num-query-groups 8 \
--num-layers 32 \
--hidden-size 4096 \
--ffn-hidden-size 14336 \
--num-attention-heads 32 \
--max-position-embeddings 4096 \
--no-masked-softmax-fusion \
--load ${MODEL_PATH} \
--tokenizer-type MultimodalTokenizer \
--tokenizer-model mistralai/Mistral-7B-Instruct-v0.3 \
--tokenizer-prompt-format mistral \
--bf16 \
--micro-batch-size 1 \
--seq-length 2048 \
--out-seq-length 12 \
--temperature 1.0 \
--img-h 336 \
--img-w 336 \
--patch-dim 14 \
--seed 153 \
--top_k 1 \
--no-load-rng \
--no-load-optim \
--input-image-path ${INPUT_IMAGE_PATH} \
--num-partitions ${NUM_PARTITIONS} \
--partition-id ${PARTITION_ID} \
--output-path ${OUTPUT_PATH} \
--gt-path ${GROUNDTRUTH_PATH} \
--task ${TASK} \
--disable-vision-class-token \
--num-frames ${NUM_FRAMES} \
--ckpt-format torch
done
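# Example invocation (illustrative only; all values are placeholders):
#   bash <this script> --input-image-path <image dir> --model-path <model checkpoint dir> \
#     --output-path <output prefix> --gt-path <ground truth file> --task TextVQA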
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
"""Pretrain or SFT multimodal."""
import os
import sys
from functools import partial
import torch
import yaml
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
)
from dataloader_provider import train_valid_test_dataloaders_provider, is_first_or_last_stage
from model import model_provider
from multimodal_args import add_multimodal_extra_args
from megatron.core import mpu, tensor_parallel
from megatron.core.enums import ModelType
from megatron.core.models.multimodal.llava_model import IGNORE_INDEX, LLaVAModel
from megatron.core.packed_seq_params import PackedSeqParams
from megatron.core.parallel_state import (
get_tensor_model_parallel_rank,
get_pipeline_model_parallel_world_size,
is_pipeline_last_stage,
)
from megatron.training import get_args, get_timers, get_tokenizer, pretrain
from megatron.training.utils import is_last_rank
def get_batch(data_iterator):
"""Generate a batch
Note: attn_mask_type in layer_specs.py sets the attention mask. Attention mask is None here.
"""
imgs = None
tokens = None
labels = None
loss_mask = None
attention_mask = None
position_ids = None
num_tiles = None
packed_seq_params = None
args = get_args()
# Dataloader doesn't run on the middle stages in a pipeline parallel model.
pp_size = get_pipeline_model_parallel_world_size()
if not is_first_or_last_stage(pp_size, args.encoder_pipeline_model_parallel_size):
# Note these are all set to None above.
return tokens, labels, loss_mask, attention_mask, position_ids, imgs, num_tiles, packed_seq_params
# Broadcast data.
torch.cuda.nvtx.range_push("get_data")
if data_iterator is not None and get_tensor_model_parallel_rank() == 0:
data = next(data_iterator)
else:
data = None
data_text = tensor_parallel.broadcast_data(["tokens"], data, torch.int64)["tokens"]
labels = tensor_parallel.broadcast_data(["labels"], data, torch.int64)["labels"]
imgs = tensor_parallel.broadcast_data(["imgs"], data, torch.float32)["imgs"]
num_tiles = tensor_parallel.broadcast_data(["num_tiles"], data, torch.int32)["num_tiles"]
cu_lengths = tensor_parallel.broadcast_data(["cu_lengths"], data, torch.int32)["cu_lengths"]
max_lengths = tensor_parallel.broadcast_data(["max_lengths"], data, torch.int32)["max_lengths"]
# No image input (text-only sample) if the dataloader produced a dummy image.
if imgs.shape == torch.Size([1, 1]):
# FIXME: text-only data can cause a hang if the vision model is on its own pipeline rank and --freeze-ViT is enabled.
imgs = torch.tensor([], dtype=torch.float32, device=data_text.device)
num_tiles = torch.tensor([], dtype=torch.int, device=data_text.device)
# Last pipeline parallel stage doesn't need images.
if pp_size > 1 and is_pipeline_last_stage():
imgs = None
# If cu_lengths and max_lengths are non-dummy, construct PackedSeqParams. Otherwise, leave it at None.
if cu_lengths.shape != torch.Size([1, 1]):
assert (
cu_lengths.shape[0] == max_lengths.shape[0] == 1
), "micro-batch-size must be 1 for packing"
cu_lengths = cu_lengths[0]
max_lengths = max_lengths[0]
packed_seq_params = PackedSeqParams(
qkv_format="thd",
cu_seqlens_q=cu_lengths,
cu_seqlens_kv=cu_lengths,
max_seqlen_q=max_lengths,
max_seqlen_kv=max_lengths,
)
torch.cuda.nvtx.range_pop()
tokens_ = data_text.long()
torch.cuda.nvtx.range_push("index tokens")
tokenizer = get_tokenizer()
text_length = tokens_.shape[1]
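# The labels tensor is assumed to carry one extra leading position relative to tokens,
# so the shifted slice below yields next-token targets; the assert checks the shapes match.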
tokens = tokens_[:, :text_length].contiguous()
labels = labels[:, 1 : text_length + 1].contiguous()
assert tokens.shape == labels.shape, f"tokens: {tokens.shape} != labels: {labels.shape}"
torch.cuda.nvtx.range_pop()
torch.cuda.nvtx.range_push("get_ltor_masks_and_position_ids")
loss_mask, position_ids = get_ltor_masks_and_position_ids(tokens, labels, tokenizer.pad)
torch.cuda.nvtx.range_pop()
return (
tokens,
labels,
loss_mask,
attention_mask,
position_ids,
imgs,
num_tiles,
packed_seq_params,
)
def get_ltor_masks_and_position_ids(input_ids, target, pad_token):
"""Build masks and position id for left to right model."""
seq_length = input_ids.shape[1]
# Position ids.
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
# Loss mask.
loss_mask = torch.ones(target.size(), dtype=torch.float, device=input_ids.device)
loss_mask[target == pad_token] = 0.0 # mask paddings
loss_mask[target == IGNORE_INDEX] = 0.0 # mask prompts
return loss_mask, position_ids
def loss_func(loss_mask, output_tensor):
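"""Masked language model loss.
Returns the locally summed masked loss and token count, plus a data-parallel
all-reduced (loss, num tokens) pair used for reporting 'lm loss'.
"""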
losses = output_tensor.float()
loss_mask = loss_mask.contiguous().view(-1).float()
total_tokens = loss_mask.sum()
total_loss = torch.sum(losses.view(-1) * loss_mask)
loss = torch.cat([total_loss.view(1), total_tokens.view(1)])
reporting_loss = loss.clone().detach()
torch.distributed.all_reduce(reporting_loss, group=mpu.get_data_parallel_group())
local_num_tokens = loss[1].clone().detach().to(torch.int)
return (total_loss, local_num_tokens, {'lm loss': (reporting_loss[0], reporting_loss[1])})
def forward_step(data_iterator, model: LLaVAModel):
"""Forward training step.
Args:
data_iterator (torch.utils.data.dataloader): Input data iterator
model: Multimodal model
Returns:
output_tensor (torch.Tensor): Loss of shape [b, s] if labels are provided, otherwise logits of shape [b, s, vocab_size].
loss_func (callable): Loss function with a loss mask specified.
"""
timers = get_timers()
# Get the batch.
timers('batch-generator', log_level=2).start()
(
tokens,
labels,
loss_mask,
attention_mask,
position_ids,
images,
num_image_tiles,
packed_seq_params,
) = get_batch(data_iterator)
timers('batch-generator').stop()
output_tensor, loss_mask = model(
images,
tokens,
position_ids,
attention_mask,
labels,
loss_mask,
num_image_tiles=num_image_tiles,
packed_seq_params=packed_seq_params,
)
return output_tensor, partial(loss_func, loss_mask)
def llava_embedding_ranks(pp_ranks):
"""LLava's embedding ranks consist of the decoder's first and last ranks (ie, the ViT has no embeddings).
Args:
pp_ranks: A list of global ranks that constitute a pipeline group.
"""
args = get_args()
# The encoder pipeline size is also the index of the decoder's first rank.
epp = args.encoder_pipeline_model_parallel_size
last_rank = pp_ranks[-1]
if len(pp_ranks) == 1 or pp_ranks[epp] == last_rank:
return [last_rank]
else:
return [pp_ranks[epp], last_rank]
def llava_position_embedding_ranks(pp_ranks):
"""LLava's embedding ranks consist of the singular rank of the model or the decoder's first rank.
Args:
pp_ranks: A list of global ranks that constitute a pipeline group.
"""
args = get_args()
# The encoder pipeline size is also the index of the decoder's first rank.
epp = args.encoder_pipeline_model_parallel_size
last_rank = pp_ranks[-1]
if len(pp_ranks) == 1:
return [last_rank]
else:
return [pp_ranks[epp]]
def run_online_eval(model):
"""Run an evaluation benchmark during training."""
args = get_args()
# Online evaluation config is not defined. Do nothing.
if not args.online_evaluation_config:
return []
from config import EvaluationConfig
from run_text_generation import generate_and_write_samples
with open(args.online_evaluation_config, "r") as f:
config_dict = yaml.safe_load(f)
config = EvaluationConfig(**config_dict)
# The inference code assumes the first rank is the leader.
# Tensorboard writer is on the last rank.
# We must write to a storage space that all ranks see.
output_dir = os.path.join(args.save, "online_eval")
os.makedirs(output_dir, exist_ok=True)
config.output_path = os.path.join(output_dir, args.language_model_type)
# The actual generation.
generate_and_write_samples(model[0].module, config, print_output=False)
# Make sure the first rank is done writing so that the last rank can run eval.
torch.distributed.barrier()
if not is_last_rank():
return []
# Run evaluation.
if config.task == "TextVQA":
from evaluate_textvqa import textvqa_eval
avg_acc = textvqa_eval(config.output_path)
return [{"TextVQA accuracy": avg_acc}]
else:
raise NotImplementedError(f"online evaluation of {config.task} not implemented yet")
def write_online_eval_to_tensorboard(data, iteration, writer):
"""Write online evaluation data to Tensorboard."""
if not writer:
return
for item in data:
for k, v in item.items():
writer.add_scalar(k, v, iteration)
if __name__ == "__main__":
train_valid_test_dataloaders_provider.is_distributed = True
pretrain(
train_valid_test_dataloaders_provider,
model_provider,
ModelType.encoder_and_decoder,
forward_step,
args_defaults={'tokenizer_type': 'GPT2BPETokenizer'},
extra_args_provider=add_multimodal_extra_args,
process_non_loss_data_func=write_online_eval_to_tensorboard,
get_embedding_ranks=llava_embedding_ranks,
get_position_embedding_ranks=llava_position_embedding_ranks,
non_loss_data_func=run_online_eval,
)
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
"""Pretrain or SFT multimodal."""
import math
import os
import sys
from functools import partial
import torch
import yaml
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
)
from dataloader_provider import train_valid_test_dataloaders_provider, is_first_or_last_stage
from model import model_provider
from multimodal_args import add_multimodal_extra_args
from megatron.core import mpu, tensor_parallel
from megatron.core.enums import ModelType
from megatron.core.models.multimodal import context_parallel
from megatron.core.models.multimodal.llava_model import IGNORE_INDEX, LLaVAModel
from megatron.core.packed_seq_params import PackedSeqParams
from megatron.core.parallel_state import (
get_tensor_model_parallel_rank,
get_pipeline_model_parallel_world_size,
is_pipeline_last_stage,
)
from megatron.training import get_args, get_timers, get_tokenizer, pretrain
from megatron.training.utils import is_last_rank, get_batch_on_this_cp_rank
def get_batch(data_iterator, image_token_index, img_seq_len):
"""Generate a batch
Note: attn_mask_type in layer_specs.py sets the attention mask. Attention mask is None here.
"""
imgs = None
tokens = None
labels = None
loss_mask = None
attention_mask = None
position_ids = None
num_tiles = None
packed_seq_params = None
args = get_args()
# Dataloader doesn't run on the middle stages in a pipeline parallel model.
pp_size = get_pipeline_model_parallel_world_size()
if not is_first_or_last_stage(pp_size, args.encoder_pipeline_model_parallel_size):
# Note these are all set to None above.
return tokens, labels, loss_mask, attention_mask, position_ids, imgs, num_tiles, packed_seq_params
# Broadcast data.
torch.cuda.nvtx.range_push("get_data")
if data_iterator is not None and get_tensor_model_parallel_rank() == 0:
data = next(data_iterator)
else:
data = None
data_text = tensor_parallel.broadcast_data(["tokens"], data, torch.int64)["tokens"]
labels = tensor_parallel.broadcast_data(["labels"], data, torch.int64)["labels"]
imgs = tensor_parallel.broadcast_data(["imgs"], data, torch.float32)["imgs"]
num_tiles = tensor_parallel.broadcast_data(["num_tiles"], data, torch.int32)["num_tiles"]
cu_lengths = tensor_parallel.broadcast_data(["cu_lengths"], data, torch.int32)["cu_lengths"]
max_lengths = tensor_parallel.broadcast_data(["max_lengths"], data, torch.int32)["max_lengths"]
# No image input (text-only sample) if the dataloader returned a size 1 image.
if imgs.shape == torch.Size([1, 1]):
# FSDP can hang with text-only samples. A workaround is to run a valid dummy image through the vision
# model and then add image embeddings with a zero multiplier.
if args.use_torch_fsdp2:
imgs = torch.zeros((1, 3, args.img_h, args.img_w), dtype=torch.float32, device=data_text.device)
num_tiles = torch.tensor([], dtype=torch.int, device=data_text.device)
else:
# A similar workaround is not needed without FSDP, and we can use an empty image.
# FIXME: text-only data can still cause a hang in the special case where
# the vision model is on its own pipeline rank and --freeze-ViT is enabled.
imgs = torch.tensor([], dtype=torch.float32, device=data_text.device)
num_tiles = torch.tensor([], dtype=torch.int, device=data_text.device)
# Last pipeline parallel stage doesn't need images.
if pp_size > 1 and is_pipeline_last_stage():
imgs = None
# If cu_lengths and max_lengths are non-dummy, construct PackedSeqParams. Otherwise, leave it at None.
if cu_lengths.shape != torch.Size([1, 1]):
assert (
cu_lengths.shape[0] == max_lengths.shape[0] == 1
), "micro-batch-size must be 1 for packing"
cu_lengths = cu_lengths[0]
max_lengths = max_lengths[0]
packed_seq_params = PackedSeqParams(
qkv_format="thd",
cu_seqlens_q=cu_lengths,
cu_seqlens_kv=cu_lengths,
max_seqlen_q=max_lengths,
max_seqlen_kv=max_lengths,
)
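# Illustrative example of the packed-sequence inputs above (hypothetical values): for a
# micro-batch that packs two samples of 100 and 50 tokens, the dataloader would provide
#   cu_lengths  ~ [0, 100, 150]  (cumulative sequence boundaries, "thd" format)
#   max_lengths ~ [100]          (length of the longest packed sample)
# and PackedSeqParams simply forwards them so attention does not cross sample boundaries.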
torch.cuda.nvtx.range_pop()
tokens_ = data_text.long()
torch.cuda.nvtx.range_push("index tokens")
tokenizer = get_tokenizer()
text_length = tokens_.shape[1]
tokens = tokens_[:, :text_length].contiguous()
labels = labels[:, 1 : text_length + 1].contiguous()
assert tokens.shape == labels.shape, f"tokens: {tokens.shape} != labels: {labels.shape}"
torch.cuda.nvtx.range_pop()
torch.cuda.nvtx.range_push("get_ltor_masks_and_position_ids")
loss_mask, position_ids = get_ltor_masks_and_position_ids(tokens, labels, tokenizer.pad)
torch.cuda.nvtx.range_pop()
# If context parallel is enabled, must shard inputs to CP ranks.
if args.context_parallel_size > 1 or args.sequence_parallel:
assert tokens.shape[0] == 1, "micro-batch-size > 1 not supported yet with CP"
num_image_tokens = torch.sum(tokens == image_token_index).item()
num_image_embeddings = num_image_tokens * img_seq_len - num_image_tokens
seq_len = text_length + num_image_embeddings
# CP expects the sequence length to be divisible by the CP size, so apply padding.
mp_padding_needed = context_parallel.get_padding(
seq_len, args.context_parallel_size,
args.tensor_model_parallel_size, args.sequence_parallel,
)
tokens, position_ids, labels, loss_mask = [torch.nn.functional.pad(item, (0, mp_padding_needed)) for item in (tokens, position_ids, labels, loss_mask)]
# Get PackedSeqParams that indicate the amount of padding for TransformerEngine.
packed_seq_params = context_parallel.get_packed_seq_params(tokens, num_image_embeddings, mp_padding_needed, args.context_parallel_size, True)
return (
tokens,
labels,
loss_mask,
attention_mask,
position_ids,
imgs,
num_tiles,
packed_seq_params,
)
def get_ltor_masks_and_position_ids(input_ids, target, pad_token):
"""Build masks and position id for left to right model."""
seq_length = input_ids.shape[1]
# Position ids.
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
# Loss mask.
loss_mask = torch.ones(target.size(), dtype=torch.float, device=input_ids.device)
loss_mask[target == pad_token] = 0.0 # mask paddings
loss_mask[target == IGNORE_INDEX] = 0.0 # mask prompts
return loss_mask, position_ids
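# Illustrative example (comments only, hypothetical values): with pad_token = 0 and
# IGNORE_INDEX = -100, a target row [-100, -100, 42, 43, 0, 0] produces
#   loss_mask    = [0, 0, 1, 1, 0, 0]  (prompt and padding positions are masked out)
#   position_ids = [0, 1, 2, 3, 4, 5]  (a plain arange broadcast over the batch)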
def get_mask_start_and_end_idx(arr):
"""
Returns a list of tuples holding the start and end indices in arr of the contiguous
non-zero sub-arrays.
For instance, if arr = [0, 1, 0, 0, 1, 1]
get_mask_start_and_end_idx(arr) = [(1, 1), (4, 5)]
such that arr[1:1+1] = [1] and arr[4:5+1] = [1, 1]
"""
mask = (arr != 0)
mask_int = mask.int()
diff = mask_int[1:] - mask_int[:-1]
start_indices = (diff == 1).nonzero(as_tuple=False).flatten() + 1
end_indices = (diff == -1).nonzero(as_tuple=False).flatten()
if len(mask) == 0: return []
if mask[0]:
start_indices = torch.cat((torch.tensor([0], device=arr.device), start_indices))
if mask[-1]:
end_indices = torch.cat((end_indices, torch.tensor([len(arr) - 1], device=arr.device)))
sequences = list(zip(start_indices.tolist(), end_indices.tolist()))
return sequences
def scaled_loss_func(loss_mask, output_tensor):
"""
Scaled loss function
Scale the loss for each conversation turn using the formula:
1 / sum_j[ sqrt(length(loss_turn_j)) ] * sum_i[ sum(loss_turn_i) / sqrt(length(loss_turn_i)) ]
Where we use the loss mask to infer the start / end of the conversation turns.
"""
losses = output_tensor.float()
loss_list = []
num_valid_labels_list = []
for idx in range(losses.shape[0]):
loss_this_sample = losses[idx]
turn_start_end_list = get_mask_start_and_end_idx(loss_mask[idx])
for turn_start, turn_end in turn_start_end_list:
# compute loss for each turn
loss_this_turn = loss_this_sample[turn_start:turn_end+1].sum()
assert (1 - loss_mask)[idx][turn_start:turn_end+1].sum() < 1.0
num_valid_labels_this_turn = turn_end - turn_start + 1
loss_this_turn = loss_this_turn / num_valid_labels_this_turn
loss_list.append(loss_this_turn)
# append num of valid labels for each turn
num_valid_labels_list.append(num_valid_labels_this_turn)
base_num = sum([math.sqrt(each) for each in num_valid_labels_list])
for idx in range(len(loss_list)):
# normalize loss for each turn
loss_list[idx] = loss_list[idx] * math.sqrt(num_valid_labels_list[idx]) / base_num
total_loss = torch.stack(loss_list).sum()
total_tokens = torch.ones_like(total_loss)
loss = torch.cat([total_loss.view(1), total_tokens.view(1)])
reporting_loss = loss.clone().detach()
torch.distributed.all_reduce(reporting_loss, group=mpu.get_data_parallel_group())
local_num_tokens = loss[1].clone().detach().to(torch.int)
return (
total_loss,
local_num_tokens,
{'lm loss': (reporting_loss[0], reporting_loss[1])},
)
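# Worked example of the scaling above (hypothetical numbers): suppose the loss mask marks
# two turns with 4 and 9 valid labels and per-token loss sums of 8.0 and 18.0.
#   per-turn mean:   8.0 / 4 = 2.0         18.0 / 9 = 2.0
#   base_num:        sqrt(4) + sqrt(9) = 2 + 3 = 5
#   weighted terms:  2.0 * 2 / 5 = 0.8     2.0 * 3 / 5 = 1.2
#   total_loss:      0.8 + 1.2 = 2.0
# which matches 1 / sum_j sqrt(n_j) * sum_i [ sum(loss_turn_i) / sqrt(n_i) ]
#   = 1/5 * (8.0 / 2 + 18.0 / 3) = 1/5 * (4 + 6) = 2.0.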
def loss_func(loss_mask, output_tensor):
args = get_args()
losses = output_tensor.float()
loss_mask = loss_mask.contiguous().view(-1).float()
total_tokens = loss_mask.sum()
total_loss = torch.sum(losses.view(-1) * loss_mask)
loss = torch.cat([total_loss.view(1), total_tokens.view(1)])
if args.context_parallel_size > 1:
torch.distributed.all_reduce(loss, group=mpu.get_context_parallel_group())
reporting_loss = loss.clone().detach()
torch.distributed.all_reduce(reporting_loss, group=mpu.get_data_parallel_group())
local_num_tokens = loss[1].clone().detach().to(torch.int)
# We multiply by the context parallel size because there will later be a division by the CP(+DP) size.
return (
loss[0] * args.context_parallel_size,
local_num_tokens,
{'lm loss': (reporting_loss[0], reporting_loss[1])}
)
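# Illustrative note on the CP reduction above (hypothetical numbers): with
# context_parallel_size = 2, each CP rank sees only its shard of the sequence, e.g.
#   rank 0: partial loss sum 1.5 over 10 unmasked tokens -> loss = [1.5, 10.]
#   rank 1: partial loss sum 2.5 over 12 unmasked tokens -> loss = [2.5, 12.]
# The all_reduce over the CP group makes both ranks hold [4.0, 22.], and the returned
# 4.0 * 2 = 8.0 compensates for the later division by the CP(+DP) size mentioned above.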
def forward_step(data_iterator, model: LLaVAModel):
"""Forward training step.
Args:
data_iterator (torch.utils.data.dataloader): Input data iterator
model: Multimodal model
Returns:
output_tensor (torch.Tensor): Loss of shape [b, s] if labels are provided, otherwise logits of shape [b, s, vocab_size].
loss_func (callable): Loss function with a loss mask specified.
"""
timers = get_timers()
# Get the batch.
timers('batch-generator', log_level=2).start()
(
tokens,
labels,
loss_mask,
attention_mask,
position_ids,
images,
num_image_tiles,
packed_seq_params,
) = get_batch(data_iterator, model.module.module.image_token_index, model.module.module.img_seq_len)
timers('batch-generator').stop()
output_tensor, loss_mask = model(
images,
tokens,
position_ids,
attention_mask,
labels,
loss_mask,
num_image_tiles=num_image_tiles,
packed_seq_params=packed_seq_params,
)
args = get_args()
if args.use_loss_scaling:
loss_function = partial(scaled_loss_func, loss_mask)
else:
loss_function = partial(loss_func, loss_mask)
return output_tensor, loss_function
def llava_embedding_ranks(pp_ranks):
"""LLava's embedding ranks consist of the decoder's first and last ranks (ie, the ViT has no embeddings).
Args:
pp_ranks: A list of global ranks that constitute a pipeline group.
"""
args = get_args()
# The encoder pipeline size is also the index of the decoder's first rank.
epp = args.encoder_pipeline_model_parallel_size
last_rank = pp_ranks[-1]
if len(pp_ranks) == 1 or pp_ranks[epp] == last_rank:
return [last_rank]
else:
return [pp_ranks[epp], last_rank]
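# Illustrative example (hypothetical ranks): with encoder_pipeline_model_parallel_size = 1
# and pp_ranks = [0, 1, 2, 3], the decoder starts at global rank 1, so this returns [1, 3];
# with pp_ranks = [0] (no pipeline parallelism), it returns [0].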
def llava_position_embedding_ranks(pp_ranks):
"""LLava's embedding ranks consist of the singular rank of the model or the decoder's first rank.
Args:
pp_ranks: A list of global ranks that constitute a pipeline group.
"""
args = get_args()
# The encoder pipeline size is also the index of the decoder's first rank.
epp = args.encoder_pipeline_model_parallel_size
last_rank = pp_ranks[-1]
if len(pp_ranks) == 1:
return [last_rank]
else:
return [pp_ranks[epp]]
def run_online_eval(model):
"""Run an evaluation benchmark during training."""
args = get_args()
# Online evaluation config is not defined. Do nothing.
if not args.online_evaluation_config:
return []
from config import EvaluationConfig
from run_text_generation import generate_and_write_samples
with open(args.online_evaluation_config, "r") as f:
config_dict = yaml.safe_load(f)
config = EvaluationConfig(**config_dict)
# The inference code assumes the first rank is the leader.
# Tensorboard writer is on the last rank.
# We must write to a storage space that all ranks see.
output_dir = os.path.join(args.save, "online_eval")
os.makedirs(output_dir, exist_ok=True)
config.output_path = os.path.join(output_dir, args.language_model_type)
# The actual generation.
generate_and_write_samples(model[0].module, config, print_output=False)
# Make sure the first rank is done writing so that the last rank can run eval.
torch.distributed.barrier()
if not is_last_rank():
return []
# Run evaluation.
if config.task == "TextVQA":
from evaluate_textvqa import textvqa_eval
avg_acc = textvqa_eval(config.output_path)
return [{"TextVQA accuracy": avg_acc}]
else:
raise NotImplementedError(f"online evaluation of {config.task} not implemented yet")
def write_online_eval_to_tensorboard(data, iteration, writer):
"""Write online evaluation data to Tensorboard."""
if not writer:
return
for item in data:
for k, v in item.items():
writer.add_scalar(k, v, iteration)
if __name__ == "__main__":
train_valid_test_dataloaders_provider.is_distributed = True
pretrain(
train_valid_test_dataloaders_provider,
model_provider,
ModelType.encoder_and_decoder,
forward_step,
args_defaults={'tokenizer_type': 'GPT2BPETokenizer'},
extra_args_provider=add_multimodal_extra_args,
process_non_loss_data_func=write_online_eval_to_tensorboard,
get_embedding_ranks=llava_embedding_ranks,
get_position_embedding_ranks=llava_position_embedding_ranks,
non_loss_data_func=run_online_eval,
)
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
#!/bin/bash
# Runs the "220M" parameter model
export CUDA_DEVICE_MAX_CONNECTIONS=1
GPUS_PER_NODE=8
# Change for multinode config
MASTER_ADDR=localhost
MASTER_PORT=6000
NUM_NODES=1
NODE_RANK=0
WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES))
CHECKPOINT_PATH=$1 #<Specify path>
TENSORBOARD_DIR=$2 #<Specify path>
VOCAB_FILE=$3 #<Specify path to file>/bert-large-cased-vocab.txt
DATA_PATH=$4 #<Specify path and file prefix>_text_document
DISTRIBUTED_ARGS="
--nproc_per_node $GPUS_PER_NODE \
--nnodes $NUM_NODES \
--node_rank $NODE_RANK \
--master_addr $MASTER_ADDR \
--master_port $MASTER_PORT
"
T5_ARGS="
--encoder-num-layers 12 \
--decoder-num-layers 12 \
--hidden-size 768 \
--num-attention-heads 12 \
--kv-channels 64 \
--ffn-hidden-size 3072 \
--encoder-seq-length 512 \
--decoder-seq-length 128 \
--max-position-embeddings 512 \
--micro-batch-size 64 \
--global-batch-size 512 \
--lr 0.0001 \
--train-iters 1000000 \
--lr-decay-iters 1000000 \
--lr-decay-style linear \
--min-lr 0.00001 \
--weight-decay 1e-2 \
--lr-warmup-fraction .01 \
--clip-grad 1.0 \
--bf16 \
--vocab-extra-ids 100 \
--init-method-std 0.015 \
--transformer-impl transformer_engine \
--tensor-model-parallel-size 1 \
--pipeline-model-parallel-size 1 \
--attention-backend auto \
"
DATA_ARGS="
--data-path $DATA_PATH \
--vocab-file $VOCAB_FILE \
--tokenizer-type BertWordPieceCase \
--split 99982,9,9 \
"
OUTPUT_ARGS="
--log-interval 100 \
--tensorboard-dir ${TENSORBOARD_DIR} \
--save-interval 500 \
--eval-interval 1000 \
--eval-iters 10
"
torchrun $DISTRIBUTED_ARGS pretrain_t5.py \
$T5_ARGS \
$DATA_ARGS \
$OUTPUT_ARGS \
--distributed-backend nccl \
--save $CHECKPOINT_PATH \
--load $CHECKPOINT_PATH \