""" Nanotron Inference Script Usage: ``` export CUDA_DEVICE_MAX_CONNECTIONS=1 # important for some distributed operations torchrun --nproc_per_node=4 run_generate.py ---ckpt-path checkpoints/test/4 ``` """ import argparse import os from pathlib import Path import torch from config import MambaConfig, MambaModelConfig from mamba import MambaForTraining from nanotron import distributed as dist from nanotron import logging from nanotron.config import ( GenerationArgs, LoggingArgs, ParallelismArgs, get_config_from_file, ) from nanotron.generation.decode import ( GenerationInput, TokenizerConfig, decode_text, decode_tokenized, ) from nanotron.logging import log_rank, set_ranks_logging_level from nanotron.models import build_model from nanotron.parallel import ParallelContext from nanotron.parallel.parameters import sanity_check from nanotron.parallel.pipeline_parallel.engine import ( OneForwardOneBackwardPipelineEngine, ) from nanotron.parallel.pipeline_parallel.tensor_pointer import TensorPointer from nanotron.parallel.tensor_parallel.enum import TensorParallelLinearMode from nanotron.random import ( RandomStates, get_current_random_state, get_synced_random_state, set_random_seed, ) from nanotron.serialize import load_weights from nanotron.trainer import mark_tied_parameters try: from transformers import AutoTokenizer except ImportError: AutoTokenizer = None logger = logging.get_logger(__name__) def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--ckpt-path", type=Path, required=True, help="Checkpoint path") parser.add_argument("--dp", type=int, default=1) parser.add_argument("--pp", type=int, default=1) parser.add_argument("--tp", type=int, default=1) parser.add_argument("--max-new-tokens", type=int, default=128, help="Maximum number of new tokens to generate") return parser.parse_args() def main(): args = get_args() assert args.ckpt_path.exists(), f"Checkpoint path {args.ckpt_path} does not exist" config = get_config_from_file( (args.ckpt_path / "config.yaml").as_posix(), config_class=MambaConfig, model_config_class=MambaModelConfig ) model_config = config.model.model_config tokenizer_path = config.tokenizer.tokenizer_name_or_path parallel_config = ParallelismArgs( dp=args.dp, pp=args.pp, tp=args.tp, pp_engine=OneForwardOneBackwardPipelineEngine(), tp_mode=TensorParallelLinearMode.ALL_REDUCE, tp_linear_async_communication=False, ) print(parallel_config) # Initialise all process groups parallel_context = ParallelContext( data_parallel_size=parallel_config.dp, pipeline_parallel_size=parallel_config.pp, tensor_parallel_size=parallel_config.tp, ) # Set log levels logging_config = LoggingArgs( log_level="info", log_level_replica="info", ) # Set log levels set_ranks_logging_level(parallel_context=parallel_context, logging_config=logging_config) log_rank(f"model_config: {model_config}", logger=logger, level=logging.INFO, rank=0) log_rank(f"tokenizer_path: {tokenizer_path}", logger=logger, level=logging.INFO, rank=0) # Set random states set_random_seed(42) # Get synchronized random states if parallel_config.tp_mode is TensorParallelLinearMode.ALL_REDUCE: random_states = RandomStates( {"tp_synced": get_synced_random_state(random_state=get_current_random_state(), pg=parallel_context.tp_pg)} ) else: # We don't need to sync across TP when using sequence parallel (REDUCE_SCATTER) random_states = RandomStates({}) model = build_model( model_builder=lambda: MambaForTraining( config=model_config, parallel_context=parallel_context, parallel_config=parallel_config, 
            random_states=random_states,
        ),
        dtype=getattr(torch, model_config.dtype),
        parallel_context=parallel_context,
    )

    # Mark some parameters as tied
    # TODO @nouamane: this is only needed for training, can we just mark params as NanotronParameter instead?
    mark_tied_parameters(model=model, parallel_context=parallel_context, parallel_config=parallel_config)

    # Sanity check model
    sanity_check(root_module=model)

    # Load checkpoint
    checkpoint_path = args.ckpt_path
    log_rank(
        f"Loading checkpoint from {checkpoint_path}:",
        logger=logger,
        level=logging.INFO,
        rank=0,
    )
    load_weights(model=model, parallel_context=parallel_context, root_folder=checkpoint_path)

    model.eval()

    if AutoTokenizer is not None:
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
        # Make sure the tokenizer has a pad token: fall back to the EOS token,
        # then to ids from the model config, and finally add a new [PAD] token.
        if tokenizer.pad_token_id is None:
            if tokenizer.eos_token_id is not None:
                tokenizer.pad_token_id = tokenizer.eos_token_id
            elif getattr(model.config, "pad_token_id", None) is not None:
                tokenizer.pad_token_id = int(model.config.pad_token_id)
            elif getattr(model.config, "eos_token_id", None) is not None:
                tokenizer.pad_token_id = int(model.config.eos_token_id)
            else:
                tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        tokenizer.padding_side = "left"
        tokenizer.truncation_side = "left"  # TODO @nouamane: do we want this?

        dummy_inputs = [
            # "Passage: Daniel went back to the garden. Mary travelled to the kitchen. Sandra journeyed to the kitchen. Sandra went to the hallway. John went to the bedroom. Mary went back to the garden. Where is Mary?\nAnswer:",
            # "This film was probably inspired by Godzilla",
            "What is your ",
        ]

        log_rank("Setting up inference mode for the Mamba model", logger=logger, level=logging.INFO, rank=0)
        # assert config.inference_params.max_batch_size == 1, "Only batch size 1 is supported for inference for now"

        outputs = decode_text(
            input_iter=(GenerationInput(text=text) for text in dummy_inputs),
            tokenizer=tokenizer,
            # TODO @thomasw21: From ModelWithLoss extract the model.
            model=model.model,
            parallel_context=parallel_context,
            max_new_tokens=args.max_new_tokens,
            max_micro_batch_size=2,
            generation_config=GenerationArgs(sampler="greedy", use_cache=True),
            tokenizer_config=TokenizerConfig(max_input_length=None),
            is_bench=os.environ.get("USE_BENCH", "0") == "1",
            logits_are_batch_first=False,
        )

        for output in outputs:
            input_ids = output.input_ids
            generated_ids = output.generation_ids
            # Ranks that don't hold the output only receive TensorPointers
            if isinstance(input_ids, TensorPointer):
                assert isinstance(generated_ids, TensorPointer)
                continue
            assert isinstance(generated_ids, torch.Tensor)

            log_rank(
                f"input: {tokenizer.decode(input_ids, clean_up_tokenization_spaces=False)[:1000]}",
                logger=logger,
                level=logging.INFO,
                rank=0,
            )
            log_rank(
                f"generation: {tokenizer.decode(generated_ids[len(input_ids) :], clean_up_tokenization_spaces=False)}",
                logger=logger,
                level=logging.INFO,
                rank=0,
            )
            log_rank(
                "--------------------------------------------------",
                logger=logger,
                level=logging.INFO,
                rank=0,
            )

        # Model ref
        tokens = tokenizer(dummy_inputs, return_tensors="pt")
        input_ids = tokens.input_ids.to(device="cuda")
    else:
        # No tokenizer available: generate from raw token ids instead
        outputs = decode_tokenized(
            input_ids=torch.zeros(1, 1).to(dtype=torch.int64, device="cuda"),
            input_mask=torch.ones(1, 1).to(dtype=torch.bool, device="cuda"),
            model=model.model,
            parallel_context=parallel_context,
            generation_config=GenerationArgs(sampler="greedy", use_cache=True),
            max_micro_batch_size=1,
            max_new_tokens=12,
            returns_logits=False,
        )
        for output in outputs:
            input_ids = output.input_ids
            generated_ids = output.generation_ids
            if isinstance(input_ids, TensorPointer):
                assert isinstance(generated_ids, TensorPointer)
                continue
            assert isinstance(generated_ids, torch.Tensor)
            log_rank(
                f"generation: {generated_ids[len(input_ids) :]}",
                logger=logger,
                level=logging.INFO,
                rank=0,
            )
            log_rank(
                "--------------------------------------------------",
                logger=logger,
                level=logging.INFO,
                rank=0,
            )

    dist.barrier()


if __name__ == "__main__":
    main()
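# A sketch of a multi-GPU launch (the checkpoint path is the placeholder from the
# docstring; adjust to your setup). The product dp * pp * tp must match the
# torchrun world size, so a 4-process run could, for example, use tensor
# parallelism alone:
#
#   export CUDA_DEVICE_MAX_CONNECTIONS=1
#   torchrun --nproc_per_node=4 run_generate.py --ckpt-path checkpoints/test/4 --tp 4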