import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig

# model_name_or_path = "deepseek-ai/DeepSeek-V2-Lite"
model_name_or_path = "/home/DeepSeek-V2/DeepSeek-V2-Lite-Chat"
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)

# `max_memory` should be set based on your devices
max_memory = {i: "64GB" for i in range(8)}
# `device_map` cannot be set to `auto`
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    trust_remote_code=True,
    device_map="sequential",
    torch_dtype=torch.bfloat16,
    max_memory=max_memory,
    attn_implementation="eager",
)
model.generation_config = GenerationConfig.from_pretrained(model_name_or_path)
# Use the EOS token for padding to avoid the missing-pad-token warning
model.generation_config.pad_token_id = model.generation_config.eos_token_id

text = "An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors. The output is"
inputs = tokenizer(text, return_tensors="pt")
outputs = model.generate(**inputs.to(model.device), max_new_tokens=100)

result = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(result)
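
# Note: the checkpoint loaded above is the Chat variant, so for dialogue-style
# prompts the tokenizer's chat template is the more natural entry point than
# raw text completion. A minimal sketch follows; the user message is a made-up
# example, and the variable names (`messages`, `input_tensor`, etc.) are
# illustrative rather than prescribed by the model repo.
messages = [
    {"role": "user", "content": "Explain scaled dot-product attention in one paragraph."}
]
input_tensor = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)
chat_outputs = model.generate(input_tensor.to(model.device), max_new_tokens=100)
# Decode only the newly generated tokens, skipping the prompt portion
chat_result = tokenizer.decode(
    chat_outputs[0][input_tensor.shape[1]:], skip_special_tokens=True
)
print(chat_result)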