import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig

model_name_or_path = "deepseek-ai/DeepSeek-V2-Lite"
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
# `device_map` cannot be set to `auto`; load the weights and move them to the GPU explicitly.
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path, trust_remote_code=True, torch_dtype=torch.bfloat16
).cuda()
model.generation_config = GenerationConfig.from_pretrained(model_name_or_path)
# The model ships without a dedicated pad token, so reuse the EOS token for padding.
model.generation_config.pad_token_id = model.generation_config.eos_token_id

text = "An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors. The output is"
inputs = tokenizer(text, return_tensors="pt")
outputs = model.generate(**inputs.to(model.device), max_new_tokens=100)

result = tokenizer.decode(outputs[0], skip_special_tokens=True)
print("result", result)
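# A minimal sketch of an alternative load for multi-GPU machines, where the model does
# not fit on a single device. Since `device_map="auto"` is unsupported for this model,
# shard the weights sequentially with an explicit per-device memory cap instead of
# calling `.cuda()`. The "75GB" cap is an assumption (sized for ~80GB cards); adapt it
# to your hardware.
if torch.cuda.device_count() > 1:
    max_memory = {i: "75GB" for i in range(torch.cuda.device_count())}
    model = AutoModelForCausalLM.from_pretrained(
        model_name_or_path,
        trust_remote_code=True,
        device_map="sequential",
        torch_dtype=torch.bfloat16,
        max_memory=max_memory,
    )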