# int8_inference_huggingface.py
#
# Minimal INT8 (8-bit) inference example: load Llama 2 quantized with
# bitsandbytes through Hugging Face Transformers and generate an answer
# to a short prompt. Requires the accelerate and bitsandbytes packages.

import torch
from transformers import LlamaForCausalLM, LlamaTokenizer

MAX_NEW_TOKENS = 128
model_name = "meta-llama/Llama-2-7b-hf"

text = "Hamburg is in which country?\n"
tokenizer = LlamaTokenizer.from_pretrained(model_name)
input_ids = tokenizer(text, return_tensors="pt").input_ids
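# With device_map="auto", accelerate's dispatch hooks move inputs onto the
# correct device inside generate(), so the prompt tensor can stay on CPU here;
# an explicit input_ids.to(model.device) would work just as well.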

# Per-GPU weight budget: free VRAM on the current device, minus ~2 GB of headroom.
max_memory = f"{int(torch.cuda.mem_get_info()[0] / 1024**3) - 2}GB"

# Apply the same budget to every visible GPU.
n_gpus = torch.cuda.device_count()
max_memory = {i: max_memory for i in range(n_gpus)}

# Load the model in 8-bit precision (bitsandbytes LLM.int8()) and let
# device_map="auto" shard it across the available GPUs within the budget.
model = LlamaForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    load_in_8bit=True,
    max_memory=max_memory,
)
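
# Note: recent transformers releases deprecate the bare load_in_8bit kwarg in
# favor of an explicit quantization config. A sketch of the equivalent call,
# assuming such a version is installed:
#
#   from transformers import BitsAndBytesConfig
#
#   model = LlamaForCausalLM.from_pretrained(
#       model_name,
#       device_map="auto",
#       quantization_config=BitsAndBytesConfig(load_in_8bit=True),
#       max_memory=max_memory,
#   )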

# Generate up to MAX_NEW_TOKENS new tokens (max_new_tokens excludes the
# prompt, unlike max_length, so it matches the constant's name).
generated_ids = model.generate(input_ids, max_new_tokens=MAX_NEW_TOKENS)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))