"""Generate text with an AWQ-quantized model stored as safetensors.

Loads the pre-quantized OPT-125m checkpoint from the Hugging Face Hub,
builds a Vicuna-style chat prompt, and streams the generated tokens to
stdout as they are produced.

Requires a CUDA device (input ids are moved to GPU via ``.cuda()``).
"""
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer, TextStreamer

# Hub id of the pre-quantized checkpoint to load.
quant_path = "casperhansen/opt-125m-awq"

# Load model.
# fuse_layers=True enables AutoAWQ's fused modules for faster inference;
# safetensors=True tells the loader the weights are in safetensors format.
model = AutoAWQForCausalLM.from_quantized(quant_path, fuse_layers=True, safetensors=True)

tokenizer = AutoTokenizer.from_pretrained(quant_path, trust_remote_code=True)
# TextStreamer prints decoded tokens to stdout as generation progresses.
streamer = TextStreamer(tokenizer, skip_special_tokens=True)

# Convert prompt to tokens
prompt_template = """\
A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.

USER: {prompt}
ASSISTANT:"""

tokens = tokenizer(
    prompt_template.format(prompt="How are you today?"),
    return_tensors='pt'
).input_ids.cuda()

# Generate output
generation_output = model.generate(
    tokens,
    streamer=streamer,
    max_new_tokens=512
)