# basic_generate.py
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer, TextStreamer

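# AWQ-quantized Mistral-7B-OpenOrca checkpoint from the Hugging Face Hub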
quant_path = "TheBloke/Mistral-7B-OpenOrca-AWQ"

# Load model
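# fuse_layers=False skips AutoAWQ's fused (optimized) modules;
# safetensors=True loads the weights from .safetensors files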
model = AutoAWQForCausalLM.from_quantized(quant_path, fuse_layers=False, safetensors=True)
tokenizer = AutoTokenizer.from_pretrained(quant_path, trust_remote_code=True)
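# Print decoded tokens to stdout as they are generated,
# hiding the prompt and any special tokens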
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# Convert prompt to tokens
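# ChatML format: system/user/assistant turns delimited by <|im_start|> and <|im_end|>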
prompt_template = """\
<|im_start|>system
You are MistralOrca, a large language model trained by Alignment Lab AI. Write out your reasoning step-by-step to be sure you get the right answers!<|im_end|>
<|im_start|>user
{prompt}<|im_end|>
<|im_start|>assistant"""

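# Tokenize the filled-in template and move the input ids to the GPU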
tokens = tokenizer(
    prompt_template.format(prompt="Why is ice cream so good, yes so good?"),
    return_tensors='pt'
).input_ids.cuda()

# Generate output
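# max_new_tokens caps only the newly generated tokens, not the prompt length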
generation_output = model.generate(
    tokens,
    streamer=streamer,
    max_new_tokens=512
)
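
# The streamer above already prints the completion as it is generated; to also
# keep the text, one could decode the returned ids (prompt + completion), e.g.:
# output_text = tokenizer.decode(generation_output[0], skip_special_tokens=True)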