test.py 921 Bytes
Newer Older
chenzk's avatar
v1.0  
chenzk committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
"""Smoke test: KV-cache compression with kvpress FinchPress on Qwen3-8B.

Builds the kvpress text-generation pipeline, compresses the context's KV
cache at a 0.5 ratio using FinchPress, asks one question, and prints the
generated answer.
"""
from transformers import pipeline

from kvpress import FinchPress

model_name = "Qwen/Qwen3-8B"
# kvpress presses generally require flash-attention; install flash-attn or
# switch to {"attn_implementation": "eager"} if it is unavailable.
model_kwargs = {"attn_implementation": "flash_attention_2"}
pipe = pipeline("kv-press-text-generation", model=model_name, model_kwargs=model_kwargs)

context = "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."
question = "美国面积多大?"

# Keep half of the context KV cache (compression_ratio = fraction removed).
press = FinchPress(compression_ratio=0.5)
# FinchPress needs the model/tokenizer to register its delimiter token.
press.update_model_and_tokenizer(pipe.model, pipe.tokenizer)

# FinchPress expects the context and question to be separated by its
# special delimiter token so it knows which span to compress.
delimiter = press.delimiter_token
full_input = f"{context}{delimiter}{question}"

answer = pipe(full_input, press=press, max_new_tokens=512)["answer"]
print("answer: ", answer)