from transformers import pipeline, AutoTokenizer
from kvpress import SnapKVPress

# Load the model into a kvpress text-generation pipeline.
model = "Qwen/Qwen3-8B"
tokenizer = AutoTokenizer.from_pretrained(model)
model_kwargs = {"attn_implementation": "flash_attention_2"}
# model_kwargs = {"attn_implementation": "eager"}
pipe = pipeline("kv-press-text-generation", model=model, model_kwargs=model_kwargs)

context = "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."
question = "美国面积多大?"

# Heuristic for the SnapKV observation window: start from half the
# question's token count, shrink by 10x when it is very long, then
# cap at 64 and floor at 1.
question_token_count = tokenizer(question, return_tensors="pt").input_ids.shape[1]
q_len = int(question_token_count // 2)
if q_len > 1024:
    q_len = int(0.1 * q_len)
window_size = min(64, q_len) if q_len > 1 else 1
# print(f"using window_size: {window_size}")

# Compress the KV cache to half its size during generation.
press = SnapKVPress(compression_ratio=0.5, window_size=window_size)
answer = pipe(context, question=question, press=press, max_new_tokens=512)["answer"]
print("answer: ", answer)