infer_vllm.py
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams
from multiprocessing import freeze_support


if __name__ == '__main__':
    freeze_support()
    # Initialize the tokenizer
    tokenizer = AutoTokenizer.from_pretrained("MiniMax/MiniMax-M1-40k")

    # Set the decoding hyperparameters.
    # max_tokens caps the number of tokens generated per request.
    sampling_params = SamplingParams(temperature=0.7, top_p=0.8, repetition_penalty=1.05, max_tokens=512)

    # Pass the model name or path; GPTQ- or AWQ-quantized checkpoints are also supported.
    llm = LLM(
        model="MiniMax/MiniMax-M1-40k",
        distributed_executor_backend="ray",  # use Ray to coordinate multi-GPU execution
        tensor_parallel_size=16,             # shard the model across 16 GPUs
        max_model_len=4096,                  # maximum context length (prompt + generated tokens)
        dtype="bfloat16",
        enforce_eager=True,                  # skip CUDA graph capture and run in eager mode
        gpu_memory_utilization=0.99,         # fraction of GPU memory vLLM may reserve
        trust_remote_code=True,              # allow the model's custom code from the Hub to run
    )

    # Prepare your prompts
    prompt = "How large is the land area of the United States?"
    messages = [
        {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
        {"role": "user", "content": [{"type": "text", "text": prompt}]}
    ]

    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=True # Switches between thinking and non-thinking modes. Default is True.
    )

    # Generate the outputs.
    outputs = llm.generate([text], sampling_params)

    # Print the outputs.
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
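
    # A minimal alternative sketch: LLM.chat() applies the chat template
    # internally, so the manual apply_chat_template step above can be skipped.
    # This assumes a vLLM release that provides LLM.chat and accepts
    # OpenAI-style message content parts; template kwargs such as
    # enable_thinking may not be configurable through this path.
    chat_outputs = llm.chat(messages, sampling_params)
    print(f"Chat output: {chat_outputs[0].outputs[0].text!r}")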