phi_multi_infer.py
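"""
Multi-GPU inference with vLLM on microsoft/phi-4.

Loads the model across two GPUs with tensor parallelism, applies the
tokenizer's chat template to a single user prompt, and prints the
generated text.
"""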
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

def main():
    # Load the tokenizer so the model's chat template can be applied.
    tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-4", trust_remote_code=True)
    # Sampling parameters: moderate temperature, a light repetition
    # penalty, and at most 512 generated tokens.
    sampling_params = SamplingParams(temperature=0.8, repetition_penalty=1.05, max_tokens=512)

    # Create the LLM engine. tensor_parallel_size=2 shards the model
    # across two GPUs; max_model_len=512 caps the total sequence length
    # (prompt + generated tokens), so long prompts shorten the output.
    llm = LLM(model="microsoft/phi-4",
              tensor_parallel_size=2,
              trust_remote_code=True,
              gpu_memory_utilization=0.95,
              dtype="float16",
              max_model_len=512,
              enforce_eager=True)

    # Build a chat-formatted prompt from a single user message.
    prompt = "Tell me something about large language models."
    messages = [
        {"role": "user", "content": prompt}
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )

    # Generate completions for the batch (here, a single prompt).
    outputs = llm.generate([text], sampling_params)
    # Print each prompt alongside its generated text.
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")


if __name__ == '__main__':
    main()
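

# A minimal way to run this script (assuming vLLM is installed and two
# CUDA GPUs are visible, e.g. CUDA_VISIBLE_DEVICES=0,1):
#
#     python phi_multi_infer.py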