"""Minimal vLLM offline-inference example.

Loads the model at ``model_path`` and generates a completion for
``prompt``, printing each prompt/response pair.
"""

import os

# NOTE: must be set BEFORE importing vllm (which pulls in torch/CUDA).
# Setting it after the import — as the original did — can be silently
# ignored once the CUDA context is initialized.
os.environ["CUDA_VISIBLE_DEVICES"] = "7"

from vllm import LLM, SamplingParams

# TODO: fill in the prompt text and the local/HF path of the model.
prompt = ""
model_path = ""


def main() -> None:
    """Build the engine, run one generate() call, and print results."""
    sampling_params = SamplingParams(temperature=1, top_p=0.95)
    llm = LLM(
        model=model_path,
        trust_remote_code=True,
        enforce_eager=True,  # skip CUDA-graph capture for faster startup
        tensor_parallel_size=1,
    )
    outputs = llm.generate(prompt, sampling_params)
    for output in outputs:
        # Read the prompt off the output object instead of rebinding the
        # module-level ``prompt`` variable, as the original loop did.
        generated_text = output.outputs[0].text
        print(f"Prompt: {output.prompt!r}, Generated text: {generated_text!r}")


if __name__ == "__main__":
    # The guard matters here: vLLM may spawn worker processes, which
    # re-import this module and must not re-run the engine setup.
    main()