"...git@developer.sourcefind.cn:tianlh/lightgbm-dcu.git" did not exist on "e7c53270a0f675f611a14eabbb07e16c27fbffe7"
Unverified Commit 8c6fde08 authored by Michael Goin's avatar Michael Goin Committed by GitHub
Browse files

Remove "device" from vllm_causallms.py (#3176)

Device has been a deprecated arg for a few releases of vLLM and is now removed in 0.10.0 https://github.com/vllm-project/vllm/pull/21349
parent 904bba12
@@ -131,7 +131,6 @@ class VLLM(TemplateLM):
         max_model_len: int = None,
         seed: int = 1234,
         gpu_memory_utilization: float = 0.9,
-        device: str = "cuda",
         data_parallel_size: int = 1,
         lora_local_path: str = None,
         # VLLM: enable thinking tags in the prompt.
@@ -173,7 +172,6 @@ class VLLM(TemplateLM):
             "swap_space": int(swap_space),
             "quantization": quantization,
             "seed": int(seed),
-            "device": str(device),
             "enable_lora": True if lora_local_path else False,
             "max_lora_rank": int(max_lora_rank),
         }
...
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment