Unverified Commit a87fe425 authored by Baber Abbasi, committed by GitHub

fix vllm data parallel (#2746)

* remove ray.remote resources

* remove kobest tag (already registered as a group)
parent af2d2f3e
@@ -243,13 +243,13 @@ class VLLM(TemplateLM):
             temperature=0, prompt_logprobs=1, max_tokens=1, detokenize=False
         )
         if self.data_parallel_size > 1:
-            # vLLM hangs if tensor_parallel > 1 and resources are set in ray.remote
+            # vLLM hangs if resources are set in ray.remote
             # also seems to only work with decorator and not with ray.remote() fn
             # see https://github.com/vllm-project/vllm/issues/973
-            @ray.remote(num_gpus=1 if self.tensor_parallel_size == 1 else None)
+            @ray.remote
             def run_inference_one_model(
                 model_args: dict,
-                sampling_params,
+                sampling_params: SamplingParams,
                 requests: List[List[int]],
                 lora_request: LoRARequest,
             ):
...
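For context, this decorated function is what the harness fans out across data-parallel replicas: requests are split into one chunk per replica, a Ray task is launched per chunk, and each task builds its own vLLM engine. A minimal sketch of that pattern, with illustrative model args, prompts, and chunking (not the harness's exact driver code):

from typing import List

import ray
from vllm import LLM, SamplingParams


# As in the patch above: no resources on the decorator, since setting them
# can make vLLM hang (https://github.com/vllm-project/vllm/issues/973).
@ray.remote
def run_inference_one_model(
    model_args: dict, sampling_params: SamplingParams, requests: List[str]
):
    # Each data-parallel replica constructs its own engine and generates
    # completions for its share of the requests.
    llm = LLM(**model_args)
    return llm.generate(requests, sampling_params)


data_parallel_size = 2  # illustrative
model_args = {"model": "facebook/opt-125m"}  # hypothetical small model
sampling_params = SamplingParams(temperature=0, max_tokens=16)
requests = [f"Prompt {i}:" for i in range(8)]  # toy prompts

# Round-robin the requests into one chunk per replica, launch the tasks,
# then gather all results and release the cluster.
chunks = [requests[i::data_parallel_size] for i in range(data_parallel_size)]
object_refs = [
    run_inference_one_model.remote(model_args, sampling_params, chunk)
    for chunk in chunks
]
results = ray.get(object_refs)
ray.shutdown()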
@@ -109,10 +109,10 @@ class VLLM_VLM(VLLM):
             temperature=0, prompt_logprobs=1, max_tokens=1, detokenize=False
         )
         if self.data_parallel_size > 1:
-            # vLLM hangs if tensor_parallel > 1 and resources are set in ray.remote
+            # vLLM hangs if resources are set in ray.remote
             # also seems to only work with decorator and not with ray.remote() fn
             # see https://github.com/vllm-project/vllm/issues/973
-            @ray.remote(num_gpus=1 if self.tensor_parallel_size == 1 else None)
+            @ray.remote
             def run_inference_one_model(
                 model_args: dict, sampling_params, requests: List[List[dict]]
             ):
...
-tag:
-  - kobest
 task: kobest_boolq
 dataset_path: skt/kobest_v1
 dataset_name: boolq
...
-tag:
-  - kobest
 task: kobest_copa
 dataset_path: skt/kobest_v1
 dataset_name: copa
...
-tag:
-  - kobest
 task: kobest_hellaswag
 dataset_path: skt/kobest_v1
 dataset_name: hellaswag
...
-tag:
-  - kobest
 task: kobest_sentineg
 dataset_path: skt/kobest_v1
 dataset_name: sentineg
...
-tag:
-  - kobest
 task: kobest_wic
 dataset_path: skt/kobest_v1
 dataset_name: wic
...