gaoqiong / lm-evaluation-harness · Commits · 694dc642
"test/git@developer.sourcefind.cn:gaoqiong/migraphx.git" did not exist on "0272f393c4415f45399b453de7179158b32f5e5d"
Commit 694dc642, authored Nov 29, 2023 by baberabb

fix data + tensor parallel

parent 5075de60
Showing 1 changed file with 11 additions and 7 deletions.
lm_eval/models/vllm_causallms.py (+11, -7), view file @ 694dc642

 from collections import defaultdict
+import os
 from itertools import islice
 from typing import List, Tuple, Optional, Literal, Union, Any
 from transformers import AutoTokenizer
@@ -9,6 +10,7 @@ from tqdm import tqdm
 from lm_eval.api.registry import register_model
 from lm_eval import utils
 from ray.util.multiprocessing import Pool
+import multiprocessing
 try:
@@ -21,13 +23,15 @@ eval_logger = utils.eval_logger
 def run_inference_one_gpu(model_args: dict, sampling_params, requests: List[int]):
-    # gpu_id = [x for x in gpu_id]
-    # os.environ["CUDA_VISIBLE_DEVICES"]= str(gpu_id)
     llm = LLM(**model_args)
     return llm.generate(prompt_token_ids=requests, sampling_params=sampling_params)


-def chunk_list(my_list: List[Any], chunk_size: int):
-    for i in range(0, len(my_list), chunk_size):
-        yield list(islice(my_list, i, i + chunk_size))
+def chunk_list(lst, n):
+    chunk_size = len(lst) // n + (1 if len(lst) % n else 0)
+    return [lst[i : i + chunk_size] for i in range(0, len(lst), chunk_size)]


 @register_model("vllm")
@@ -80,6 +84,8 @@ please install vllm via `pip install lm-eval[vllm]` or `pip install -e .[vllm]`"
         }
         if self.data_parallel <= 1:
             self.model = LLM(**self.model_args)
+        else:
+            self.model_args["worker_use_ray"] = True
         self.tokenizer = AutoTokenizer.from_pretrained(
             pretrained,
             revision=revision,
@@ -146,10 +152,8 @@ please install vllm via `pip install lm-eval[vllm]` or `pip install -e .[vllm]`"
             requests = chunk_list(requests, self.data_parallel)
             inputs = [(self.model_args, sampling_params, req) for req in requests]
-            with Pool() as pool:
+            with Pool(self.data_parallel) as pool:
                 results = pool.starmap(
-                    run_inference_one_gpu, inputs)
+                    run_inference_one_gpu, inputs, self.data_parallel)
             # flatten results
             return [item for sublist in results for item in sublist]
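An aside on the chunk_list rewrite, since it is the heart of the data-parallel fix: the old helper treated its second argument as a chunk size, so chunk_list(requests, self.data_parallel) produced roughly len(requests) / data_parallel chunks of size data_parallel, far more chunks than workers. The new helper treats the argument as the number of chunks, one per GPU. A minimal, self-contained comparison (the request values are made up for illustration):

from itertools import islice
from typing import Any, List


def chunk_list_old(my_list: List[Any], chunk_size: int):
    # pre-commit behaviour: yields chunks of a FIXED SIZE
    for i in range(0, len(my_list), chunk_size):
        yield list(islice(my_list, i, i + chunk_size))


def chunk_list_new(lst, n):
    # post-commit behaviour: splits into n near-equal chunks, one per worker
    chunk_size = len(lst) // n + (1 if len(lst) % n else 0)
    return [lst[i : i + chunk_size] for i in range(0, len(lst), chunk_size)]


requests = list(range(10))
print(list(chunk_list_old(requests, 2)))  # 5 chunks of size 2: many more chunks than GPUs
print(chunk_list_new(requests, 2))        # 2 chunks of size 5: one per GPU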
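On the constructor change: when data_parallel > 1 the engine is no longer built up front; the new branch only sets worker_use_ray before model_args is shipped to the pool workers, each of which builds its own LLM. A sketch of the resulting arguments, assuming a vLLM version whose engine still accepts the worker_use_ray flag (the checkpoint name and sizes below are placeholders, not from the commit):

# Hypothetical model_args as assembled by the data-parallel branch above.
model_args = {
    "model": "facebook/opt-125m",  # placeholder checkpoint
    "tensor_parallel_size": 2,     # tensor parallel within one replica
    "worker_use_ray": True,        # let vLLM drive its workers through ray
}
# Each pool worker then runs:  llm = LLM(**model_args)
# so tensor parallel (inside one engine) and data parallel (across engines)
# can coexist, which is what the commit message refers to.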
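Finally, the dispatch path as a whole. The sketch below reproduces the committed pattern with stand-ins so it runs without GPUs or a ray cluster: multiprocessing.Pool replaces ray.util.multiprocessing.Pool, and the worker body fakes LLM(**model_args).generate(...). Note that the third argument the commit passes to pool.starmap is starmap's chunksize parameter; the sketch omits it, since inputs already holds exactly one element per worker.

from multiprocessing import Pool
from typing import Any, List


def chunk_list(lst, n):
    # same helper as in the commit: n near-equal chunks
    chunk_size = len(lst) // n + (1 if len(lst) % n else 0)
    return [lst[i : i + chunk_size] for i in range(0, len(lst), chunk_size)]


def run_inference_one_gpu(model_args: dict, sampling_params: dict, requests: List[Any]):
    # stand-in for: LLM(**model_args).generate(prompt_token_ids=requests,
    #                                          sampling_params=sampling_params)
    return [f"gen({r})" for r in requests]


if __name__ == "__main__":
    data_parallel = 2
    requests = list(range(8))
    chunks = chunk_list(requests, data_parallel)                # one chunk per GPU
    inputs = [({"model": "dummy"}, {"temperature": 0.0}, c) for c in chunks]
    with Pool(data_parallel) as pool:
        results = pool.starmap(run_inference_one_gpu, inputs)   # fan out across workers
    flat = [item for sublist in results for item in sublist]    # flatten, order preserved
    print(flat)  # ['gen(0)', ..., 'gen(7)']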