gaoqiong / lm-evaluation-harness

Commit 8ea23e2d, authored Nov 28, 2023 by lintangsutawika
Parent: 5d3bf2e7

    updated oa_completion and fix import error

Showing 1 changed file with 4 additions and 8 deletions.

lm_eval/models/openai_completions.py (+4, -8)
...
@@ -10,9 +10,6 @@ from lm_eval import utils
 from lm_eval.api.model import LM
 from lm_eval.api.registry import register_model
-import asyncio
-from openai import OpenAI, AsyncOpenAI

 def get_result(response: dict, ctxlen: int) -> Tuple[float, bool]:
     """Process results from OpenAI API response.
...
@@ -58,7 +55,7 @@ please install these via `pip install lm-eval[openai]` or `pip install -e .[open
     backoff_time = 3
     while True:
         try:
-            return openai.Completion.create(**kwargs)
+            return openai.Completions.create(**kwargs)
         except openai.error.OpenAIError:
             import traceback
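
The context around the changed line shows the retry loop used by the completion helper: a three-second `backoff_time`, an infinite `while True`, and a retry on `openai.error.OpenAIError`. A self-contained sketch of that pattern, with `request_fn` as a placeholder callable and an exponential backoff multiplier that is an assumption rather than something visible in the diff:

    import time
    import traceback


    def completion_with_retries(request_fn, **kwargs):
        """Keep calling request_fn(**kwargs), sleeping between failed attempts."""
        backoff_time = 3  # seconds, as in the diff context
        while True:
            try:
                return request_fn(**kwargs)
            except Exception:  # the real code catches openai.error.OpenAIError
                traceback.print_exc()
                time.sleep(backoff_time)
                backoff_time *= 1.5  # assumption: grow the wait on repeated failures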
...
@@ -344,7 +341,6 @@ please install these via `pip install lm-eval[openai]` or `pip install -e .[open
 @register_model("openai-chat-completions")
 class OpenaiChatCompletionsLM(LM):
     def __init__(
         self, model: str = "gpt-3.5-turbo", truncate: bool = False, batch_size: int = 1
     ) -> None:
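
For orientation, `@register_model("openai-chat-completions")` registers the class under that name in the harness's model registry, and the `__init__` signature above lists its keyword arguments. Constructing it directly would look roughly like this (argument values are just the defaults from the signature; OPENAI_API_KEY must be set, see the next hunk):

    from lm_eval.models.openai_completions import OpenaiChatCompletionsLM

    # Arguments mirror the defaults in the signature shown above.
    lm = OpenaiChatCompletionsLM(model="gpt-3.5-turbo", truncate=False, batch_size=1)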
...
@@ -376,7 +372,7 @@ class OpenaiChatCompletionsLM(LM):
         self.end_of_text_token_id = self.tokenizer.eot_token
         # Read from environment variable OPENAI_API_KEY
-        self.client = OpenAI()  # AsyncOpenAI()
+        self.client = openai.OpenAI()  # openai.AsyncOpenAI()

     @property
     def eot_token_id(self):
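
The changed line here is consistent with the first hunk: with the direct `OpenAI` import gone, the client is created through the module-qualified `openai.OpenAI()`. The OpenAI v1 client falls back to the OPENAI_API_KEY environment variable, as the comment above the line notes, when no `api_key` argument is passed:

    import os
    import openai

    os.environ.setdefault("OPENAI_API_KEY", "sk-...")  # placeholder key for illustration
    client = openai.OpenAI()  # picks up OPENAI_API_KEY from the environment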
...
@@ -451,8 +447,8 @@ class OpenaiChatCompletionsLM(LM):
         pbar = tqdm(total=len(requests), disable=(self.rank != 0))
         for key, re_ord in re_ords.items():
             # n needs to be 1 because messages in
             # chat completion are not batch but
             # is regarded as a single conversation.
             chunks = utils.chunks(re_ord.get_reordered(), n=1)
             for chunk in chunks:
...
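
The comments in this last hunk explain the `n=1` argument to `utils.chunks`: each chat-completion call carries a single conversation, so requests are iterated one at a time instead of being batched. A minimal stand-in for a chunking helper with that behaviour (an illustrative sketch, not the actual `lm_eval.utils.chunks` implementation):

    from typing import Iterable, Iterator, List, TypeVar

    T = TypeVar("T")


    def chunks(iterable: Iterable[T], n: int = 1) -> Iterator[List[T]]:
        """Yield successive lists of at most n items; n=1 gives one request per chunk."""
        batch: List[T] = []
        for item in iterable:
            batch.append(item)
            if len(batch) == n:
                yield batch
                batch = []
        if batch:
            yield batch


    # With n=1, every chat-completion request ends up in its own chunk:
    # list(chunks(["req_a", "req_b"], n=1)) == [["req_a"], ["req_b"]]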