gaoqiong / lm-evaluation-harness · Commits

Commit b1f7284e, authored Feb 04, 2021 by Leo Gao
Parent: c55e8237

Add retry with backoff for GPT3

1 changed file with 15 additions and 2 deletions (+15 -2):
lm_eval/models/gpt3.py
lm_eval/models/gpt3.py  (view file @ b1f7284e)

@@ -3,6 +3,7 @@ import transformers
 from lm_eval.base import LM
 from lm_eval import utils
 from tqdm import tqdm
+import time


 def get_result(response, ctxlen):
@@ -21,6 +22,18 @@ def get_result(response, ctxlen):
     return continuation_logprobs, is_greedy


+def oa_completion(**kwargs):
+    import openai
+
+    backoff_time = 3
+    while True:
+        try:
+            return openai.Completion.create(**kwargs)
+        except openai.error.OpenAIError:
+            time.sleep(backoff_time)
+            backoff_time *= 1.5
+
+
 class GPT3LM(LM):
     MAX_LENGTH = 2048
@@ -67,7 +80,7 @@ class GPT3LM(LM):
                 inps.append(inp)
                 ctxlens.append(ctxlen)

-            response = openai.Completion.create(
+            response = oa_completion(
                 engine=self.engine,
                 prompt=inps,
                 echo=True,
@@ -89,7 +102,7 @@ class GPT3LM(LM):
            inp = context_enc[-(self.MAX_LENGTH - self.MAX_GEN_TOKS):]
            ctxlen = len(context_enc) - max(0, len(context_enc) - (self.MAX_LENGTH - self.MAX_GEN_TOKS))

-           response = openai.Completion.create(
+           response = oa_completion(
                engine=self.engine,
                prompt=[inp],
                max_tokens=self.MAX_GEN_TOKS,
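The commit replaces direct calls to openai.Completion.create with the oa_completion wrapper, which retries on any openai.error.OpenAIError and sleeps with an exponentially growing delay (starting at 3 seconds, multiplied by 1.5 after each failure). For illustration only, a minimal standalone sketch of the same retry-with-backoff pattern, using a generic callable in place of the OpenAI request and adding a delay cap that is not part of this commit:

import time


def retry_with_backoff(make_request, base_delay=3.0, factor=1.5, max_delay=60.0):
    # Sketch of the pattern used by oa_completion above.
    # The max_delay cap is an assumption added for illustration; the commit
    # itself grows the delay without bound.
    delay = base_delay
    while True:
        try:
            return make_request()
        except Exception:  # oa_completion catches only openai.error.OpenAIError
            time.sleep(delay)  # back off before retrying
            delay = min(delay * factor, max_delay)

As in the commit, the loop never gives up; a caller that needs a bound on total wait time would add a maximum retry count or re-raise after a deadline.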