Unverified Commit bd0f2414 authored by Vicki Boykis, committed by GitHub
Browse files

Enabling OpenAI completions via gooseai (#1141)

* enabling OpenAI completions via gooseai

* openai-completions and pin openai
parent 35a65ba0
...@@ -55,8 +55,8 @@ please install these via `pip install lm-eval[openai]` or `pip install -e .[open ...@@ -55,8 +55,8 @@ please install these via `pip install lm-eval[openai]` or `pip install -e .[open
backoff_time = 3 backoff_time = 3
while True: while True:
try: try:
return openai.Completions.create(**kwargs) return openai.completions.create(**kwargs)
except openai.error.OpenAIError: except openai.OpenAIError:
import traceback import traceback
traceback.print_exc() traceback.print_exc()
...@@ -64,13 +64,13 @@ please install these via `pip install lm-eval[openai]` or `pip install -e .[open ...@@ -64,13 +64,13 @@ please install these via `pip install lm-eval[openai]` or `pip install -e .[open
backoff_time *= 1.5 backoff_time *= 1.5
@register_model("gooseai") @register_model("openai-completions")
class OpenaiCompletionsLM(LM): class OpenaiCompletionsLM(LM):
REQ_CHUNK_SIZE = 20 REQ_CHUNK_SIZE = 20
def __init__( def __init__(
self, self,
engine: str = "text-davinci-003", model: str = "text-davinci-003",
truncate: bool = False, truncate: bool = False,
batch_size: int = 1, batch_size: int = 1,
) -> None: ) -> None:
...@@ -89,8 +89,8 @@ class OpenaiCompletionsLM(LM): ...@@ -89,8 +89,8 @@ class OpenaiCompletionsLM(LM):
"attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. \ "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. \
please install these via `pip install lm-eval[openai]` or `pip install -e .[openai]`", please install these via `pip install lm-eval[openai]` or `pip install -e .[openai]`",
) )
self.engine = engine self.model= model
self.tokenizer = tiktoken.encoding_for_model(self.engine) self.tokenizer = tiktoken.encoding_for_model(self.model)
self.vocab_size = self.tokenizer.n_vocab self.vocab_size = self.tokenizer.n_vocab
self.truncate = truncate self.truncate = truncate
self.end_of_text_token_id = self.tokenizer.eot_token self.end_of_text_token_id = self.tokenizer.eot_token
...@@ -245,7 +245,7 @@ class OpenaiCompletionsLM(LM): ...@@ -245,7 +245,7 @@ class OpenaiCompletionsLM(LM):
until = request_args.get("until", ["<|endoftext|>"]) until = request_args.get("until", ["<|endoftext|>"])
response = oa_completion( response = oa_completion(
engine=self.engine, model=self.model,
prompt=inps, prompt=inps,
max_tokens=self.max_gen_toks, max_tokens=self.max_gen_toks,
temperature=0.0, temperature=0.0,
...@@ -254,7 +254,7 @@ class OpenaiCompletionsLM(LM): ...@@ -254,7 +254,7 @@ class OpenaiCompletionsLM(LM):
) )
for resp, (context, args_) in zip(response.choices, chunk): for resp, (context, args_) in zip(response.choices, chunk):
s = resp["text"] s = getattr(resp, 'text')
until_ = args_.get("until", ["<|endoftext|>"]) until_ = args_.get("until", ["<|endoftext|>"])
......
...@@ -70,7 +70,7 @@ promptsource = [ ...@@ -70,7 +70,7 @@ promptsource = [
] ]
gptq = ["auto-gptq[triton] @ git+https://github.com/PanQiWei/AutoGPTQ"] gptq = ["auto-gptq[triton] @ git+https://github.com/PanQiWei/AutoGPTQ"]
anthropic = ["anthropic"] anthropic = ["anthropic"]
openai = ["openai>=1.3.5", "tiktoken"] openai = ["openai==1.3.9", "tiktoken"]
vllm = ["vllm"] vllm = ["vllm"]
all = [ all = [
"lm_eval[dev]", "lm_eval[dev]",
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment