Unverified commit bf2abb41, authored by Rob Geada and committed by GitHub

Fix 'loglikelihood' typos in the api models file (#2459)

parent 26f607f5
@@ -58,7 +58,7 @@ class TemplateAPI(TemplateLM):
         pretrained: str = None,  # `model` takes precedence over `pretrained` when passed.
         base_url: str = None,
         tokenizer: Optional[str] = None,
-        # Logliklehood tasks require a tokenizer to calculate context lengths,
+        # Loglikelihood tasks require a tokenizer to calculate context lengths,
         # however the requests can be sent as a string if the API doesn't support token inputs.
         # use tokenized_requests=False
         tokenizer_backend: Optional[
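The corrected comment describes why loglikelihood scoring still needs a tokenizer even when the backend API only accepts plain strings: the tokenizer is used purely to measure how long the context is in tokens, while the payload itself can stay text. A minimal standalone sketch of that idea (the toy tokenizer and function names below are illustrative, not the harness's real helpers):

# Standalone sketch: the tokenizer measures the context length in tokens,
# but the request payload stays a plain string (tokenized_requests=False).
from typing import List, Tuple


def toy_tokenize(text: str) -> List[int]:
    # Stand-in tokenizer: one "token" per whitespace-separated word.
    return [hash(w) % 50257 for w in text.split()]


def build_loglikelihood_request(context: str, continuation: str) -> Tuple[str, int]:
    """Return the string payload plus the context length in tokens.

    The context length lets the caller slice the returned logprobs so that
    only the continuation tokens are scored.
    """
    ctxlen = len(toy_tokenize(context))
    payload = context + continuation  # sent as a plain string
    return payload, ctxlen


payload, ctxlen = build_loglikelihood_request("The capital of France is", " Paris")
print(ctxlen, repr(payload))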
@@ -196,7 +196,7 @@ class TemplateAPI(TemplateLM):
         if not self.tokenized_requests:
             # if messages are tokenized:
             if isinstance(messages[0][0], int):
-                # assuming decoding is lossless. However, this is only for logliklehood requests
+                # assuming decoding is lossless. However, this is only for loglikelihood requests
                 # as we need to compute the context length. For generations, we don't need to tokenize.
                 messages = self.decode_batch(messages)
         if self._batch_size <= 1:
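This branch handles the case where loglikelihood requests arrive as token IDs (needed to compute context lengths) but the backend only accepts strings, so they are decoded back to text on the assumption that the round-trip is lossless. A minimal standalone sketch of that check (the toy vocabulary and function names are assumptions for illustration):

# Standalone sketch: decode token-ID requests back to text when the API
# cannot accept token inputs, assuming encode/decode round-trips losslessly.
from typing import List, Union

Message = Union[str, List[int]]

VOCAB = {0: "The", 1: " capital", 2: " of", 3: " France", 4: " is", 5: " Paris"}


def decode_batch(token_batches: List[List[int]]) -> List[str]:
    return ["".join(VOCAB[t] for t in tokens) for tokens in token_batches]


def maybe_decode(messages: List[Message], tokenized_requests: bool) -> List[Message]:
    if not tokenized_requests and messages and isinstance(messages[0][0], int):
        # Loglikelihood path: context lengths were computed on tokens, but the
        # payload itself must be text, so decode before sending.
        return decode_batch(messages)  # type: ignore[arg-type]
    return messages


print(maybe_decode([[0, 1, 2, 3, 4], [5]], tokenized_requests=False))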
@@ -415,7 +415,7 @@ class TemplateAPI(TemplateLM):
             )
             return None

-    def batch_logliklehood_requests(
+    def batch_loglikelihood_requests(
         self, chunks: Iterable[List[LogLikelihoodInputs]]
     ) -> Tuple[List[List[int]], List[int], List[Tuple[str, str]]]:
         inputs = []
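The renamed method's signature shows what it produces: the concatenated context+continuation token sequences, the per-request context lengths, and (context, continuation) cache keys. A standalone sketch with the same return shape (the toy request type and inputs are assumptions; the real method operates on the harness's LogLikelihoodInputs):

# Standalone sketch of a batch_loglikelihood_requests-style helper.
from typing import Iterable, List, Tuple

ToyRequest = Tuple[Tuple[str, str], List[int], List[int]]  # (cache_key, ctx_toks, cont_toks)


def batch_loglikelihood_requests_sketch(
    chunks: Iterable[List[ToyRequest]],
) -> Tuple[List[List[int]], List[int], List[Tuple[str, str]]]:
    inputs: List[List[int]] = []
    ctxlens: List[int] = []
    cache_keys: List[Tuple[str, str]] = []
    for chunk in chunks:
        for cache_key, context_enc, continuation_enc in chunk:
            inputs.append(context_enc + continuation_enc)  # full sequence to score
            ctxlens.append(len(context_enc))               # where the continuation starts
            cache_keys.append(cache_key)                   # used for result caching
    return inputs, ctxlens, cache_keys


toy = [[(("2+2=", "4"), [17, 3, 3, 9], [11])]]
print(batch_loglikelihood_requests_sketch(toy))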
@@ -500,7 +500,7 @@ class TemplateAPI(TemplateLM):
         if self._concurrent <= 1:
             pbar = tqdm(desc="Requesting API", total=len(requests))
             for chunk in chunked:
-                inputs, ctxlens, cache_keys = self.batch_logliklehood_requests([chunk])
+                inputs, ctxlens, cache_keys = self.batch_loglikelihood_requests([chunk])
                 outputs = retry(
                     stop=stop_after_attempt(self.max_retries),
@@ -524,7 +524,7 @@ class TemplateAPI(TemplateLM):
                 )
                 pbar.update(1)
         else:
-            inputs, ctxlens, cache_keys = self.batch_logliklehood_requests(chunked)
+            inputs, ctxlens, cache_keys = self.batch_loglikelihood_requests(chunked)
             res = itertools.chain.from_iterable(
                 asyncio.run(
                     self.get_batched_requests(
...
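The last two hunks show the two dispatch paths at the call sites: a sequential loop that wraps each model call in a retry() decorator (tenacity-style, as the stop_after_attempt usage suggests), and a concurrent path that batches requests through asyncio and flattens the per-batch results with itertools.chain.from_iterable. A standalone sketch of both paths, assuming the tenacity package is installed; the fake call functions are placeholders, not the harness's real helpers:

# Standalone sketch of sequential-with-retries vs. concurrent batched dispatch.
import asyncio
import itertools
from typing import List

from tenacity import retry, stop_after_attempt


def fake_model_call(inputs: List[str]) -> List[float]:
    # Stand-in for the HTTP call; pretend each input scores 0.0.
    return [0.0 for _ in inputs]


def run_sequential(chunks: List[List[str]], max_retries: int = 3) -> List[float]:
    results: List[float] = []
    for chunk in chunks:
        # retry(...) returns a decorator; applying it retries the call on failure.
        outputs = retry(stop=stop_after_attempt(max_retries), reraise=True)(fake_model_call)(chunk)
        results.extend(outputs)
    return results


async def fake_async_call(inputs: List[str]) -> List[float]:
    await asyncio.sleep(0)  # stand-in for an awaited HTTP request
    return [0.0 for _ in inputs]


def run_concurrent(chunks: List[List[str]]) -> List[float]:
    async def gather_all():
        return await asyncio.gather(*(fake_async_call(c) for c in chunks))

    # Flatten the list of per-chunk result lists, as the hunk does with chain.from_iterable.
    return list(itertools.chain.from_iterable(asyncio.run(gather_all())))


print(run_sequential([["a", "b"], ["c"]]))
print(run_concurrent([["a", "b"], ["c"]]))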