Unverified Commit 292e5814 authored by Hailey Schoelkopf's avatar Hailey Schoelkopf Committed by GitHub
Browse files

Fix minor edge cases (#951 #1503) (#1520)

* Fix padding

* Fix elif in model loading

* format
parent 45823914
...@@ -152,7 +152,7 @@ def simple_evaluate( ...@@ -152,7 +152,7 @@ def simple_evaluate(
if model_args is None: if model_args is None:
model_args = "" model_args = ""
elif isinstance(model_args, dict): if isinstance(model_args, dict):
lm = lm_eval.api.registry.get_model(model).create_from_arg_obj( lm = lm_eval.api.registry.get_model(model).create_from_arg_obj(
model_args, model_args,
{ {
...@@ -348,10 +348,16 @@ def evaluate( ...@@ -348,10 +348,16 @@ def evaluate(
gathered_item = ( gathered_item = (
lm.accelerator.gather(instances_rnk).cpu().detach().numpy().tolist() lm.accelerator.gather(instances_rnk).cpu().detach().numpy().tolist()
) )
# "multiple_choice" task types dispatch (several) "loglikelihood" request types
reqtype = (
"loglikelihood"
if task.OUTPUT_TYPE == "multiple_choice"
else task.OUTPUT_TYPE
)
# compute number of pseudo-batches to pad with (FSDP/DDP require even batches among ranks) # compute number of pseudo-batches to pad with (FSDP/DDP require even batches among ranks)
numpad = max(gathered_item) - gathered_item[lm.rank] numpad = max(gathered_item) - gathered_item[lm.rank]
padding_requests[task.OUTPUT_TYPE] += numpad # todo: may not account for padding in cases like SquadV2 which has multiple req types
padding_requests[reqtype] += numpad
### Run LM on inputs, get all outputs ### ### Run LM on inputs, get all outputs ###
# execute each type of request # execute each type of request
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment