Commit ffda60ab authored by Nathan Habib

removing float16 conversion in log_softmax

parent e377c47f
@@ -1132,7 +1132,7 @@ class HFLM(TemplateLM):
         multi_logits = F.log_softmax(
             self._model_call(batched_inps, **call_kwargs),
             dim=-1,
-            dtype=torch.float16,
+            # dtype=torch.float16,
         ) # [batch, padding_length (inp or cont), vocab]
         for (request_str, ctx_tokens, _), logits, inplen, cont_toks in zip(
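For context, a minimal sketch of the issue the commit appears to address (not part of the repository; the tensor shape and values below are made up for illustration): forcing dtype=torch.float16 inside F.log_softmax computes the log-probabilities in half precision, which can noticeably shift the resulting log-likelihoods compared to the model's native precision.

# Hypothetical illustration only: compares log_softmax in the default dtype
# against the previously forced float16 path.
import torch
import torch.nn.functional as F

logits = torch.randn(1, 4, 32000) * 10  # fake [batch, seq, vocab] logits

lp_default = F.log_softmax(logits, dim=-1)                       # native precision
lp_fp16 = F.log_softmax(logits, dim=-1, dtype=torch.float16)     # forced half precision

# Maximum absolute difference shows the precision loss introduced by float16.
print((lp_default - lp_fp16.float()).abs().max())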