Commit 22b7fdc5 authored by Nikhil Pinnaparaju

Alternative, more elegant way of setting the pad token for gpt2 generate

parent 1b604e16
@@ -112,7 +112,8 @@ class HFLM(BaseLM):
         generation_kwargs = {'do_sample': False, 'max_length': max_length}
         if eos_token_id is not None:
             generation_kwargs['eos_token_id'] = eos_token_id
-        return self.gpt2.generate(context, pad_token_id=eos_token_id, **generation_kwargs)
+        generation_kwargs['pad_token_id'] = eos_token_id
+        return self.gpt2.generate(context, **generation_kwargs)
 
 # for backwards compatibility
 GPT2LM = HFLM
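
For context, here is a minimal standalone sketch of the pattern this diff adopts: collecting all optional generation arguments, including pad_token_id, into a single generation_kwargs dict before the generate call. This assumes the Hugging Face transformers API; the model/tokenizer setup, prompt, and max_length value are illustrative stand-ins, not the repo's actual code.

# Sketch only; assumes the Hugging Face transformers library.
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")

context = tokenizer("The quick brown fox", return_tensors="pt").input_ids
eos_token_id = tokenizer.eos_token_id

# Build every generation option in one dict, mirroring the diff above:
# pad_token_id rides along with the other kwargs instead of being passed
# separately at the call site.
generation_kwargs = {"do_sample": False, "max_length": 32}
if eos_token_id is not None:
    generation_kwargs["eos_token_id"] = eos_token_id
    # GPT-2 defines no pad token, so reuse EOS to silence the
    # "Setting pad_token_id to eos_token_id" warning during generation.
    generation_kwargs["pad_token_id"] = eos_token_id

output = model.generate(context, **generation_kwargs)
print(tokenizer.decode(output[0]))

Keeping pad_token_id inside generation_kwargs means the generate call has exactly one spread-dict argument, and the pad token is only set when an EOS token is actually available, rather than passing pad_token_id=None unconditionally as the old line did.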