Unverified Commit a5d2967b authored by lewtun, committed by GitHub

Fix examples in M2M100 docstrings (#11540)

Replaces `tok` with `tokenizer` so the examples run when copy-pasted.
parent 98020865
@@ -566,7 +566,7 @@ M2M_100_GENERATION_EXAMPLE = r"""
 >>> model_inputs = tokenizer(text_to_translate, return_tensors='pt')
 >>> # translate to French
->>> gen_tokens = model.generate( **model_inputs, forced_bos_token_id=tok.get_lang_id("fr"))
+>>> gen_tokens = model.generate( **model_inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
 >>> print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))
 """
@@ -1272,7 +1272,7 @@ class M2M100ForConditionalGeneration(M2M100PreTrainedModel):
 >>> model_inputs = tokenizer(text_to_translate, return_tensors='pt')
 >>> # translate to French
->>> gen_tokens = model.generate( **model_inputs, forced_bos_token_id=tok.get_lang_id("fr"))
+>>> gen_tokens = model.generate( **model_inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
 >>> print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))
 """
 return_dict = return_dict if return_dict is not None else self.config.use_return_dict
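For reference, here is how the corrected docstring example reads end to end once `tok` is replaced by `tokenizer`. This is a minimal sketch: the `facebook/m2m100_418M` checkpoint and the sample sentence are illustrative assumptions, not part of the diff above.

>>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

>>> # Checkpoint chosen for illustration; any M2M100 checkpoint should work.
>>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
>>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

>>> # Sample input (assumed); the tokenizer's default source language is English.
>>> text_to_translate = "Life is like a box of chocolates."
>>> model_inputs = tokenizer(text_to_translate, return_tensors="pt")

>>> # translate to French by forcing the French language token as the first generated token
>>> gen_tokens = model.generate(**model_inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
>>> print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))

The only behavioral change from the diff is the variable name passed to `forced_bos_token_id`; everything else mirrors the surrounding docstring example.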