"git@developer.sourcefind.cn:OpenDAS/dlib.git" did not exist on "2661386229d23f15b48960658b9a1d65853906f3"
Commit 07a79db5 authored by Lysandre

Fix failing doc samples

parent bdd3d0c7
@@ -47,6 +47,7 @@ The different languages this model/tokenizer handles, as well as the ids of thes

 .. code-block::

+    # Continuation of the previous script
     print(tokenizer.lang2id) # {'en': 0, 'fr': 1}

@@ -54,6 +55,7 @@ These ids should be used when passing a language parameter during a model pass.

 .. code-block::

+    # Continuation of the previous script
     input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")]) # batch size of 1

@@ -62,6 +64,7 @@ filled with the appropriate language ids, of the same size as input_ids. For eng

 .. code-block::

+    # Continuation of the previous script
     language_id = tokenizer.lang2id['en'] # 0
     langs = torch.tensor([language_id] * input_ids.shape[1]) # torch.tensor([0, 0, 0, ..., 0])

@@ -73,6 +76,7 @@ You can then feed it all as input to your model:

 .. code-block::

+    # Continuation of the previous script
     outputs = model(input_ids, langs=langs)

...
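For reference, the doc snippets patched above chain into a single runnable script. A minimal sketch follows, assuming the XLM checkpoint `xlm-clm-enfr-1024`; the checkpoint actually set up earlier in that doc page is not visible in this diff, so treat the name as illustrative:

    import torch
    from transformers import XLMTokenizer, XLMWithLMHeadModel

    # Assumed checkpoint with language embeddings; substitute the one used in the doc page.
    tokenizer = XLMTokenizer.from_pretrained('xlm-clm-enfr-1024')
    model = XLMWithLMHeadModel.from_pretrained('xlm-clm-enfr-1024')

    print(tokenizer.lang2id) # {'en': 0, 'fr': 1}

    input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")]) # batch size of 1

    language_id = tokenizer.lang2id['en'] # 0
    langs = torch.tensor([language_id] * input_ids.shape[1]) # torch.tensor([0, 0, 0, ..., 0])
    langs = langs.view(1, -1) # reshape to (batch_size, sequence_length) to match input_ids

    outputs = model(input_ids, langs=langs)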
@@ -148,9 +148,12 @@ class FlaubertModel(XLMModel):

     Examples::

+        from transformers import FlaubertTokenizer, FlaubertModel
+        import torch
         tokenizer = FlaubertTokenizer.from_pretrained('flaubert-base-cased')
         model = FlaubertModel.from_pretrained('flaubert-base-cased')
-        input_ids = torch.tensor(tokenizer.encode("Le chat manges une pomme.", add_special_tokens=True)).unsqueeze(0) # Batch size 1
+        input_ids = torch.tensor(tokenizer.encode("Le chat mange une pomme.", add_special_tokens=True)).unsqueeze(0) # Batch size 1
         outputs = model(input_ids)
         last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple

...
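As a quick sanity check after running the corrected example, the first output element is the sequence of hidden states; a hidden size of 768 is the value for the base checkpoint:

    print(last_hidden_states.shape) # torch.Size([1, sequence_length, 768]) for flaubert-base-cased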
@@ -78,6 +78,7 @@ class TestCodeExamples(unittest.TestCase):

         for file in files:
             # Open all files
+            print("Testing", file, end=" ")
             with open(os.path.join(directory, file)) as f:
                 # Retrieve examples
                 examples = get_examples_from_file(f)

@@ -99,7 +100,7 @@ class TestCodeExamples(unittest.TestCase):

                     joined_examples.append(example)
                     joined_examples_index += 1

-            print("Testing", file, str(len(joined_examples)) + "/" + str(len(joined_examples)))
+            print(str(len(joined_examples)) + "/" + str(len(joined_examples)))

             # Execute sub tests with every example.
             for index, code_example in enumerate(joined_examples):

@@ -114,7 +115,8 @@ class TestCodeExamples(unittest.TestCase):

     def test_main_doc_examples(self):
         doc_directory = "docs/source"
-        self.analyze_directory(doc_directory)
+        ignore_files = ["favicon.ico"]
+        self.analyze_directory(doc_directory, ignore_files=ignore_files)

     def test_modeling_examples(self):
         transformers_directory = "src/transformers"

@@ -125,5 +127,7 @@ class TestCodeExamples(unittest.TestCase):

             "modeling_tf_auto.py",
             "modeling_utils.py",
             "modeling_tf_t5.py",
+            "modeling_bart.py",
+            "modeling_tf_utils.py"
         ]

         self.analyze_directory(transformers_directory, identifier=modeling_files, ignore_files=ignore_files)
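For context on why the "# Continuation of the previous script" marker is added to the doc snippets above, here is a sketch of how a test like this could extract `.. code-block::` snippets from a doc file and join marked continuations. The real get_examples_from_file and joining logic are not shown in this diff, so the names, path, and details below are illustrative assumptions only:

    CONTINUATION_MARKER = "# Continuation of the previous script"

    def get_examples_from_file(f):
        # Illustrative only: collect the indented body that follows each '.. code-block::' directive.
        examples, current = [], None
        for raw_line in f:
            line = raw_line.rstrip("\n")
            if line.lstrip().startswith(".. code-block::"):
                if current:
                    examples.append("\n".join(current).strip())
                current = []
            elif current is not None:
                if line.strip() == "":
                    current.append("")
                elif line.startswith("    "):
                    current.append(line[4:])  # drop the block's base indentation
                else:
                    # First non-indented, non-blank line ends the block
                    examples.append("\n".join(current).strip())
                    current = None
        if current:
            examples.append("\n".join(current).strip())
        return examples

    # Snippets that start with the continuation marker are appended to the previous example,
    # so they can be executed in the same namespace as the script they continue.
    joined_examples = []
    with open("docs/source/multilingual.rst") as f:  # path assumed for illustration
        for example in get_examples_from_file(f):
            if example.startswith(CONTINUATION_MARKER) and joined_examples:
                joined_examples[-1] += "\n" + example
            else:
                joined_examples.append(example)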