"docs/vscode:/vscode.git/clone" did not exist on "87d5057d863c927e31761acd00a6716653275931"
Unverified Commit 184ef8ec authored by Lysandre Debut, committed by GitHub

TensorFlow tests: having from_pt set to True requires torch to be installed. (#10664)

* TF model exists for Blenderbot 400M

* Marian

* RAG
parent 543d0549
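For context, passing from_pt=True to from_pretrained makes Transformers download the PyTorch checkpoint and convert its weights on the fly, which only works when torch is installed in the test environment. Since native TF weights now exist for these checkpoints, the flag can simply be dropped. A minimal sketch of the difference (the checkpoint name below is assumed from the Blenderbot 400M integration test and may differ from the actual self.model_name):

    from transformers import TFAutoModelForSeq2SeqLM

    # Checkpoint name assumed for illustration; the test reads it from self.model_name.
    model_name = "facebook/blenderbot-400M-distill"

    # Before: convert the PyTorch checkpoint to TF at load time -> requires torch to be installed.
    model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)

    # After: load the native TF weights directly -> no torch dependency.
    model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)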
@@ -309,7 +309,7 @@ class TFBlenderbot400MIntegrationTests(unittest.TestCase):

     @cached_property
     def model(self):
-        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name, from_pt=True)
+        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
         return model

     @slow
...
@@ -350,7 +350,7 @@ class AbstractMarianIntegrationTest(unittest.TestCase):

     @cached_property
     def model(self):
         warnings.simplefilter("error")
-        model: TFMarianMTModel = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name, from_pt=True)
+        model: TFMarianMTModel = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
         assert isinstance(model, TFMarianMTModel)
         c = model.config
         self.assertListEqual(c.bad_words_ids, [[c.pad_token_id]])
...
@@ -562,7 +562,7 @@ class TFRagModelIntegrationTests(unittest.TestCase):
         )

     def token_model_nq_checkpoint(self, retriever):
-        return TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", from_pt=True, retriever=retriever)
+        return TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)

     def get_rag_config(self):
         question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
@@ -799,7 +799,7 @@ class TFRagModelIntegrationTests(unittest.TestCase):

     def test_rag_token_greedy_search(self):
         tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
         retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True)
-        rag_token = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever, from_pt=True)
+        rag_token = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)

         # check first two questions
         input_dict = tokenizer(
@@ -833,7 +833,7 @@ class TFRagModelIntegrationTests(unittest.TestCase):
         # NOTE: gold labels comes from num_beam=4, so this is effectively beam-search test
         tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
         retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True)
-        rag_token = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever, from_pt=True)
+        rag_token = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)

         input_dict = tokenizer(
             self.test_data_questions,
@@ -877,9 +877,7 @@ class TFRagModelIntegrationTests(unittest.TestCase):
         retriever = RagRetriever.from_pretrained(
             "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
         )
-        rag_sequence = TFRagSequenceForGeneration.from_pretrained(
-            "facebook/rag-sequence-nq", retriever=retriever, from_pt=True
-        )
+        rag_sequence = TFRagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)

         input_dict = tokenizer(
             self.test_data_questions,
@@ -923,9 +921,7 @@ class TFRagModelIntegrationTests(unittest.TestCase):
         retriever = RagRetriever.from_pretrained(
             "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
         )
-        rag_sequence = TFRagSequenceForGeneration.from_pretrained(
-            "facebook/rag-sequence-nq", retriever=retriever, from_pt=True
-        )
+        rag_sequence = TFRagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)

         input_dict = tokenizer(
             self.test_data_questions,
             return_tensors="tf",
...