"src/git@developer.sourcefind.cn:gaoqiong/migraphx.git" did not exist on "221dcea57e36d696cecfb21b3504d38435300242"
Unverified commit 10f8c636, authored by Lysandre Debut and committed by GitHub

Ci test tf super slow (#8007)

* Test TF GPU CI

* Change cache

* Fix missing torch requirement

* Fix some model tests


* Style

* LXMERT

* MobileBERT

* Longformer skip test

* XLNet

* The rest of the tests

* RAG goes OOM in multi gpu setup

* YAML test files

* Last fixes

* Skip doctests

* Fill mask tests

* Yaml files

* Last test fix

* Style

* Update cache

* Change ONNX tests to slow + use tiny model
parent 7e36deec
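
Most of the hunks below do one of two things: gate torch-only integration tests behind `@require_torch` so the TF-only CI runner skips them, and mark heavyweight tests `@slow` so they only run in the scheduled slow job. The sketch below illustrates that skip-decorator pattern; the real decorators live in `transformers.testing_utils`, and the bodies shown here are an illustrative assumption, not the library's exact implementation.

```python
# Illustrative sketch only: the real require_torch/slow decorators come from
# transformers.testing_utils and may differ in detail from this assumption.
import os
import unittest

from transformers import is_torch_available


def require_torch(test_case):
    """Skip the test entirely when PyTorch is not installed (e.g. on a TF-only CI runner)."""
    if not is_torch_available():
        return unittest.skip("test requires PyTorch")(test_case)
    return test_case


def slow(test_case):
    """Skip the test unless RUN_SLOW=1 is set, keeping the default CI job fast."""
    if os.environ.get("RUN_SLOW", "0").lower() not in ("1", "true", "yes"):
        return unittest.skip("test is slow; set RUN_SLOW=1 to run it")(test_case)
    return test_case


@require_torch
class ExampleIntegrationTest(unittest.TestCase):  # hypothetical test class for illustration
    @slow
    def test_heavy_generation(self):
        ...  # torch-dependent, long-running assertions would go here
```

Applying the decorator at class level, as the hunks below do for the integration test classes, skips every test in the class in one place.
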
@@ -279,6 +279,7 @@ class TransfoXLModelTest(ModelTesterMixin, unittest.TestCase):
self.assertEqual(model_embed.emb_layers[layer].weight.shape[0], cloned_embeddings[layer].shape[0])
+@require_torch
class TransfoXLModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_transfo_xl_wt103(self):
......
@@ -17,7 +17,7 @@
import unittest
from transformers import is_torch_available
-from transformers.testing_utils import slow, torch_device
+from transformers.testing_utils import require_torch, slow, torch_device
if is_torch_available():
@@ -26,6 +26,7 @@ if is_torch_available():
from transformers import XLMProphetNetForConditionalGeneration, XLMProphetNetTokenizer
+@require_torch
class XLMProphetNetModelIntegrationTest(unittest.TestCase):
@slow
def test_pretrained_checkpoint_hidden_states(self):
......
@@ -17,7 +17,7 @@
import unittest
from transformers import is_torch_available
-from transformers.testing_utils import require_sentencepiece, require_tokenizers, slow
+from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
@@ -28,6 +28,7 @@ if is_torch_available():
@require_sentencepiece
@require_tokenizers
+@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
@slow
def test_xlm_roberta_base(self):
......
@@ -95,26 +95,28 @@ class OnnxExportTestCase(unittest.TestCase):
@require_torch
@require_tokenizers
+@slow
def test_infer_dynamic_axis_pytorch(self):
"""
Validate that the dynamic axes generated for each parameter are correct
"""
from transformers import BertModel
-model = BertModel(BertConfig.from_pretrained("bert-base-cased"))
-tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased")
+model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
+tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
self._test_infer_dynamic_axis(model, tokenizer, "pt")
@require_tf
@require_tokenizers
+@slow
def test_infer_dynamic_axis_tf(self):
"""
Validate that the dynamic axes generated for each parameter are correct
"""
from transformers import TFBertModel
-model = TFBertModel(BertConfig.from_pretrained("bert-base-cased"))
-tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased")
+model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
+tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
self._test_infer_dynamic_axis(model, tokenizer, "tf")
def _test_infer_dynamic_axis(self, model, tokenizer, framework):
......
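
The ONNX hunk above swaps `bert-base-cased` for the much smaller `lysandre/tiny-bert-random` checkpoint and marks the tests `@slow`; the dynamic-axis logic does not depend on trained weights, so a tiny random model keeps the test cheap. A rough sketch of the same idea built locally follows; the config values are illustrative assumptions, not the hyperparameters of that checkpoint.

```python
# Sketch of the "tiny random model" idea: a deliberately small, randomly
# initialised BERT is enough for export/shape tests and avoids downloading
# a full checkpoint. The hyperparameters below are illustrative assumptions.
from transformers import BertConfig, BertModel

tiny_config = BertConfig(
    vocab_size=1024,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=2,
    intermediate_size=64,
)
tiny_model = BertModel(tiny_config)  # random weights, no network access needed
print(sum(p.numel() for p in tiny_model.parameters()))  # tens of thousands of parameters, not ~110M
```
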
@@ -96,21 +96,53 @@ class FillMaskPipelineTests(MonoInputPipelineCommonMixin, unittest.TestCase):
framework="pt",
topk=2,
)
-self._test_mono_column_pipeline(
-    nlp,
-    valid_inputs,
-    mandatory_keys,
-    expected_multi_result=EXPECTED_FILL_MASK_RESULT,
-    expected_check_keys=["sequence"],
-)
-self._test_mono_column_pipeline(
-    nlp,
-    valid_inputs[:1],
-    mandatory_keys,
-    expected_multi_result=EXPECTED_FILL_MASK_TARGET_RESULT,
-    expected_check_keys=["sequence"],
-    targets=valid_targets,
-)
+mono_result = nlp(valid_inputs[0], targets=valid_targets)
+self.assertIsInstance(mono_result, list)
+self.assertIsInstance(mono_result[0], dict)
+for mandatory_key in mandatory_keys:
+    self.assertIn(mandatory_key, mono_result[0])
+multi_result = [nlp(valid_input) for valid_input in valid_inputs]
+self.assertIsInstance(multi_result, list)
+self.assertIsInstance(multi_result[0], (dict, list))
+for result, expected in zip(multi_result, EXPECTED_FILL_MASK_RESULT):
+    self.assertEqual(set([o["sequence"] for o in result]), set([o["sequence"] for o in expected]))
+if isinstance(multi_result[0], list):
+    multi_result = multi_result[0]
+for result in multi_result:
+    for key in mandatory_keys:
+        self.assertIn(key, result)
+self.assertRaises(Exception, nlp, [None])
+valid_inputs = valid_inputs[:1]
+mono_result = nlp(valid_inputs[0], targets=valid_targets)
+self.assertIsInstance(mono_result, list)
+self.assertIsInstance(mono_result[0], dict)
+for mandatory_key in mandatory_keys:
+    self.assertIn(mandatory_key, mono_result[0])
+multi_result = [nlp(valid_input) for valid_input in valid_inputs]
+self.assertIsInstance(multi_result, list)
+self.assertIsInstance(multi_result[0], (dict, list))
+for result, expected in zip(multi_result, EXPECTED_FILL_MASK_TARGET_RESULT):
+    self.assertEqual(set([o["sequence"] for o in result]), set([o["sequence"] for o in expected]))
+if isinstance(multi_result[0], list):
+    multi_result = multi_result[0]
+for result in multi_result:
+    for key in mandatory_keys:
+        self.assertIn(key, result)
+self.assertRaises(Exception, nlp, [None])
@require_tf
@slow
@@ -123,18 +155,50 @@ class FillMaskPipelineTests(MonoInputPipelineCommonMixin, unittest.TestCase):
valid_targets = [" Patrick", " Clara"]
for model_name in self.large_models:
nlp = pipeline(task="fill-mask", model=model_name, tokenizer=model_name, framework="tf", topk=2)
-self._test_mono_column_pipeline(
-    nlp,
-    valid_inputs,
-    mandatory_keys,
-    expected_multi_result=EXPECTED_FILL_MASK_RESULT,
-    expected_check_keys=["sequence"],
-)
-self._test_mono_column_pipeline(
-    nlp,
-    valid_inputs[:1],
-    mandatory_keys,
-    expected_multi_result=EXPECTED_FILL_MASK_TARGET_RESULT,
-    expected_check_keys=["sequence"],
-    targets=valid_targets,
-)
+mono_result = nlp(valid_inputs[0], targets=valid_targets)
+self.assertIsInstance(mono_result, list)
+self.assertIsInstance(mono_result[0], dict)
+for mandatory_key in mandatory_keys:
+    self.assertIn(mandatory_key, mono_result[0])
+multi_result = [nlp(valid_input) for valid_input in valid_inputs]
+self.assertIsInstance(multi_result, list)
+self.assertIsInstance(multi_result[0], (dict, list))
+for result, expected in zip(multi_result, EXPECTED_FILL_MASK_RESULT):
+    self.assertEqual(set([o["sequence"] for o in result]), set([o["sequence"] for o in expected]))
+if isinstance(multi_result[0], list):
+    multi_result = multi_result[0]
+for result in multi_result:
+    for key in mandatory_keys:
+        self.assertIn(key, result)
+self.assertRaises(Exception, nlp, [None])
+valid_inputs = valid_inputs[:1]
+mono_result = nlp(valid_inputs[0], targets=valid_targets)
+self.assertIsInstance(mono_result, list)
+self.assertIsInstance(mono_result[0], dict)
+for mandatory_key in mandatory_keys:
+    self.assertIn(mandatory_key, mono_result[0])
+multi_result = [nlp(valid_input) for valid_input in valid_inputs]
+self.assertIsInstance(multi_result, list)
+self.assertIsInstance(multi_result[0], (dict, list))
+for result, expected in zip(multi_result, EXPECTED_FILL_MASK_TARGET_RESULT):
+    self.assertEqual(set([o["sequence"] for o in result]), set([o["sequence"] for o in expected]))
+if isinstance(multi_result[0], list):
+    multi_result = multi_result[0]
+for result in multi_result:
+    for key in mandatory_keys:
+        self.assertIn(key, result)
+self.assertRaises(Exception, nlp, [None])
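
For reference, the inlined assertions above all check the same output contract of the fill-mask pipeline: a list of dicts, one per predicted candidate, each carrying the mandatory keys (including "sequence"). A rough usage sketch follows; the model name and example sentence are illustrative assumptions, not taken from the test fixtures.

```python
# Illustrative sketch of the output shape the inlined assertions expect; the
# model name and inputs are assumptions, not the test's own fixtures.
from transformers import pipeline

nlp = pipeline(task="fill-mask", model="distilroberta-base", framework="pt", topk=2)

# Without targets: the top-k most likely fillers for the mask token.
result = nlp("My name is <mask>.")
# With targets: scores restricted to the given candidate tokens.
targeted = nlp("My name is <mask>.", targets=[" Patrick", " Clara"])

# Both calls return a list of dicts, e.g.
# [{"sequence": "...", "score": 0.12, "token": 345, ...}, ...]
for candidate in targeted:
    assert "sequence" in candidate and "score" in candidate and "token" in candidate
```
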