test_pipelines.py 15.8 KB
Newer Older
1
import unittest
Julien Chaumond's avatar
Julien Chaumond committed
2
from typing import Iterable, List, Optional
Morgan Funtowicz's avatar
Morgan Funtowicz committed
3
4

from transformers import pipeline
5
from transformers.pipelines import SUPPORTED_TASKS, DefaultArgumentHandler, Pipeline
6

Lysandre Debut's avatar
Lysandre Debut committed
7
from .utils import require_tf, require_torch, slow
8

Aymeric Augustin's avatar
Aymeric Augustin committed
9

10
# Checkpoints used by the pipeline tests below; the "tiny"/"random" variants
# keep the non-@slow tests fast to download and run.
NER_FINETUNED_MODELS = ["sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"]

# xlnet-base-cased disabled for now, since it crashes TF2
FEATURE_EXTRACT_FINETUNED_MODELS = ["sshleifer/tiny-distilbert-base-cased"]
TEXT_CLASSIF_FINETUNED_MODELS = ["sshleifer/tiny-distilbert-base-uncased-finetuned-sst-2-english"]
TEXT_GENERATION_FINETUNED_MODELS = ["sshleifer/tiny-ctrl"]

FILL_MASK_FINETUNED_MODELS = ["sshleifer/tiny-distilroberta-base"]
LARGE_FILL_MASK_FINETUNED_MODELS = ["distilroberta-base"]  # @slow

SUMMARIZATION_FINETUNED_MODELS = ["sshleifer/bart-tiny-random", "patrickvonplaten/t5-tiny-random"]
TF_SUMMARIZATION_FINETUNED_MODELS = ["patrickvonplaten/t5-tiny-random"]

# (model, task) pairs — the same T5 checkpoint is exercised on several
# translation directions.
TRANSLATION_FINETUNED_MODELS = [
    ("patrickvonplaten/t5-tiny-random", "translation_en_to_de"),
    ("patrickvonplaten/t5-tiny-random", "translation_en_to_ro"),
]
TF_TRANSLATION_FINETUNED_MODELS = [("patrickvonplaten/t5-tiny-random", "translation_en_to_fr")]

# Reference outputs for the @slow fill-mask tests (one entry per valid input,
# top-2 predictions each); compared on the "sequence" key only.
expected_fill_mask_result = [
    [
        {"sequence": "<s> My name is:</s>", "score": 0.009954338893294334, "token": 35},
        {"sequence": "<s> My name is John</s>", "score": 0.0080940006300807, "token": 610},
    ],
    [
        {"sequence": "<s> The largest city in France is Paris</s>", "score": 0.3185044229030609, "token": 2201},
        {"sequence": "<s> The largest city in France is Lyon</s>", "score": 0.21112334728240967, "token": 12790},
    ],
]
39

40

41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
class DefaultArgumentHandlerTestCase(unittest.TestCase):
    """Tests that DefaultArgumentHandler flattens args/kwargs into a list of inputs.

    Uses ``assertIsInstance`` (consistent with the other test classes in this
    file) instead of ``assertTrue(isinstance(...))`` for clearer failure messages.
    """

    def setUp(self) -> None:
        self.handler = DefaultArgumentHandler()

    def test_kwargs_x(self):
        """A single "X"/"x" keyword value is wrapped; a list of values is kept flat."""
        mono_data = {"X": "This is a sample input"}
        mono_args = self.handler(**mono_data)

        self.assertIsInstance(mono_args, list)
        self.assertEqual(len(mono_args), 1)

        multi_data = {"x": ["This is a sample input", "This is a second sample input"]}
        multi_args = self.handler(**multi_data)

        self.assertIsInstance(multi_args, list)
        self.assertEqual(len(multi_args), 2)

    def test_kwargs_data(self):
        """The "data" keyword behaves like "X": one item -> length 1, list -> its length."""
        mono_data = {"data": "This is a sample input"}
        mono_args = self.handler(**mono_data)

        self.assertIsInstance(mono_args, list)
        self.assertEqual(len(mono_args), 1)

        multi_data = {"data": ["This is a sample input", "This is a second sample input"]}
        multi_args = self.handler(**multi_data)

        self.assertIsInstance(multi_args, list)
        self.assertEqual(len(multi_args), 2)

    def test_multi_kwargs(self):
        """Several keywords are concatenated: total length is the sum of all values."""
        mono_data = {"data": "This is a sample input", "X": "This is a sample input 2"}
        mono_args = self.handler(**mono_data)

        self.assertIsInstance(mono_args, list)
        self.assertEqual(len(mono_args), 2)

        multi_data = {
            "data": ["This is a sample input", "This is a second sample input"],
            "test": ["This is a sample input 2", "This is a second sample input 2"],
        }
        multi_args = self.handler(**multi_data)

        self.assertIsInstance(multi_args, list)
        self.assertEqual(len(multi_args), 4)

    def test_args(self):
        """Positional inputs: bare string, single-item list, list, and *-expanded list."""
        mono_data = "This is a sample input"
        mono_args = self.handler(mono_data)

        self.assertIsInstance(mono_args, list)
        self.assertEqual(len(mono_args), 1)

        mono_data = ["This is a sample input"]
        mono_args = self.handler(mono_data)

        self.assertIsInstance(mono_args, list)
        self.assertEqual(len(mono_args), 1)

        multi_data = ["This is a sample input", "This is a second sample input"]
        multi_args = self.handler(multi_data)

        self.assertIsInstance(multi_args, list)
        self.assertEqual(len(multi_args), 2)

        multi_data = ["This is a sample input", "This is a second sample input"]
        multi_args = self.handler(*multi_data)

        self.assertIsInstance(multi_args, list)
        self.assertEqual(len(multi_args), 2)


Morgan Funtowicz's avatar
Morgan Funtowicz committed
113
class MonoColumnInputTestCase(unittest.TestCase):
    """Shared structural checks for pipelines that consume a single column of text."""

    def _test_mono_column_pipeline(
        self,
        nlp: Pipeline,
        valid_inputs: List,
        output_keys: Iterable[str],
        invalid_inputs: Optional[List] = None,
        expected_multi_result: Optional[List] = None,
        expected_check_keys: Optional[List[str]] = None,
    ):
        """Run ``nlp`` over valid and invalid inputs and verify output structure.

        Args:
            nlp: the instantiated pipeline under test.
            valid_inputs: inputs the pipeline must process without error.
            output_keys: keys every result dict must contain.
            invalid_inputs: inputs that must make the pipeline raise.
                Defaults to ``[None]`` (previously a mutable default argument).
            expected_multi_result: optional reference outputs, compared per input.
            expected_check_keys: keys compared against ``expected_multi_result``.
        """
        # None sentinel instead of a mutable default argument; behavior unchanged.
        if invalid_inputs is None:
            invalid_inputs = [None]

        self.assertIsNotNone(nlp)

        mono_result = nlp(valid_inputs[0])
        self.assertIsInstance(mono_result, list)
        self.assertIsInstance(mono_result[0], (dict, list))

        # Some pipelines return a list of dicts per input; unwrap one level
        # before checking keys.
        if isinstance(mono_result[0], list):
            mono_result = mono_result[0]

        for key in output_keys:
            self.assertIn(key, mono_result[0])

        # `single_input` avoids shadowing the builtin `input`.
        multi_result = [nlp(single_input) for single_input in valid_inputs]
        self.assertIsInstance(multi_result, list)
        self.assertIsInstance(multi_result[0], (dict, list))

        if expected_multi_result is not None:
            # Compare as sets: prediction order within an input is not pinned.
            for result, expect in zip(multi_result, expected_multi_result):
                for key in expected_check_keys or []:
                    self.assertEqual(
                        {o[key] for o in result}, {o[key] for o in expect},
                    )

        if isinstance(multi_result[0], list):
            multi_result = multi_result[0]

        for result in multi_result:
            for key in output_keys:
                self.assertIn(key, result)

        self.assertRaises(Exception, nlp, invalid_inputs)

    @require_torch
    def test_torch_ner(self):
        """NER (PyTorch): every entity dict exposes entity/word/score."""
        mandatory_keys = {"entity", "word", "score"}
        valid_inputs = ["HuggingFace is solving NLP one commit at a time.", "HuggingFace is based in New-York & Paris"]
        for model_name in NER_FINETUNED_MODELS:
            nlp = pipeline(task="ner", model=model_name, tokenizer=model_name)
            self._test_mono_column_pipeline(nlp, valid_inputs, mandatory_keys)

    @require_tf
    def test_tf_ner(self):
        """NER (TensorFlow): same contract as the PyTorch variant."""
        mandatory_keys = {"entity", "word", "score"}
        valid_inputs = ["HuggingFace is solving NLP one commit at a time.", "HuggingFace is based in New-York & Paris"]
        for model_name in NER_FINETUNED_MODELS:
            nlp = pipeline(task="ner", model=model_name, tokenizer=model_name, framework="tf")
            self._test_mono_column_pipeline(nlp, valid_inputs, mandatory_keys)

    @require_torch
    def test_torch_sentiment_analysis(self):
        """Sentiment analysis (PyTorch): results carry label and score."""
        mandatory_keys = {"label", "score"}
        valid_inputs = ["HuggingFace is solving NLP one commit at a time.", "HuggingFace is based in New-York & Paris"]
        for model_name in TEXT_CLASSIF_FINETUNED_MODELS:
            nlp = pipeline(task="sentiment-analysis", model=model_name, tokenizer=model_name)
            self._test_mono_column_pipeline(nlp, valid_inputs, mandatory_keys)

    @require_tf
    def test_tf_sentiment_analysis(self):
        """Sentiment analysis (TensorFlow): same contract as the PyTorch variant."""
        mandatory_keys = {"label", "score"}
        valid_inputs = ["HuggingFace is solving NLP one commit at a time.", "HuggingFace is based in New-York & Paris"]
        for model_name in TEXT_CLASSIF_FINETUNED_MODELS:
            nlp = pipeline(task="sentiment-analysis", model=model_name, tokenizer=model_name, framework="tf")
            self._test_mono_column_pipeline(nlp, valid_inputs, mandatory_keys)

    @require_torch
    def test_torch_feature_extraction(self):
        """Feature extraction (PyTorch): only the output structure is checked."""
        valid_inputs = ["HuggingFace is solving NLP one commit at a time.", "HuggingFace is based in New-York & Paris"]
        for model_name in FEATURE_EXTRACT_FINETUNED_MODELS:
            nlp = pipeline(task="feature-extraction", model=model_name, tokenizer=model_name)
            self._test_mono_column_pipeline(nlp, valid_inputs, {})

    @require_tf
    def test_tf_feature_extraction(self):
        """Feature extraction (TensorFlow): only the output structure is checked."""
        valid_inputs = ["HuggingFace is solving NLP one commit at a time.", "HuggingFace is based in New-York & Paris"]
        for model_name in FEATURE_EXTRACT_FINETUNED_MODELS:
            nlp = pipeline(task="feature-extraction", model=model_name, tokenizer=model_name, framework="tf")
            self._test_mono_column_pipeline(nlp, valid_inputs, {})

    @require_torch
    def test_torch_fill_mask(self):
        """Fill-mask (PyTorch): top-2 predictions with sequence/score/token keys."""
        mandatory_keys = {"sequence", "score", "token"}
        valid_inputs = [
            "My name is <mask>",
            "The largest city in France is <mask>",
        ]
        for model_name in FILL_MASK_FINETUNED_MODELS:
            nlp = pipeline(task="fill-mask", model=model_name, tokenizer=model_name, framework="pt", topk=2,)
            self._test_mono_column_pipeline(nlp, valid_inputs, mandatory_keys, expected_check_keys=["sequence"])

    @require_tf
    def test_tf_fill_mask(self):
        """Fill-mask (TensorFlow): same contract as the PyTorch variant."""
        mandatory_keys = {"sequence", "score", "token"}
        valid_inputs = [
            "My name is <mask>",
            "The largest city in France is <mask>",
        ]
        for model_name in FILL_MASK_FINETUNED_MODELS:
            nlp = pipeline(task="fill-mask", model=model_name, tokenizer=model_name, framework="tf", topk=2,)
            self._test_mono_column_pipeline(nlp, valid_inputs, mandatory_keys, expected_check_keys=["sequence"])

    @require_torch
    @slow
    def test_torch_fill_mask_results(self):
        """Fill-mask (PyTorch, full-size model): outputs match the recorded reference."""
        mandatory_keys = {"sequence", "score", "token"}
        valid_inputs = [
            "My name is <mask>",
            "The largest city in France is <mask>",
        ]
        for model_name in LARGE_FILL_MASK_FINETUNED_MODELS:
            nlp = pipeline(task="fill-mask", model=model_name, tokenizer=model_name, framework="pt", topk=2,)
            self._test_mono_column_pipeline(
                nlp,
                valid_inputs,
                mandatory_keys,
                expected_multi_result=expected_fill_mask_result,
                expected_check_keys=["sequence"],
            )

    @require_tf
    @slow
    def test_tf_fill_mask_results(self):
        """Fill-mask (TensorFlow, full-size model): outputs match the recorded reference."""
        mandatory_keys = {"sequence", "score", "token"}
        valid_inputs = [
            "My name is <mask>",
            "The largest city in France is <mask>",
        ]
        for model_name in LARGE_FILL_MASK_FINETUNED_MODELS:
            nlp = pipeline(task="fill-mask", model=model_name, tokenizer=model_name, framework="tf", topk=2)
            self._test_mono_column_pipeline(
                nlp,
                valid_inputs,
                mandatory_keys,
                expected_multi_result=expected_fill_mask_result,
                expected_check_keys=["sequence"],
            )

    @require_torch
    def test_torch_summarization(self):
        """Summarization (PyTorch): accepts str or list inputs, rejects non-text."""
        valid_inputs = ["A string like this", ["list of strings entry 1", "list of strings v2"]]
        invalid_inputs = [4, "<mask>"]
        mandatory_keys = ["summary_text"]
        for model in SUMMARIZATION_FINETUNED_MODELS:
            nlp = pipeline(task="summarization", model=model, tokenizer=model)
            self._test_mono_column_pipeline(nlp, valid_inputs, mandatory_keys, invalid_inputs=invalid_inputs)

    @require_tf
    def test_tf_summarization(self):
        """Summarization (TensorFlow): same contract as the PyTorch variant."""
        valid_inputs = ["A string like this", ["list of strings entry 1", "list of strings v2"]]
        invalid_inputs = [4, "<mask>"]
        mandatory_keys = ["summary_text"]
        for model_name in TF_SUMMARIZATION_FINETUNED_MODELS:
            nlp = pipeline(task="summarization", model=model_name, tokenizer=model_name, framework="tf",)
            self._test_mono_column_pipeline(nlp, valid_inputs, mandatory_keys, invalid_inputs=invalid_inputs)

    @require_torch
    def test_torch_translation(self):
        """Translation (PyTorch): each (model, task) pair yields translation_text."""
        valid_inputs = ["A string like this", ["list of strings entry 1", "list of strings v2"]]
        invalid_inputs = [4, "<mask>"]
        mandatory_keys = ["translation_text"]
        for model_name, task in TRANSLATION_FINETUNED_MODELS:
            nlp = pipeline(task=task, model=model_name, tokenizer=model_name)
            self._test_mono_column_pipeline(nlp, valid_inputs, mandatory_keys, invalid_inputs)

    @require_tf
    @slow
    def test_tf_translation(self):
        """Translation (TensorFlow, @slow): same contract as the PyTorch variant."""
        valid_inputs = ["A string like this", ["list of strings entry 1", "list of strings v2"]]
        invalid_inputs = [4, "<mask>"]
        mandatory_keys = ["translation_text"]
        for model, task in TF_TRANSLATION_FINETUNED_MODELS:
            nlp = pipeline(task=task, model=model, tokenizer=model, framework="tf")
            self._test_mono_column_pipeline(nlp, valid_inputs, mandatory_keys, invalid_inputs=invalid_inputs)

    @require_torch
    def test_torch_text_generation(self):
        """Text generation (PyTorch): only the output structure is checked."""
        valid_inputs = ["A string like this", ["list of strings entry 1", "list of strings v2"]]
        for model_name in TEXT_GENERATION_FINETUNED_MODELS:
            nlp = pipeline(task="text-generation", model=model_name, tokenizer=model_name, framework="pt")
            self._test_mono_column_pipeline(nlp, valid_inputs, {})

    @require_tf
    def test_tf_text_generation(self):
        """Text generation (TensorFlow): only the output structure is checked."""
        valid_inputs = ["A string like this", ["list of strings entry 1", "list of strings v2"]]
        for model_name in TEXT_GENERATION_FINETUNED_MODELS:
            nlp = pipeline(task="text-generation", model=model_name, tokenizer=model_name, framework="tf")
            self._test_mono_column_pipeline(nlp, valid_inputs, {})


# Tiny checkpoint exercised by the question-answering pipeline tests below.
QA_FINETUNED_MODELS = ["sshleifer/tiny-distilbert-base-cased-distilled-squad"]
312

Morgan Funtowicz's avatar
Morgan Funtowicz committed
313

314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
class QAPipelineTests(unittest.TestCase):
    """Structural checks for the question-answering pipeline on valid and malformed inputs."""

    def _test_qa_pipeline(self, nlp):
        """Assert answer dicts carry the expected keys and bad inputs raise."""
        # Fields every answer must expose.
        expected_keys = {"score", "answer", "start", "end"}
        valid_inputs = [
            {"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."},
            {
                "question": "In what field is HuggingFace working ?",
                "context": "HuggingFace is a startup based in New-York founded in Paris which is trying to solve NLP.",
            },
        ]
        invalid_inputs = [
            {"question": "", "context": "This is a test to try empty question edge case"},
            {"question": None, "context": "This is a test to try empty question edge case"},
            {"question": "What is does with empty context ?", "context": ""},
            {"question": "What is does with empty context ?", "context": None},
        ]

        self.assertIsNotNone(nlp)

        # A single example yields a single answer dict.
        single_result = nlp(valid_inputs[0])
        self.assertIsInstance(single_result, dict)
        for expected_key in expected_keys:
            self.assertIn(expected_key, single_result)

        # A batch of examples yields one answer dict per example.
        batch_result = nlp(valid_inputs)
        self.assertIsInstance(batch_result, list)
        self.assertIsInstance(batch_result[0], dict)
        for answer in batch_result:
            for expected_key in expected_keys:
                self.assertIn(expected_key, answer)

        # Malformed examples must raise — both one at a time and as a batch.
        for malformed in invalid_inputs:
            self.assertRaises(Exception, nlp, malformed)
        self.assertRaises(Exception, nlp, invalid_inputs)

    @require_torch
    def test_torch_question_answering(self):
        """QA pipeline contract holds with the PyTorch backend."""
        for model_name in QA_FINETUNED_MODELS:
            qa_pipeline = pipeline(task="question-answering", model=model_name, tokenizer=model_name)
            self._test_qa_pipeline(qa_pipeline)

    @require_tf
    def test_tf_question_answering(self):
        """QA pipeline contract holds with the TensorFlow backend."""
        for model_name in QA_FINETUNED_MODELS:
            qa_pipeline = pipeline(task="question-answering", model=model_name, tokenizer=model_name, framework="tf")
            self._test_qa_pipeline(qa_pipeline)
Lysandre Debut's avatar
Lysandre Debut committed
360
361
362
363


class PipelineCommonTests(unittest.TestCase):
    """Smoke-tests that every supported task builds a pipeline with default arguments."""

    # All task names registered with the pipeline factory.
    pipelines = SUPPORTED_TASKS.keys()

    @slow
    @require_tf
    def test_tf_defaults(self):
        # Test that pipelines can be correctly loaded without any argument
        for task in self.pipelines:
            message = "Testing TF defaults with TF and {}".format(task)
            with self.subTest(msg=message):
                pipeline(task, framework="tf")

    @slow
    @require_torch
    def test_pt_defaults(self):
        # Test that pipelines can be correctly loaded without any argument
        for task in self.pipelines:
            message = "Testing Torch defaults with PyTorch and {}".format(task)
            with self.subTest(msg=message):
                pipeline(task, framework="pt")