# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from transformers import (
    MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
    AutoModelForTokenClassification,
    AutoTokenizer,
    TokenClassificationPipeline,
    pipeline,
)
from transformers.pipelines import AggregationStrategy, TokenClassificationArgumentHandler
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_torch_gpu,
    slow,
)

from .test_pipelines_common import ANY, PipelineTestCaseMeta


VALID_INPUTS = ["A simple string", ["list of strings", "A simple string that is quite a bit longer"]]


@is_pipeline_test
class TokenClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
    model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

    def run_pipeline_test(self, model, tokenizer, feature_extractor):
        token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer)

        outputs = token_classifier("A simple string")
        self.assertIsInstance(outputs, list)
        n = len(outputs)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "entity": ANY(str),
                    "score": ANY(float),
                    "start": ANY(int),
                    "end": ANY(int),
                    "index": ANY(int),
                    "word": ANY(str),
                }
                for i in range(n)
            ],
        )
        outputs = token_classifier(["list of strings", "A simple string that is quite a bit longer"])
        self.assertIsInstance(outputs, list)
        self.assertEqual(len(outputs), 2)
        n = len(outputs[0])
        m = len(outputs[1])

        self.assertEqual(
            nested_simplify(outputs),
            [
                [
                    {
                        "entity": ANY(str),
                        "score": ANY(float),
                        "start": ANY(int),
                        "end": ANY(int),
                        "index": ANY(int),
                        "word": ANY(str),
                    }
                    for i in range(n)
                ],
                [
                    {
                        "entity": ANY(str),
                        "score": ANY(float),
                        "start": ANY(int),
                        "end": ANY(int),
                        "index": ANY(int),
                        "word": ANY(str),
                    }
                    for i in range(m)
                ],
            ],
        )

        self.run_aggregation_strategy(model, tokenizer)

    def run_aggregation_strategy(self, model, tokenizer):
        token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="simple")
        self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.SIMPLE)
        outputs = token_classifier("A simple string")
        self.assertIsInstance(outputs, list)
        n = len(outputs)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "entity_group": ANY(str),
                    "score": ANY(float),
                    "start": ANY(int),
                    "end": ANY(int),
                    "word": ANY(str),
                }
                for i in range(n)
            ],
        )

        token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="first")
        self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.FIRST)
        outputs = token_classifier("A simple string")
        self.assertIsInstance(outputs, list)
        n = len(outputs)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "entity_group": ANY(str),
                    "score": ANY(float),
                    "start": ANY(int),
                    "end": ANY(int),
                    "word": ANY(str),
                }
                for i in range(n)
            ],
        )

        token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="max")
        self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.MAX)
        outputs = token_classifier("A simple string")
        self.assertIsInstance(outputs, list)
        n = len(outputs)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "entity_group": ANY(str),
                    "score": ANY(float),
                    "start": ANY(int),
                    "end": ANY(int),
                    "word": ANY(str),
                }
                for i in range(n)
            ],
        )

        token_classifier = TokenClassificationPipeline(
            model=model, tokenizer=tokenizer, aggregation_strategy="average"
        )
        self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.AVERAGE)
        outputs = token_classifier("A simple string")
        self.assertIsInstance(outputs, list)
        n = len(outputs)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "entity_group": ANY(str),
                    "score": ANY(float),
                    "start": ANY(int),
                    "end": ANY(int),
                    "word": ANY(str),
                }
                for i in range(n)
            ],
        )

        with self.assertWarns(UserWarning):
            token_classifier = pipeline(task="ner", model=model, tokenizer=tokenizer, grouped_entities=True)
        self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.SIMPLE)

        with self.assertWarns(UserWarning):
            token_classifier = pipeline(
                task="ner", model=model, tokenizer=tokenizer, grouped_entities=True, ignore_subwords=True
            )
        self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.FIRST)
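
        # Rough mapping of the deprecated kwargs onto AggregationStrategy, as
        # exercised above:
        #   grouped_entities=True                         -> SIMPLE
        #   grouped_entities=True, ignore_subwords=True   -> FIRST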

    @require_torch
    @slow
    def test_spanish_bert(self):
        # https://github.com/huggingface/transformers/pull/4987
        NER_MODEL = "mrm8488/bert-spanish-cased-finetuned-ner"
        model = AutoModelForTokenClassification.from_pretrained(NER_MODEL)
        tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True)
        sentence = """Consuelo Ara煤jo Noguera, ministra de cultura del presidente Andr茅s Pastrana (1998.2002) fue asesinada por las Farc luego de haber permanecido secuestrada por algunos meses."""

        token_classifier = pipeline("ner", model=model, tokenizer=tokenizer)
        output = token_classifier(sentence)
        self.assertEqual(
            nested_simplify(output[:3]),
            [
                {"entity": "B-PER", "score": 0.999, "word": "Cons", "start": 0, "end": 4, "index": 1},
                {"entity": "B-PER", "score": 0.803, "word": "##uelo", "start": 4, "end": 8, "index": 2},
                {"entity": "I-PER", "score": 0.999, "word": "Ara", "start": 9, "end": 12, "index": 3},
            ],
        )

        token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")
        output = token_classifier(sentence)
        self.assertEqual(
            nested_simplify(output[:3]),
            [
                {"entity_group": "PER", "score": 0.999, "word": "Cons", "start": 0, "end": 4},
                {"entity_group": "PER", "score": 0.966, "word": "##uelo Ara煤jo Noguera", "start": 4, "end": 23},
                {"entity_group": "PER", "score": 1.0, "word": "Andr茅s Pastrana", "start": 60, "end": 75},
            ],
        )

        token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="first")
        output = token_classifier(sentence)
        self.assertEqual(
            nested_simplify(output[:3]),
            [
                {"entity_group": "PER", "score": 0.999, "word": "Consuelo Ara煤jo Noguera", "start": 0, "end": 23},
                {"entity_group": "PER", "score": 1.0, "word": "Andr茅s Pastrana", "start": 60, "end": 75},
                {"entity_group": "ORG", "score": 0.999, "word": "Farc", "start": 110, "end": 114},
            ],
        )

        token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="max")
        output = token_classifier(sentence)
        self.assertEqual(
            nested_simplify(output[:3]),
            [
                {"entity_group": "PER", "score": 0.999, "word": "Consuelo Ara煤jo Noguera", "start": 0, "end": 23},
                {"entity_group": "PER", "score": 1.0, "word": "Andr茅s Pastrana", "start": 60, "end": 75},
                {"entity_group": "ORG", "score": 0.999, "word": "Farc", "start": 110, "end": 114},
            ],
        )

        token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="average")
        output = token_classifier(sentence)
        self.assertEqual(
            nested_simplify(output[:3]),
            [
                {"entity_group": "PER", "score": 0.966, "word": "Consuelo Ara煤jo Noguera", "start": 0, "end": 23},
                {"entity_group": "PER", "score": 1.0, "word": "Andr茅s Pastrana", "start": 60, "end": 75},
                {"entity_group": "ORG", "score": 0.542, "word": "Farc", "start": 110, "end": 114},
            ],
        )
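
        # Informal summary of the differences visible above: "simple" groups at
        # the token level, so the second B-PER token starts a new group and
        # "Cons" is split from "##uelo Araújo Noguera"; the word-level
        # strategies ("first", "max", "average") merge subwords into words
        # before labeling, keeping "Consuelo Araújo Noguera" whole.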

    @require_torch_gpu
    @slow
    def test_gpu(self):
        sentence = "This is dummy sentence"
        ner = pipeline(
            "token-classification",
            device=0,
            aggregation_strategy=AggregationStrategy.SIMPLE,
        )

        output = ner(sentence)
        self.assertEqual(nested_simplify(output), [])

    @require_torch
    @slow
    def test_dbmdz_english(self):
        # Same checks as above, on an English model and a different sentence
        NER_MODEL = "dbmdz/bert-large-cased-finetuned-conll03-english"
        model = AutoModelForTokenClassification.from_pretrained(NER_MODEL)
        tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True)
        sentence = """Enzo works at the the UN"""
        token_classifier = pipeline("ner", model=model, tokenizer=tokenizer)
        output = token_classifier(sentence)
        self.assertEqual(
            nested_simplify(output),
            [
                {"entity": "I-PER", "score": 0.997, "word": "En", "start": 0, "end": 2, "index": 1},
                {"entity": "I-PER", "score": 0.996, "word": "##zo", "start": 2, "end": 4, "index": 2},
                {"entity": "I-ORG", "score": 0.999, "word": "UN", "start": 22, "end": 24, "index": 7},
            ],
        )

        token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")
        output = token_classifier(sentence)
        self.assertEqual(
            nested_simplify(output),
            [
                {"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4},
                {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 22, "end": 24},
            ],
        )

        token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="first")
        output = token_classifier(sentence)
        self.assertEqual(
            nested_simplify(output[:3]),
            [
                {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4},
                {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 22, "end": 24},
            ],
        )

        token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="max")
        output = token_classifier(sentence)
        self.assertEqual(
            nested_simplify(output[:3]),
            [
                {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4},
                {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 22, "end": 24},
            ],
        )

        token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="average")
        output = token_classifier(sentence)
        self.assertEqual(
            nested_simplify(output),
            [
                {"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4},
                {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 22, "end": 24},
            ],
        )

    @require_torch
    @slow
    def test_aggregation_strategy_byte_level_tokenizer(self):
        sentence = "Groenlinks praat over Schiphol."
        ner = pipeline("ner", model="xlm-roberta-large-finetuned-conll02-dutch", aggregation_strategy="max")
        self.assertEqual(
            nested_simplify(ner(sentence)),
            [
                {"end": 10, "entity_group": "ORG", "score": 0.994, "start": 0, "word": "Groenlinks"},
                {"entity_group": "LOC", "score": 1.0, "word": "Schiphol.", "start": 22, "end": 31},
            ],
        )

    @require_torch
    def test_aggregation_strategy_no_b_i_prefix(self):
        model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
        tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
        token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt")
        # Overridden so the score indices in this test are easy to follow
        token_classifier.model.config.id2label = {0: "O", 1: "MISC", 2: "PER", 3: "ORG", 4: "LOC"}
        example = [
            {
                "scores": np.array([0, 0, 0, 0, 0.9968166351318359]),
                "index": 1,
                "is_subword": False,
                "word": "En",
                "start": 0,
                "end": 2,
            },
            {
                "scores": np.array([0, 0, 0, 0, 0.9957635998725891]),
                "index": 2,
                "is_subword": True,
                "word": "##zo",
                "start": 2,
                "end": 4,
            },
            {
                "scores": np.array([0, 0, 0, 0.9986497163772583, 0]),
                "index": 7,
                "word": "UN",
                "is_subword": False,
                "start": 11,
                "end": 13,
            },
        ]
        self.assertEqual(
            nested_simplify(token_classifier.aggregate(example, AggregationStrategy.NONE)),
            [
                {"end": 2, "entity": "LOC", "score": 0.997, "start": 0, "word": "En", "index": 1},
                {"end": 4, "entity": "LOC", "score": 0.996, "start": 2, "word": "##zo", "index": 2},
                {"end": 13, "entity": "ORG", "score": 0.999, "start": 11, "word": "UN", "index": 7},
            ],
        )
        self.assertEqual(
            nested_simplify(token_classifier.aggregate(example, AggregationStrategy.SIMPLE)),
            [
                {"entity_group": "LOC", "score": 0.996, "word": "Enzo", "start": 0, "end": 4},
                {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
            ],
        )
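
        # Without B-/I- prefixes in id2label, the labels are used as-is as the
        # entity group, as the SIMPLE output above shows ("LOC", "ORG").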

    @require_torch
    def test_aggregation_strategy(self):
        model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
        tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
        token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt")
        # Spelled out so the score indices in this test are easy to follow
        self.assertEqual(
            token_classifier.model.config.id2label,
            {0: "O", 1: "B-MISC", 2: "I-MISC", 3: "B-PER", 4: "I-PER", 5: "B-ORG", 6: "I-ORG", 7: "B-LOC", 8: "I-LOC"},
        )
        example = [
            {
                "scores": np.array([0, 0, 0, 0, 0.9968166351318359, 0, 0, 0, 0]),
                "index": 1,
                "is_subword": False,
                "word": "En",
                "start": 0,
                "end": 2,
            },
            {
                "scores": np.array([0, 0, 0, 0, 0.9957635998725891, 0, 0, 0, 0]),
                "index": 2,
                "is_subword": True,
                "word": "##zo",
                "start": 2,
                "end": 4,
            },
            {
                "scores": np.array([0, 0, 0, 0, 0, 0.9986497163772583, 0, 0, 0]),
                "index": 7,
                "word": "UN",
                "is_subword": False,
                "start": 11,
                "end": 13,
            },
        ]
        self.assertEqual(
            nested_simplify(token_classifier.aggregate(example, AggregationStrategy.NONE)),
            [
                {"end": 2, "entity": "I-PER", "score": 0.997, "start": 0, "word": "En", "index": 1},
                {"end": 4, "entity": "I-PER", "score": 0.996, "start": 2, "word": "##zo", "index": 2},
                {"end": 13, "entity": "B-ORG", "score": 0.999, "start": 11, "word": "UN", "index": 7},
            ],
        )
        self.assertEqual(
            nested_simplify(token_classifier.aggregate(example, AggregationStrategy.SIMPLE)),
            [
                {"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4},
                {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
            ],
        )
        self.assertEqual(
            nested_simplify(token_classifier.aggregate(example, AggregationStrategy.FIRST)),
            [
                {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4},
                {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
            ],
        )
        self.assertEqual(
            nested_simplify(token_classifier.aggregate(example, AggregationStrategy.MAX)),
            [
                {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4},
                {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
            ],
        )
        self.assertEqual(
            nested_simplify(token_classifier.aggregate(example, AggregationStrategy.AVERAGE)),
            [
                {"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4},
                {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
            ],
        )
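
        # The strategies agree here because every subtoken has the same argmax;
        # only the reported score differs (FIRST/MAX keep a single subtoken's
        # score, SIMPLE/AVERAGE effectively average them: 0.997 vs 0.996).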

    @require_torch
    def test_aggregation_strategy_example2(self):
        model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
        tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
        token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt")
        # Spelled out so the score indices in this test are easy to follow
        self.assertEqual(
            token_classifier.model.config.id2label,
            {0: "O", 1: "B-MISC", 2: "I-MISC", 3: "B-PER", 4: "I-PER", 5: "B-ORG", 6: "I-ORG", 7: "B-LOC", 8: "I-LOC"},
        )
        example = [
            {
                # The 0.55/0.45 split is what makes the AVERAGE case below interesting
                "scores": np.array([0, 0.55, 0, 0.45, 0, 0, 0, 0, 0]),
                "is_subword": False,
                "index": 1,
                "word": "Ra",
                "start": 0,
                "end": 2,
            },
            {
                "scores": np.array([0, 0, 0, 0.2, 0, 0, 0, 0.8, 0, 0]),
                "is_subword": True,
                "word": "##ma",
                "start": 2,
                "end": 4,
                "index": 2,
            },
            {
                # The 4th score (index 3, i.e. B-PER for this model) has the
                # highest average across the word, even though it is not the
                # argmax of any individual subtoken.
                "scores": np.array([0, 0, 0, 0.4, 0, 0, 0.6, 0, 0]),
                "is_subword": True,
                "word": "##zotti",
                "start": 11,
                "end": 13,
                "index": 3,
            },
        ]
        self.assertEqual(
            token_classifier.aggregate(example, AggregationStrategy.NONE),
            [
                {"end": 2, "entity": "B-MISC", "score": 0.55, "start": 0, "word": "Ra", "index": 1},
                {"end": 4, "entity": "B-LOC", "score": 0.8, "start": 2, "word": "##ma", "index": 2},
                {"end": 13, "entity": "I-ORG", "score": 0.6, "start": 11, "word": "##zotti", "index": 3},
            ],
        )

        self.assertEqual(
            token_classifier.aggregate(example, AggregationStrategy.FIRST),
            [{"entity_group": "MISC", "score": 0.55, "word": "Ramazotti", "start": 0, "end": 13}],
        )
        self.assertEqual(
            token_classifier.aggregate(example, AggregationStrategy.MAX),
            [{"entity_group": "LOC", "score": 0.8, "word": "Ramazotti", "start": 0, "end": 13}],
        )
        self.assertEqual(
            nested_simplify(token_classifier.aggregate(example, AggregationStrategy.AVERAGE)),
            [{"entity_group": "PER", "score": 0.35, "word": "Ramazotti", "start": 0, "end": 13}],
        )
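
        # Arithmetic behind the AVERAGE assertion (all three tokens form one
        # word, so their score vectors are averaged before the argmax):
        #   index 3 (B-PER): (0.45 + 0.2 + 0.4) / 3 = 0.35   <- winner
        #   index 7 (B-LOC): (0    + 0.8 + 0  ) / 3 ~= 0.27
        #   index 6 (I-ORG): (0    + 0   + 0.6) / 3 = 0.20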

    @require_torch
    def test_gather_pre_entities(self):
        model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
        tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
        token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt")

        sentence = "Hello there"

        tokens = tokenizer(
            sentence,
            return_attention_mask=False,
            return_tensors="pt",
            truncation=True,
            return_special_tokens_mask=True,
            return_offsets_mapping=True,
        )
        offset_mapping = tokens.pop("offset_mapping").cpu().numpy()[0]
        special_tokens_mask = tokens.pop("special_tokens_mask").cpu().numpy()[0]
        input_ids = tokens["input_ids"].numpy()[0]
        # The first row of scores corresponds to the [CLS] special token
        scores = np.array([[1, 0, 0], [0.1, 0.3, 0.6], [0.8, 0.1, 0.1]])

        pre_entities = token_classifier.gather_pre_entities(
            sentence,
            input_ids,
            scores,
            offset_mapping,
            special_tokens_mask,
            aggregation_strategy=AggregationStrategy.NONE,
        )
        self.assertEqual(
            nested_simplify(pre_entities),
            [
                {"word": "Hello", "scores": [0.1, 0.3, 0.6], "start": 0, "end": 5, "is_subword": False, "index": 1},
                {
                    "word": "there",
                    "scores": [0.8, 0.1, 0.1],
                    "index": 2,
                    "start": 6,
                    "end": 11,
                    "is_subword": False,
                },
            ],
        )
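
        # Note: the [CLS] row of `scores` is skipped via special_tokens_mask,
        # which is why only the two word tokens come back as pre-entities.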

    @require_tf
    def test_tf_only(self):
        model_name = "Narsil/small"  # This model only has a TensorFlow version
        # We test that if we don't specify framework='tf', it gets detected automatically
        token_classifier = pipeline(task="ner", model=model_name)
        self.assertEqual(token_classifier.framework, "tf")

    @require_tf
    def test_small_model_tf(self):
        model_name = "Narsil/small2"
        token_classifier = pipeline(task="token-classification", model=model_name, framework="tf")
        outputs = token_classifier("This is a test !")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4},
                {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7},
            ],
        )

    @require_torch
    def test_no_offset_tokenizer(self):
        model_name = "Narsil/small2"
        tokenizer = AutoTokenizer.from_pretrained("Narsil/small2", use_fast=False)
        token_classifier = pipeline(task="token-classification", model=model_name, tokenizer=tokenizer, framework="pt")
        outputs = token_classifier("This is a test !")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": None, "end": None},
                {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": None, "end": None},
            ],
        )
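
        # Slow tokenizers do not provide offset mappings, hence start/end are None.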

    @require_torch
    def test_small_model_pt(self):
        model_name = "Narsil/small2"
        token_classifier = pipeline(task="token-classification", model=model_name, framework="pt")
        outputs = token_classifier("This is a test !")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4},
                {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7},
            ],
        )

    @require_torch
    def test_pt_ignore_subwords_slow_tokenizer_raises(self):
        model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
        tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)

        with self.assertRaises(ValueError):
            pipeline(task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.FIRST)
        with self.assertRaises(ValueError):
            pipeline(
                task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.AVERAGE
            )
        with self.assertRaises(ValueError):
            pipeline(task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.MAX)

    @slow
    @require_torch
    def test_simple(self):
        token_classifier = pipeline(task="ner", model="dslim/bert-base-NER", grouped_entities=True)
        sentence = "Hello Sarah Jessica Parker who Jessica lives in New York"
        sentence2 = "This is a simple test"
        output = token_classifier(sentence)

        output_ = nested_simplify(output)

        self.assertEqual(
            output_,
            [
                {
                    "entity_group": "PER",
                    "score": 0.996,
                    "word": "Sarah Jessica Parker",
                    "start": 6,
                    "end": 26,
                },
                {"entity_group": "PER", "score": 0.977, "word": "Jessica", "start": 31, "end": 38},
                {"entity_group": "LOC", "score": 0.999, "word": "New York", "start": 48, "end": 56},
            ],
        )

        output = token_classifier([sentence, sentence2])
        output_ = nested_simplify(output)

        self.assertEqual(
            output_,
            [
                [
                    {"entity_group": "PER", "score": 0.996, "word": "Sarah Jessica Parker", "start": 6, "end": 26},
                    {"entity_group": "PER", "score": 0.977, "word": "Jessica", "start": 31, "end": 38},
                    {"entity_group": "LOC", "score": 0.999, "word": "New York", "start": 48, "end": 56},
                ],
                [],
            ],
        )
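
        # A sentence with no detected entities yields an empty list, so batched
        # outputs can mix populated and empty per-sentence results.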


@is_pipeline_test
class TokenClassificationArgumentHandlerTestCase(unittest.TestCase):
    def setUp(self):
        self.args_parser = TokenClassificationArgumentHandler()

    def test_simple(self):
        string = "This is a simple input"

        inputs, offset_mapping = self.args_parser(string)
        self.assertEqual(inputs, [string])
        self.assertEqual(offset_mapping, None)

        inputs, offset_mapping = self.args_parser([string, string])
        self.assertEqual(inputs, [string, string])
        self.assertEqual(offset_mapping, None)

        inputs, offset_mapping = self.args_parser(string, offset_mapping=[(0, 1), (1, 2)])
        self.assertEqual(inputs, [string])
        self.assertEqual(offset_mapping, [[(0, 1), (1, 2)]])

        inputs, offset_mapping = self.args_parser(
            [string, string], offset_mapping=[[(0, 1), (1, 2)], [(0, 2), (2, 3)]]
        )
        self.assertEqual(inputs, [string, string])
        self.assertEqual(offset_mapping, [[(0, 1), (1, 2)], [(0, 2), (2, 3)]])
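
        # The handler normalizes a single string to a one-element list and
        # wraps a single offset_mapping to match, as asserted above.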

    def test_errors(self):
        string = "This is a simple input"

        # 2 sentences, 1 offset_mapping (nested), passed as positional args
        with self.assertRaises(TypeError):
            self.args_parser(string, string, offset_mapping=[[(0, 1), (1, 2)]])

        # 2 sentences, 1 offset_mapping (flat), passed as positional args
        with self.assertRaises(TypeError):
            self.args_parser(string, string, offset_mapping=[(0, 1), (1, 2)])

        # 2 sentences, 1 offset_mapping, input_list
        with self.assertRaises(ValueError):
            self.args_parser([string, string], offset_mapping=[[(0, 1), (1, 2)]])

        # 2 sentences, 1 offset_mapping, input_list
        with self.assertRaises(ValueError):
            self.args_parser([string, string], offset_mapping=[(0, 1), (1, 2)])

        # 1 sentence, 2 offset_mappings
        with self.assertRaises(ValueError):
            self.args_parser(string, offset_mapping=[[(0, 1), (1, 2)], [(0, 2), (2, 3)]])

        # 0 sentences, 1 offset_mapping
        with self.assertRaises(TypeError):
            self.args_parser(offset_mapping=[[(0, 1), (1, 2)]])