# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import csv
import json
import os
import pickle
import sys
import uuid
import warnings
from abc import ABC, abstractmethod
from collections.abc import Iterable
from contextlib import contextmanager
from os.path import abspath, exists
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from uuid import UUID

import numpy as np

from .configuration_utils import PretrainedConfig
from .data import SquadExample, SquadFeatures, squad_convert_examples_to_features
from .file_utils import add_end_docstrings, is_tf_available, is_torch_available
from .modelcard import ModelCard
from .models.auto.configuration_auto import AutoConfig
from .models.auto.tokenization_auto import AutoTokenizer
from .models.bert.tokenization_bert import BasicTokenizer
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_utils_base import PaddingStrategy
from .utils import logging


if is_tf_available():
    import tensorflow as tf

    from .models.auto.modeling_tf_auto import (
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TF_MODEL_WITH_LM_HEAD_MAPPING,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTokenClassification,
    )

if is_torch_available():
    import torch

    from .models.auto.modeling_auto import (
        MODEL_FOR_MASKED_LM_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelForTokenClassification,
    )

if TYPE_CHECKING:
    from .modeling_tf_utils import TFPreTrainedModel
    from .modeling_utils import PreTrainedModel


logger = logging.get_logger(__name__)


def get_framework(model, revision: Optional[str] = None):
    """
    Select framework (TensorFlow or PyTorch) to use.

    Args:
        model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
            If both frameworks are installed, picks the one corresponding to the model passed (either a model class or
            the model name). If no specific model is provided, defaults to using PyTorch.
    """
    if not is_tf_available() and not is_torch_available():
        raise RuntimeError(
            "At least one of TensorFlow 2.0 or PyTorch should be installed. "
            "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
            "To install PyTorch, read the instructions at https://pytorch.org/."
        )
    if isinstance(model, str):
        if is_torch_available() and not is_tf_available():
            model = AutoModel.from_pretrained(model, revision=revision)
        elif is_tf_available() and not is_torch_available():
            model = TFAutoModel.from_pretrained(model, revision=revision)
        else:
            try:
                model = AutoModel.from_pretrained(model, revision=revision)
            except OSError:
                model = TFAutoModel.from_pretrained(model, revision=revision)

    framework = "tf" if model.__class__.__name__.startswith("TF") else "pt"
    return framework

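# Illustrative sketch (not part of the library API): how `get_framework` resolves
# the framework when both backends are installed. The checkpoint name is only an
# example.
#
#   get_framework("bert-base-uncased")  # tries AutoModel first, falls back to TFAutoModel on OSError
#   get_framework(some_tf_model)        # class name starts with "TF" -> "tf", otherwise "pt"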

def get_default_model(targeted_task: Dict, framework: Optional[str], task_options: Optional[Any]) -> str:
    """
    Select a default model to use for a given task. Defaults to pytorch if ambiguous.

    Args:
        targeted_task (:obj:`Dict`):
           Dictionary representing the given task, which should contain the default models.

        framework (:obj:`str`, None)
           "pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet.

        task_options (:obj:`Any`, None)
           Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for the
           translation task.

    Returns:

        :obj:`str` The model string representing the default model for this pipeline
    """
    if is_torch_available() and not is_tf_available():
        framework = "pt"
    elif is_tf_available() and not is_torch_available():
        framework = "tf"

    defaults = targeted_task["default"]
    if task_options:
        if task_options not in defaults:
            raise ValueError("The task does not provide any default models for options {}".format(task_options))
        default_models = defaults[task_options]["model"]
    elif "model" in defaults:
        default_models = targeted_task["default"]["model"]
    else:
        # XXX This error message needs to be updated to be more generic if more tasks are going to become
        # parametrized
        raise ValueError('The task defaults can\'t be correctly selected. You probably meant "translation_XX_to_YY"')

    if framework is None:
        framework = "pt"

    return default_models[framework]

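# Sketch of the `targeted_task` shape this helper consumes (hypothetical values):
#
#   targeted_task = {"default": {"model": {"pt": "distilbert-base-cased", "tf": "distilbert-base-cased"}}}
#   get_default_model(targeted_task, framework=None, task_options=None)  # -> "distilbert-base-cased"
#
# For parametrized tasks (e.g. translation), the defaults are keyed by `task_options`
# first: targeted_task["default"][(src, tgt)]["model"][framework].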

class PipelineException(Exception):
    """
    Raised by a :class:`~transformers.Pipeline` when handling __call__.

    Args:
        task (:obj:`str`): The task of the pipeline.
        model (:obj:`str`): The model used by the pipeline.
        reason (:obj:`str`): The error message to display.
    """

    def __init__(self, task: str, model: str, reason: str):
        super().__init__(reason)

        self.task = task
        self.model = model


class ArgumentHandler(ABC):
    """
    Base interface for handling arguments for each :class:`~transformers.pipelines.Pipeline`.
    """

    @abstractmethod
    def __call__(self, *args, **kwargs):
        raise NotImplementedError()


class PipelineDataFormat:
    """
    Base class for all the pipeline supported data formats, both for reading and writing. Supported data formats
    currently include:

    - JSON
    - CSV
    - stdin/stdout (pipe)

    :obj:`PipelineDataFormat` also includes some utilities to work with multi-column data, like mapping from dataset
    columns to pipeline keyword arguments through the :obj:`dataset_kwarg_1=dataset_column_1` format.

    Args:
        output_path (:obj:`str`, `optional`): Where to save the outgoing data.
        input_path (:obj:`str`, `optional`): Where to look for the input data.
        column (:obj:`str`, `optional`): The column to read.
        overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to overwrite the :obj:`output_path`.
    """

    SUPPORTED_FORMATS = ["json", "csv", "pipe"]

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite: bool = False,
    ):
        self.output_path = output_path
        self.input_path = input_path
        self.column = column.split(",") if column is not None else [""]
        self.is_multi_columns = len(self.column) > 1

        if self.is_multi_columns:
            self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column]

        if output_path is not None and not overwrite:
            if exists(abspath(self.output_path)):
                raise OSError("{} already exists on disk".format(self.output_path))

        if input_path is not None:
            if not exists(abspath(self.input_path)):
                raise OSError("{} doesn't exist on disk".format(self.input_path))

    @abstractmethod
    def __iter__(self):
        raise NotImplementedError()

    @abstractmethod
    def save(self, data: Union[dict, List[dict]]):
        """
        Save the provided data object with the representation for the current
        :class:`~transformers.pipelines.PipelineDataFormat`.

        Args:
            data (:obj:`dict` or list of :obj:`dict`): The data to store.
        """
        raise NotImplementedError()

    def save_binary(self, data: Union[dict, List[dict]]) -> str:
        """
        Save the provided data object as pickle-formatted binary data on the disk.

        Args:
            data (:obj:`dict` or list of :obj:`dict`): The data to store.

        Returns:
            :obj:`str`: Path where the data has been saved.
        """
        path, _ = os.path.splitext(self.output_path)
        binary_path = os.path.extsep.join((path, "pickle"))

        with open(binary_path, "wb+") as f_output:
            pickle.dump(data, f_output)

        return binary_path

    @staticmethod
    def from_str(
        format: str,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ) -> "PipelineDataFormat":
        """
        Creates an instance of the right subclass of :class:`~transformers.pipelines.PipelineDataFormat` depending on
        :obj:`format`.

        Args:
            format (:obj:`str`):
                The format of the desired pipeline. Acceptable values are :obj:`"json"`, :obj:`"csv"` or :obj:`"pipe"`.
            output_path (:obj:`str`, `optional`):
                Where to save the outgoing data.
            input_path (:obj:`str`, `optional`):
                Where to look for the input data.
            column (:obj:`str`, `optional`):
                The column to read.
            overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to overwrite the :obj:`output_path`.

        Returns:
            :class:`~transformers.pipelines.PipelineDataFormat`: The proper data format.
        """
        if format == "json":
            return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        elif format == "csv":
            return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        elif format == "pipe":
            return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        else:
            raise KeyError("Unknown reader {} (Available readers are json/csv/pipe)".format(format))

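# Usage sketch (hypothetical paths): build a reader/writer from CLI-style
# arguments and map dataset columns to pipeline keyword arguments.
#
#   data_format = PipelineDataFormat.from_str(
#       format="csv",
#       output_path="predictions.csv",
#       input_path="inputs.csv",
#       column="question=q,context=c",  # multi-column mapping, kwarg=column
#       overwrite=True,
#   )
#   for item in data_format:
#       ...  # {"question": row["q"], "context": row["c"]} for each CSV row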

class CsvPipelineDataFormat(PipelineDataFormat):
    """
    Support for pipelines using CSV data format.

    Args:
        output_path (:obj:`str`, `optional`): Where to save the outgoing data.
        input_path (:obj:`str`, `optional`): Where to look for the input data.
        column (:obj:`str`, `optional`): The column to read.
        overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to overwrite the :obj:`output_path`.
    """

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ):
        super().__init__(output_path, input_path, column, overwrite=overwrite)

    def __iter__(self):
        with open(self.input_path, "r") as f:
            reader = csv.DictReader(f)
            for row in reader:
                if self.is_multi_columns:
                    yield {k: row[c] for k, c in self.column}
                else:
                    yield row[self.column[0]]

    def save(self, data: List[dict]):
        """
        Save the provided data object with the representation for the current
        :class:`~transformers.pipelines.PipelineDataFormat`.

        Args:
            data (:obj:`List[dict]`): The data to store.
        """
        with open(self.output_path, "w") as f:
            if len(data) > 0:
                writer = csv.DictWriter(f, list(data[0].keys()))
                writer.writeheader()
                writer.writerows(data)


class JsonPipelineDataFormat(PipelineDataFormat):
    """
    Support for pipelines using JSON file format.

    Args:
        output_path (:obj:`str`, `optional`): Where to save the outgoing data.
        input_path (:obj:`str`, `optional`): Where to look for the input data.
        column (:obj:`str`, `optional`): The column to read.
        overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to overwrite the :obj:`output_path`.
    """

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ):
        super().__init__(output_path, input_path, column, overwrite=overwrite)

        with open(input_path, "r") as f:
            self._entries = json.load(f)

    def __iter__(self):
        for entry in self._entries:
            if self.is_multi_columns:
                yield {k: entry[c] for k, c in self.column}
            else:
                yield entry[self.column[0]]

    def save(self, data: dict):
        """
        Save the provided data object in a json file.

        Args:
            data (:obj:`dict`): The data to store.
        """
        with open(self.output_path, "w") as f:
            json.dump(data, f)


class PipedPipelineDataFormat(PipelineDataFormat):
    """
    Read data from piped input to the python process. For multi-column data, columns should be separated by \t

    If columns are provided, then the output will be a dictionary with {column_x: value_x}

    Args:
        output_path (:obj:`str`, `optional`): Where to save the outgoing data.
        input_path (:obj:`str`, `optional`): Where to look for the input data.
        column (:obj:`str`, `optional`): The column to read.
        overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to overwrite the :obj:`output_path`.
    """

    def __iter__(self):
        for line in sys.stdin:
            # Split for multi-columns
            if "\t" in line:
                line = line.split("\t")
                if self.column:
                    # Dictionary to map arguments
                    yield {kwargs: l for (kwargs, _), l in zip(self.column, line)}
                else:
                    yield tuple(line)

            # No dictionary to map arguments
            else:
                yield line

    def save(self, data: dict):
        """
        Print the data.

        Args:
            data (:obj:`dict`): The data to store.
        """
        print(data)

    def save_binary(self, data: Union[dict, List[dict]]) -> str:
        if self.output_path is None:
            raise KeyError(
                "When using piped input, a pipeline that outputs large objects requires an output file path. "
                "Please provide such an output path through the --output argument."
            )

        return super().save_binary(data)

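# Illustrative mapping performed by PipedPipelineDataFormat.__iter__: with
# column="question=q,context=c", a tab-separated stdin line "What?\tSome context"
# is yielded as {"question": "What?", "context": "Some context"}.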
class _ScikitCompat(ABC):
    """
    Interface layer for the Scikit and Keras compatibility.
    """

    @abstractmethod
    def transform(self, X):
        raise NotImplementedError()

    @abstractmethod
    def predict(self, X):
        raise NotImplementedError()


PIPELINE_INIT_ARGS = r"""
    Arguments:
        model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
            :class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
            TensorFlow.
        tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
            The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
            :class:`~transformers.PreTrainedTokenizer`.
        modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
            Model card attributed to the model for this pipeline.
        framework (:obj:`str`, `optional`):
            The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
            must be installed.

            If no framework is specified, will default to the one currently installed. If no framework is specified and
            both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no model
            is provided.
        task (:obj:`str`, defaults to :obj:`""`):
            A task-identifier for the pipeline.
        args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`):
            Reference to the object in charge of parsing supplied pipeline parameters.
        device (:obj:`int`, `optional`, defaults to -1):
            Device ordinal for CPU/GPU support. Setting this to -1 will leverage CPU, a positive integer will run the
            model on the associated CUDA device id.
        binary_output (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Flag indicating if the output of the pipeline should happen in a binary format (i.e., pickle) or as raw
            text.
"""


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Pipeline(_ScikitCompat):
    """
    The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across
    different pipelines.

    Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following
    operations:

        Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output

    Pipeline supports running on CPU or GPU through the device argument (see below).

    Some pipelines, like for instance :class:`~transformers.FeatureExtractionPipeline` (:obj:`'feature-extraction'`)
    output large tensor objects as nested lists. In order to avoid dumping such large structures as textual data we
    provide the :obj:`binary_output` constructor argument. If set to :obj:`True`, the output will be stored in the
    pickle format.
    """

    default_input_names = None

    def __init__(
        self,
        model: Union["PreTrainedModel", "TFPreTrainedModel"],
        tokenizer: PreTrainedTokenizer,
        modelcard: Optional[ModelCard] = None,
        framework: Optional[str] = None,
        task: str = "",
        args_parser: ArgumentHandler = None,
        device: int = -1,
        binary_output: bool = False,
    ):

        if framework is None:
            framework = get_framework(model)

        self.task = task
        self.model = model
        self.tokenizer = tokenizer
        self.modelcard = modelcard
        self.framework = framework
        self.device = device if framework == "tf" else torch.device("cpu" if device < 0 else "cuda:{}".format(device))
        self.binary_output = binary_output

        # Special handling
        if self.framework == "pt" and self.device.type == "cuda":
            self.model = self.model.to(self.device)

        # Update config with task specific parameters
        task_specific_params = self.model.config.task_specific_params
        if task_specific_params is not None and task in task_specific_params:
            self.model.config.update(task_specific_params.get(task))

    def save_pretrained(self, save_directory: str):
        """
        Save the pipeline's model and tokenizer.

        Args:
            save_directory (:obj:`str`):
                A path to the directory where to save. It will be created if it doesn't exist.
        """
        if os.path.isfile(save_directory):
            logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
            return
        os.makedirs(save_directory, exist_ok=True)

        self.model.save_pretrained(save_directory)
        self.tokenizer.save_pretrained(save_directory)
        if self.modelcard is not None:
            self.modelcard.save_pretrained(save_directory)

    def transform(self, X):
        """
        Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
        """
        return self(X=X)

    def predict(self, X):
        """
        Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
        """
        return self(X=X)

    @contextmanager
    def device_placement(self):
        """
        Context Manager allowing tensor allocation on the user-specified device in a framework agnostic way.

        Returns:
            Context manager

        Examples::

            # Explicitly ask for tensor allocation on CUDA device :0
            pipe = pipeline(..., device=0)
            with pipe.device_placement():
                # Every framework specific tensor allocation will be done on the requested device
                output = pipe(...)
        """
        if self.framework == "tf":
            with tf.device("/CPU:0" if self.device == -1 else "/device:GPU:{}".format(self.device)):
                yield
        else:
            if self.device.type == "cuda":
                torch.cuda.set_device(self.device)

            yield

    def ensure_tensor_on_device(self, **inputs):
        """
        Ensure PyTorch tensors are on the specified device.

        Args:
            inputs (keyword arguments that should be :obj:`torch.Tensor`): The tensors to place on :obj:`self.device`.

        Return:
            :obj:`Dict[str, torch.Tensor]`: The same as :obj:`inputs` but on the proper device.
        """
        return {name: tensor.to(self.device) for name, tensor in inputs.items()}

    def check_model_type(self, supported_models: Union[List[str], dict]):
        """
        Check if the model class is supported by the pipeline.

        Args:
            supported_models (:obj:`List[str]` or :obj:`dict`):
                The list of models supported by the pipeline, or a dictionary with model class values.
        """
        if not isinstance(supported_models, list):  # Create from a model mapping
            supported_models = [item[1].__name__ for item in supported_models.items()]
        if self.model.__class__.__name__ not in supported_models:
            raise PipelineException(
                self.task,
                self.model.base_model_prefix,
                f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are {supported_models}",
            )

    def _parse_and_tokenize(self, inputs, padding=True, add_special_tokens=True, **kwargs):
        """
        Parse arguments and tokenize
        """
        # Parse arguments
        inputs = self.tokenizer(
            inputs,
            add_special_tokens=add_special_tokens,
            return_tensors=self.framework,
            padding=padding,
        )

        return inputs

    def __call__(self, *args, **kwargs):
        inputs = self._parse_and_tokenize(*args, **kwargs)
        return self._forward(inputs)

    def _forward(self, inputs, return_tensors=False):
        """
        Internal framework specific forward dispatching

        Args:
            inputs: dict holding all the keyword arguments required by the model forward method.
            return_tensors: Whether to return native framework (pt/tf) tensors rather than a numpy array

        Returns:
            Numpy array
        """
        # Encode for forward
        with self.device_placement():
            if self.framework == "tf":
                # TODO trace model
                predictions = self.model(inputs.data, training=False)[0]
            else:
                with torch.no_grad():
                    inputs = self.ensure_tensor_on_device(**inputs)
                    predictions = self.model(**inputs)[0].cpu()

        if return_tensors:
            return predictions
        else:
            return predictions.numpy()

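# Call-flow sketch for Pipeline subclasses (illustrative, not a public API):
# __call__ tokenizes via _parse_and_tokenize(), then dispatches to _forward(),
# which runs the model under device_placement() and returns a numpy array.
#
#   pipe = SomeTaskPipeline(model=model, tokenizer=tokenizer)  # hypothetical subclass
#   logits = pipe("An input sentence")  # np.ndarray unless the subclass post-processes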

# Can't use @add_end_docstrings(PIPELINE_INIT_ARGS) here because this one does not accept `binary_output`
class FeatureExtractionPipeline(Pipeline):
    """
    Feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base
    transformer, which can be used as features in downstream tasks.

    This feature extraction pipeline can currently be loaded from :func:`~transformers.pipeline` using the task
    identifier: :obj:`"feature-extraction"`.

    All models may be used for this pipeline. See a list of all models, including community-contributed models on
    `huggingface.co/models <https://huggingface.co/models>`__.

    Arguments:
        model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
            :class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
            TensorFlow.
        tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
            The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
            :class:`~transformers.PreTrainedTokenizer`.
        modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
            Model card attributed to the model for this pipeline.
        framework (:obj:`str`, `optional`):
            The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
            must be installed.

            If no framework is specified, will default to the one currently installed. If no framework is specified and
            both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no model
            is provided.
        task (:obj:`str`, defaults to :obj:`""`):
            A task-identifier for the pipeline.
        args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`):
            Reference to the object in charge of parsing supplied pipeline parameters.
        device (:obj:`int`, `optional`, defaults to -1):
            Device ordinal for CPU/GPU support. Setting this to -1 will leverage CPU, a positive integer will run the
            model on the associated CUDA device id.
    """

    def __init__(
        self,
        model: Union["PreTrainedModel", "TFPreTrainedModel"],
        tokenizer: PreTrainedTokenizer,
        modelcard: Optional[ModelCard] = None,
        framework: Optional[str] = None,
        args_parser: ArgumentHandler = None,
        device: int = -1,
        task: str = "",
    ):
        super().__init__(
            model=model,
            tokenizer=tokenizer,
            modelcard=modelcard,
            framework=framework,
            args_parser=args_parser,
            device=device,
            binary_output=True,
            task=task,
        )

    def __call__(self, *args, **kwargs):
        """
        Extract the features of the input(s).

        Args:
            args (:obj:`str` or :obj:`List[str]`): One or several texts (or one list of texts) to get the features of.

        Return:
            A nested list of :obj:`float`: The features computed by the model.
        """
        return super().__call__(*args, **kwargs).tolist()

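# Usage sketch (checkpoint name illustrative; assumes the module-level
# `pipeline()` factory defined later in this file):
#
#   extractor = pipeline("feature-extraction", model="distilbert-base-cased")
#   features = extractor("We are very happy.")  # nested list [batch][seq_len][hidden_size]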

@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """
    Language generation pipeline using any :obj:`ModelWithLMHead`. This pipeline predicts the words that will follow a
    specified text prompt.

    This language generation pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
    task identifier: :obj:`"text-generation"`.

    The models that this pipeline can use are models that have been trained with an autoregressive language modeling
    objective, which includes the uni-directional models in the library (e.g. gpt2). See the list of available
    community models on `huggingface.co/models <https://huggingface.co/models?filter=causal-lm>`__.
    """

    # Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
    # in https://github.com/rusiaaman/XLNet-gen#methodology
    # and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e

    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    ALLOWED_MODELS = [
        "XLNetLMHeadModel",
        "TransfoXLLMHeadModel",
        "ReformerModelWithLMHead",
        "GPT2LMHeadModel",
        "OpenAIGPTLMHeadModel",
        "CTRLLMHeadModel",
        "TFXLNetLMHeadModel",
        "TFTransfoXLLMHeadModel",
        "TFGPT2LMHeadModel",
        "TFOpenAIGPTLMHeadModel",
        "TFCTRLLMHeadModel",
    ]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(self.ALLOWED_MODELS)

    # overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments

    def _parse_and_tokenize(self, inputs, padding=True, add_special_tokens=True, **kwargs):
        """
        Parse arguments and tokenize
        """
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            tokenizer_kwargs = {"add_space_before_punct_symbol": True}
        else:
            tokenizer_kwargs = {}
        inputs = self.tokenizer(
            inputs,
            add_special_tokens=add_special_tokens,
            return_tensors=self.framework,
            padding=padding,
            **tokenizer_kwargs,
        )

        return inputs

    def __call__(
        self,
        text_inputs,
        return_tensors=False,
        return_text=True,
        clean_up_tokenization_spaces=False,
        prefix=None,
        **generate_kwargs
    ):
        """
        Complete the prompt(s) given as inputs.

        Args:
            args (:obj:`str` or :obj:`List[str]`):
                One or several prompts (or one list of prompts) to complete.
            return_tensors (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            return_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to include the decoded texts in the outputs.
            clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to clean up the potential extra spaces in the text output.
            prefix (:obj:`str`, `optional`):
                Prefix added to the prompt.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework `here <./model.html#generative-models>`__).

        Return:
            A list or a list of list of :obj:`dict`: Each result comes as a dictionary with the following keys:

            - **generated_text** (:obj:`str`, present when ``return_text=True``) -- The generated text.
            - **generated_token_ids** (:obj:`torch.Tensor` or :obj:`tf.Tensor`, present when ``return_tensors=True``)
              -- The token ids of the generated text.
        """

        if isinstance(text_inputs, str):
            text_inputs = [text_inputs]
        results = []
        for prompt_text in text_inputs:
            # Manage correct placement of the tensors
            with self.device_placement():
                prefix = prefix if prefix is not None else self.model.config.prefix
                if prefix is None and self.model.__class__.__name__ in [
                    "XLNetLMHeadModel",
                    "TransfoXLLMHeadModel",
                    "TFXLNetLMHeadModel",
                    "TFTransfoXLLMHeadModel",
                ]:
                    # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                    prefix = self.XL_PREFIX

                if prefix:
                    prefix_inputs = self._parse_and_tokenize(prefix, padding=False, add_special_tokens=False)
                    # This impacts the max_length and min_length arguments that need adjusting.
                    prefix_length = prefix_inputs["input_ids"].shape[-1]
                    if generate_kwargs.get("max_length", None) is not None:
                        generate_kwargs["max_length"] += prefix_length
                    if generate_kwargs.get("min_length", None) is not None:
                        generate_kwargs["min_length"] += prefix_length

                prefix = prefix or ""
                inputs = self._parse_and_tokenize(prefix + prompt_text, padding=False, add_special_tokens=False)

                # set input_ids to None to allow empty prompt
                if inputs["input_ids"].shape[-1] == 0:
                    inputs["input_ids"] = None
                    inputs["attention_mask"] = None

                if self.framework == "pt" and inputs["input_ids"] is not None:
                    inputs = self.ensure_tensor_on_device(**inputs)

                input_ids = inputs["input_ids"]

                # Ensure that batch size = 1 (batch generation not allowed for now)
                assert (
                    input_ids is None or input_ids.shape[0] == 1
                ), "Batch generation is currently not supported. See https://github.com/huggingface/transformers/issues/3021 for more information."

                output_sequences = self.model.generate(input_ids=input_ids, **generate_kwargs)  # BS x SL

            result = []
            for generated_sequence in output_sequences:
                if self.framework == "pt" and generated_sequence is not None:
                    generated_sequence = generated_sequence.cpu()
                generated_sequence = generated_sequence.numpy().tolist()
                record = {}
                if return_tensors:
                    record["generated_token_ids"] = generated_sequence
                if return_text:
                    # Decode text
                    text = self.tokenizer.decode(
                        generated_sequence,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )

                    # Remove PADDING prompt from the sequence if an XLNet or Transfo-XL model is used
                    if input_ids is None:
                        prompt_length = 0
                    else:
                        prompt_length = len(
                            self.tokenizer.decode(
                                input_ids[0],
                                skip_special_tokens=True,
                                clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                            )
                        )

                    record["generated_text"] = prompt_text + text[prompt_length:]

                result.append(record)
            results += [result]

        if len(results) == 1:
            return results[0]

        return results

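# Usage sketch (model name illustrative):
#
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Once upon a time,", max_length=30)
#   # -> [{'generated_text': 'Once upon a time, ...'}]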

@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to return all prediction scores or just the one of the predicted class.
    """,
)
class TextClassificationPipeline(Pipeline):
    """
    Text classification pipeline using any :obj:`ModelForSequenceClassification`. See the `sequence classification
    examples <../task_summary.html#sequence-classification>`__ for more information.

    This text classification pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
    task identifier: :obj:`"sentiment-analysis"` (for classifying sequences according to positive or negative
    sentiments).

    If multiple classification labels are available (:obj:`model.config.num_labels >= 2`), the pipeline will run a
    softmax over the results. If there is a single label, the pipeline will run a sigmoid over the result.

    The models that this pipeline can use are models that have been fine-tuned on a sequence classification task. See
    the up-to-date list of available models on `huggingface.co/models
    <https://huggingface.co/models?filter=text-classification>`__.
    """

    def __init__(self, return_all_scores: bool = False, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

        self.return_all_scores = return_all_scores

    def __call__(self, *args, **kwargs):
        """
        Classify the text(s) given as inputs.

        Args:
            args (:obj:`str` or :obj:`List[str]`):
                One or several texts (or one list of texts) to classify.

        Return:
            A list or a list of list of :obj:`dict`: Each result comes as list of dictionaries with the following keys:

            - **label** (:obj:`str`) -- The label predicted.
            - **score** (:obj:`float`) -- The corresponding probability.

            If ``self.return_all_scores=True``, one such dictionary is returned per label.
        """
        outputs = super().__call__(*args, **kwargs)

        if self.model.config.num_labels == 1:
            scores = 1.0 / (1.0 + np.exp(-outputs))
        else:
            scores = np.exp(outputs) / np.exp(outputs).sum(-1, keepdims=True)
        if self.return_all_scores:
            return [
                [{"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(item)]
                for item in scores
            ]
        else:
            return [
                {"label": self.model.config.id2label[item.argmax()], "score": item.max().item()} for item in scores
            ]

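# Usage sketch: the label strings come from the model's `config.id2label`, so
# the exact values below are model-dependent.
#
#   classifier = pipeline("sentiment-analysis")
#   classifier("This movie was great!")
#   # -> [{'label': 'POSITIVE', 'score': 0.99...}]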

class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",")]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]
        labels = self._parse_labels(labels)

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs

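# Expansion sketch for the handler above: every (sequence, label) combination
# becomes one NLI premise/hypothesis pair.
#
#   handler = ZeroShotClassificationArgumentHandler()
#   handler("Who are you voting for?", ["politics", "economics"], "This example is {}.")
#   # -> [["Who are you voting for?", "This example is politics."],
#   #     ["Who are you voting for?", "This example is economics."]]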

Sylvain Gugger's avatar
Sylvain Gugger committed
1025
@add_end_docstrings(PIPELINE_INIT_ARGS)
1026
1027
class ZeroShotClassificationPipeline(Pipeline):
    """
Sylvain Gugger's avatar
Sylvain Gugger committed
1028
1029
    NLI-based zero-shot classification pipeline using a :obj:`ModelForSequenceClassification` trained on NLI (natural
    language inference) tasks.
1030
1031

    Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis
Sylvain Gugger's avatar
Sylvain Gugger committed
1032
    pair and passed to the pretrained model. Then, the logit for `entailment` is taken as the logit for the candidate
1033
1034
    label being valid. Any NLI model can be used, but the id of the `entailment` label must be included in the model
    config's :attr:`~transformers.PretrainedConfig.label2id`.
1035

Sylvain Gugger's avatar
Sylvain Gugger committed
1036
1037
    This NLI pipeline can currently be loaded from :func:`~transformers.pipeline` using the following task identifier:
    :obj:`"zero-shot-classification"`.
1038

Sylvain Gugger's avatar
Sylvain Gugger committed
1039
1040
    The models that this pipeline can use are models that have been fine-tuned on an NLI task. See the up-to-date list
    of available models on `huggingface.co/models <https://huggingface.co/models?search=nli>`__.
1041
1042
1043
    """

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
1044
1045
        super().__init__(*args, **kwargs)
        self._args_parser = args_parser
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
1058

1059
    def _parse_and_tokenize(
1060
        self, sequences, candidate_labels, hypothesis_template, padding=True, add_special_tokens=True, **kwargs
1061
    ):
1062
1063
1064
        """
        Parse arguments and tokenize only_first so that hypothesis (label) is not truncated
        """
1065
        sequence_pairs = self._args_parser(sequences, candidate_labels, hypothesis_template)
1066
        inputs = self.tokenizer(
1067
            sequence_pairs,
1068
1069
1070
1071
1072
1073
1074
1075
            add_special_tokens=add_special_tokens,
            return_tensors=self.framework,
            padding=padding,
            truncation="only_first",
        )

        return inputs

1076
1077
1078
1079
1080
1081
1082
    def __call__(
        self,
        sequences: Union[str, List[str]],
        candidate_labels,
        hypothesis_template="This example is {}.",
        multi_class=False,
    ):
1083
        """
1084
1085
        Classify the sequence(s) given as inputs. See the :obj:`~transformers.ZeroShotClassificationPipeline`
        documentation for more information.
1086
1087

        Args:
            sequences (:obj:`str` or :obj:`List[str]`):
                The sequence(s) to classify. Sequences will be truncated if the model input is too large.
            candidate_labels (:obj:`str` or :obj:`List[str]`):
                The set of possible class labels to classify each sequence into. Can be a single label, a string of
                comma-separated labels, or a list of labels.
            hypothesis_template (:obj:`str`, `optional`, defaults to :obj:`"This example is {}."`):
                The template used to turn each label into an NLI-style hypothesis. This template must include a {} or
                similar syntax for the candidate label to be inserted into the template. For example, the default
                template is :obj:`"This example is {}."` With the candidate label :obj:`"sports"`, this would be fed
                into the model like :obj:`"<cls> sequence to classify <sep> This example is sports . <sep>"`. The
                default template works well in many cases, but it may be worthwhile to experiment with different
                templates depending on the task setting.
            multi_class (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not multiple candidate labels can be true. If :obj:`False`, the scores are normalized such
                that the sum of the label likelihoods for each sequence is 1. If :obj:`True`, the labels are considered
                independent and probabilities are normalized for each candidate by doing a softmax of the entailment
                score vs. the contradiction score.

        Return:
            A :obj:`dict` or a list of :obj:`dict`: Each result comes as a dictionary with the following keys:

            - **sequence** (:obj:`str`) -- The sequence for which this is the output.
            - **labels** (:obj:`List[str]`) -- The labels sorted by order of likelihood.
            - **scores** (:obj:`List[float]`) -- The probabilities for each of the labels.
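
        Example (illustrative; assumes a ``classifier`` built as in the class docstring sketch)::

            classifier(
                "I have a problem with my iphone that needs to be resolved asap!",
                candidate_labels=["urgent", "not urgent", "phone", "computer"],
                multi_class=True,  # labels scored independently; scores no longer sum to 1
            )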
        """
        if sequences and isinstance(sequences, str):
            sequences = [sequences]

        outputs = super().__call__(sequences, candidate_labels, hypothesis_template)
        num_sequences = len(sequences)
        candidate_labels = self._args_parser._parse_labels(candidate_labels)
        reshaped_outputs = outputs.reshape((num_sequences, len(candidate_labels), -1))

        if len(candidate_labels) == 1:
            multi_class = True

        if not multi_class:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
        else:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]

        result = []
        for iseq in range(num_sequences):
            top_inds = list(reversed(scores[iseq].argsort()))
            result.append(
                {
                    "sequence": sequences[iseq],
                    "labels": [candidate_labels[i] for i in top_inds],
                    "scores": scores[iseq][top_inds].tolist(),
                }
            )

        if len(result) == 1:
            return result[0]
        return result


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (:obj:`int`, defaults to 5): The number of predictions to return.
    """,
)
class FillMaskPipeline(Pipeline):
    """
    Masked language modeling prediction pipeline using any :obj:`ModelWithLMHead`. See the `masked language modeling
    examples <../task_summary.html#masked-language-modeling>`__ for more information.
    This mask filling pipeline can currently be loaded from :func:`~transformers.pipeline` using the following task
    identifier: :obj:`"fill-mask"`.

    The models that this pipeline can use are models that have been trained with a masked language modeling objective,
    which includes the bi-directional models in the library. See the up-to-date list of available models on
    `huggingface.co/models <https://huggingface.co/models?filter=masked-lm>`__.
    .. note::
        This pipeline only works for inputs with exactly one token masked.
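
    Usage (a minimal sketch; the checkpoint name is illustrative)::

        fill_masker = pipeline("fill-mask", model="distilroberta-base")
        fill_masker(f"HuggingFace is creating a {fill_masker.tokenizer.mask_token} that the community uses.")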
    """

    def __init__(
        self,
        model: Union["PreTrainedModel", "TFPreTrainedModel"],
        tokenizer: PreTrainedTokenizer,
        modelcard: Optional[ModelCard] = None,
        framework: Optional[str] = None,
        args_parser: ArgumentHandler = None,
        device: int = -1,
        top_k=5,
        task: str = "",
    ):
        super().__init__(
            model=model,
            tokenizer=tokenizer,
            modelcard=modelcard,
            framework=framework,
            args_parser=args_parser,
            device=device,
            binary_output=True,
            task=task,
        )

        self.check_model_type(TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_MASKED_LM_MAPPING)
        self.top_k = top_k

    def ensure_exactly_one_mask_token(self, masked_index: np.ndarray):
        numel = np.prod(masked_index.shape)
        if numel > 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"More than one mask_token ({self.tokenizer.mask_token}) is not supported",
            )
        elif numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def __call__(self, *args, targets=None, top_k: Optional[int] = None, **kwargs):
        """
        Fill the masked token in the text(s) given as inputs.

        Args:
            args (:obj:`str` or :obj:`List[str]`):
                One or several texts (or one list of prompts) with masked tokens.
            targets (:obj:`str` or :obj:`List[str]`, `optional`):
                When passed, the model will return the scores for the passed token or tokens rather than the top k
                predictions in the entire vocabulary. If the provided targets are not in the model vocab, they will be
                tokenized and the first resulting token will be used (with a warning).
            top_k (:obj:`int`, `optional`):
                When passed, overrides the number of predictions to return.

        Return:
            A list or a list of list of :obj:`dict`: Each result comes as list of dictionaries with the following keys:

            - **sequence** (:obj:`str`) -- The corresponding input with the mask token prediction.
            - **score** (:obj:`float`) -- The corresponding probability.
            - **token** (:obj:`int`) -- The predicted token id (to replace the masked one).
            - **token_str** (:obj:`str`) -- The predicted token (to replace the masked one).
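
        Example (illustrative; assumes the ``fill_masker`` pipeline from the class docstring sketch, and target
        strings that follow the model's vocabulary conventions)::

            fill_masker(
                f"The capital of France is {fill_masker.tokenizer.mask_token}.",
                targets=["Paris", "London"],  # score only these tokens
                top_k=2,
            )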
        """
        inputs = self._parse_and_tokenize(*args, **kwargs)
        outputs = self._forward(inputs, return_tensors=True)

        results = []
        batch_size = outputs.shape[0] if self.framework == "tf" else outputs.size(0)

        if targets is not None:
            if len(targets) == 0 or len(targets[0]) == 0:
                raise ValueError("At least one target must be provided when passed.")
            if isinstance(targets, str):
                targets = [targets]

            targets_proc = []
            for target in targets:
                target_enc = self.tokenizer.tokenize(target)
                if len(target_enc) > 1 or target_enc[0] == self.tokenizer.unk_token:
                    logger.warning(
                        "The specified target token `{}` does not exist in the model vocabulary. Replacing with `{}`.".format(
                            target, target_enc[0]
                        )
                    )
                targets_proc.append(target_enc[0])
            target_inds = np.array(self.tokenizer.convert_tokens_to_ids(targets_proc))

        for i in range(batch_size):
            input_ids = inputs["input_ids"][i]
            result = []

            if self.framework == "tf":
                masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()

                # Fill mask pipeline supports only one ${mask_token} per sample
                self.ensure_exactly_one_mask_token(masked_index)

                logits = outputs[i, masked_index.item(), :]
                probs = tf.nn.softmax(logits)
                if targets is None:
                    topk = tf.math.top_k(probs, k=top_k if top_k is not None else self.top_k)
                    values, predictions = topk.values.numpy(), topk.indices.numpy()
                else:
                    values = tf.gather_nd(probs, tf.reshape(target_inds, (-1, 1)))
                    sort_inds = tf.reverse(tf.argsort(values), [0])
                    values = tf.gather_nd(values, tf.reshape(sort_inds, (-1, 1))).numpy()
                    predictions = target_inds[sort_inds.numpy()]
            else:
                masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)

                # Fill mask pipeline supports only one ${mask_token} per sample
                self.ensure_exactly_one_mask_token(masked_index.numpy())
                logits = outputs[i, masked_index.item(), :]
                probs = logits.softmax(dim=0)
                if targets is None:
                    values, predictions = probs.topk(top_k if top_k is not None else self.top_k)
                else:
                    values = probs[..., target_inds]
                    sort_inds = list(reversed(values.argsort(dim=-1)))
                    values = values[..., sort_inds]
                    predictions = target_inds[sort_inds]

            for v, p in zip(values.tolist(), predictions.tolist()):
                tokens = input_ids.numpy()
                tokens[masked_index] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                result.append(
                    {
                        "sequence": self.tokenizer.decode(tokens),
                        "score": v,
                        "token": p,
                        "token_str": self.tokenizer.convert_ids_to_tokens(p),
                    }
                )

            # Append
            results += [result]

        if len(results) == 1:
            return results[0]
        return results


class TokenClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for token classification.
    """

    def __call__(self, *args, **kwargs):

        if args is not None and len(args) > 0:
            inputs = list(args)
            batch_size = len(inputs)
        else:
            raise ValueError("At least one input is required.")
        offset_mapping = kwargs.get("offset_mapping")
        if offset_mapping:
            if isinstance(offset_mapping, list) and isinstance(offset_mapping[0], tuple):
                offset_mapping = [offset_mapping]
            if len(offset_mapping) != batch_size:
                raise ValueError("offset_mapping should have the same batch size as the input")
        return inputs, offset_mapping


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        ignore_labels (:obj:`List[str]`, defaults to :obj:`["O"]`):
            A list of labels to ignore.
        grouped_entities (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to group the tokens corresponding to the same entity together in the predictions or not.
    """,
)
class TokenClassificationPipeline(Pipeline):
    """
    Named Entity Recognition pipeline using any :obj:`ModelForTokenClassification`. See the `named entity recognition
    examples <../task_summary.html#named-entity-recognition>`__ for more information.

    This token recognition pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
    task identifier: :obj:`"ner"` (for predicting the classes of tokens in a sequence: person, organisation, location
    or miscellaneous).

    The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the
    up-to-date list of available models on `huggingface.co/models
    <https://huggingface.co/models?filter=token-classification>`__.
    """
    default_input_names = "sequences"

    def __init__(
        self,
        model: Union["PreTrainedModel", "TFPreTrainedModel"],
        tokenizer: PreTrainedTokenizer,
        modelcard: Optional[ModelCard] = None,
        framework: Optional[str] = None,
        args_parser: ArgumentHandler = TokenClassificationArgumentHandler(),
        device: int = -1,
        binary_output: bool = False,
        ignore_labels=["O"],
        task: str = "",
        grouped_entities: bool = False,
        ignore_subwords: bool = False,
    ):
        super().__init__(
            model=model,
            tokenizer=tokenizer,
            modelcard=modelcard,
            framework=framework,
            device=device,
            binary_output=binary_output,
            task=task,
        )

        self.check_model_type(
            TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
        )

        self._basic_tokenizer = BasicTokenizer(do_lower_case=False)
        self._args_parser = args_parser
        self.ignore_labels = ignore_labels
        self.grouped_entities = grouped_entities
        self.ignore_subwords = ignore_subwords

        if self.ignore_subwords and not self.tokenizer.is_fast:
            raise ValueError(
                "Slow tokenizers cannot ignore subwords. Please set the `ignore_subwords` option "
                "to `False` or use a fast tokenizer."
            )

    def __call__(self, inputs: Union[str, List[str]], **kwargs):
        """
        Classify each token of the text(s) given as inputs.

        Args:
            inputs (:obj:`str` or :obj:`List[str]`):
                One or several texts (or one list of texts) for token classification.

        Return:
            A list or a list of list of :obj:`dict`: Each result comes as a list of dictionaries (one for each token in
            the corresponding input, or each entity if this pipeline was instantiated with
            :obj:`grouped_entities=True`) with the following keys:

            - **word** (:obj:`str`) -- The token/word classified.
            - **score** (:obj:`float`) -- The corresponding probability for :obj:`entity`.
            - **entity** (:obj:`str`) -- The entity predicted for that token/word.
            - **index** (:obj:`int`, only present when ``self.grouped_entities=False``) -- The index of the
              corresponding token in the sentence.
        """
        inputs, offset_mappings = self._args_parser(inputs, **kwargs)
        answers = []

        for i, sentence in enumerate(inputs):
            # Manage correct placement of the tensors
            with self.device_placement():
                tokens = self.tokenizer(
                    sentence,
                    return_attention_mask=False,
                    return_tensors=self.framework,
                    truncation=True,
                    return_special_tokens_mask=True,
                    return_offsets_mapping=self.tokenizer.is_fast,
                )
                if self.tokenizer.is_fast:
                    offset_mapping = tokens.pop("offset_mapping")
                    # tf.Tensor has no `.cpu()`; only torch tensors need to be moved off the device
                    if self.framework == "tf":
                        offset_mapping = offset_mapping.numpy()[0]
                    else:
                        offset_mapping = offset_mapping.cpu().numpy()[0]
                elif offset_mappings:
                    offset_mapping = offset_mappings[i]
                else:
                    offset_mapping = None

                special_tokens_mask = tokens.pop("special_tokens_mask")
                if self.framework == "tf":
                    special_tokens_mask = special_tokens_mask.numpy()[0]
                else:
                    special_tokens_mask = special_tokens_mask.cpu().numpy()[0]

                # Forward
                if self.framework == "tf":
                    entities = self.model(tokens.data)[0][0].numpy()
                    input_ids = tokens["input_ids"].numpy()[0]
                else:
                    with torch.no_grad():
                        tokens = self.ensure_tensor_on_device(**tokens)
                        entities = self.model(**tokens)[0][0].cpu().numpy()
                        input_ids = tokens["input_ids"].cpu().numpy()[0]

            score = np.exp(entities) / np.exp(entities).sum(-1, keepdims=True)
            labels_idx = score.argmax(axis=-1)
            entities = []
            # Filter to labels not in `self.ignore_labels`
            # Filter special_tokens
            filtered_labels_idx = [
                (idx, label_idx)
                for idx, label_idx in enumerate(labels_idx)
                if (self.model.config.id2label[label_idx] not in self.ignore_labels) and not special_tokens_mask[idx]
            ]

            for idx, label_idx in filtered_labels_idx:
                if offset_mapping is not None:
                    start_ind, end_ind = offset_mapping[idx]
                    word_ref = sentence[start_ind:end_ind]
                    word = self.tokenizer.convert_ids_to_tokens([int(input_ids[idx])])[0]
                    is_subword = len(word_ref) != len(word)

                    if int(input_ids[idx]) == self.tokenizer.unk_token_id:
                        word = word_ref
                        is_subword = False
                else:
                    word = self.tokenizer.convert_ids_to_tokens(int(input_ids[idx]))
                entity = {
                    "word": word,
                    "score": score[idx][label_idx].item(),
                    "entity": self.model.config.id2label[label_idx],
                    "index": idx,
                }

                if self.grouped_entities and self.ignore_subwords:
                    entity["is_subword"] = is_subword

                entities += [entity]
            if self.grouped_entities:
                answers += [self.group_entities(entities)]
            else:
                # Append ungrouped entities
                answers += [entities]

        if len(answers) == 1:
            return answers[0]
        return answers

    def group_sub_entities(self, entities: List[dict]) -> dict:
        """
        Group together the adjacent tokens with the same entity predicted.

        Args:
            entities (:obj:`dict`): The entities predicted by the pipeline.
        """
        # Get the first entity in the entity group
        entity = entities[0]["entity"].split("-")[-1]
        scores = np.nanmean([entity["score"] for entity in entities])
        tokens = [entity["word"] for entity in entities]

        entity_group = {
            "entity_group": entity,
            "score": np.mean(scores),
            "word": self.tokenizer.convert_tokens_to_string(tokens),
        }
        return entity_group

    def group_entities(self, entities: List[dict]) -> List[dict]:
        """
        Find and group together the adjacent tokens with the same entity predicted.

        Args:
            entities (:obj:`dict`): The entities predicted by the pipeline.
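
        For example (illustrative), IOB-tagged subword entities::

            [{"word": "Wolf", "entity": "B-PER", ...}, {"word": "##gang", "entity": "I-PER", ...}]

        are merged into a single ``{"entity_group": "PER", "word": "Wolfgang", ...}`` result.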
        """

        entity_groups = []
        entity_group_disagg = []

        if entities:
            last_idx = entities[-1]["index"]

        for entity in entities:
            is_last_idx = entity["index"] == last_idx
            is_subword = self.ignore_subwords and entity["is_subword"]
            if not entity_group_disagg:
                entity_group_disagg += [entity]
                if is_last_idx:
                    entity_groups += [self.group_sub_entities(entity_group_disagg)]
                continue

            # If the current entity is similar and adjacent to the previous entity, append it to the disaggregated entity group
            # The split is meant to account for the "B" and "I" suffixes
            # Shouldn't merge if both entities are B-type
            if (
                (
                    entity["entity"].split("-")[-1] == entity_group_disagg[-1]["entity"].split("-")[-1]
                    and entity["entity"].split("-")[0] != "B"
                )
                and entity["index"] == entity_group_disagg[-1]["index"] + 1
            ) or is_subword:
                # Modify subword type to be previous_type
                if is_subword:
                    entity["entity"] = entity_group_disagg[-1]["entity"].split("-")[-1]
                    entity["score"] = np.nan  # set ignored scores to nan and use np.nanmean

                entity_group_disagg += [entity]
                # Group the entities at the last entity
                if is_last_idx:
                    entity_groups += [self.group_sub_entities(entity_group_disagg)]
            # If the current entity is different from the previous entity, aggregate the disaggregated entity group
            else:
                entity_groups += [self.group_sub_entities(entity_group_disagg)]
                entity_group_disagg = [entity]
                # If it's the last entity, add it to the entity groups
                if is_last_idx:
                    entity_groups += [self.group_sub_entities(entity_group_disagg)]

        return entity_groups

NerPipeline = TokenClassificationPipeline


class QuestionAnsweringArgumentHandler(ArgumentHandler):
    """
    QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped to
    internal :class:`~transformers.SquadExample`.
    QuestionAnsweringArgumentHandler manages all the possible ways to create a :class:`~transformers.SquadExample`
    from the command-line supplied arguments.
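
    For example, the following calls (an illustrative sketch) all normalize to the same list with a single
    :class:`~transformers.SquadExample`::

        handler = QuestionAnsweringArgumentHandler()
        handler(question="Where do I live?", context="I live in Berlin.")
        handler({"question": "Where do I live?", "context": "I live in Berlin."})
        handler(X={"question": "Where do I live?", "context": "I live in Berlin."})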
    """
    def normalize(self, item):
        if isinstance(item, SquadExample):
            return item
        elif isinstance(item, dict):
            for k in ["question", "context"]:
                if k not in item:
                    raise KeyError("You need to provide a dictionary with keys {question:..., context:...}")
                elif item[k] is None:
                    raise ValueError("`{}` cannot be None".format(k))
                elif isinstance(item[k], str) and len(item[k]) == 0:
                    raise ValueError("`{}` cannot be empty".format(k))

            return QuestionAnsweringPipeline.create_sample(**item)
        raise ValueError("{} argument needs to be of type (SquadExample, dict)".format(item))

    def __call__(self, *args, **kwargs):
        # Detect where the actual inputs are
        if args is not None and len(args) > 0:
            if len(args) == 1:
                inputs = args[0]
            elif len(args) == 2 and {type(el) for el in args} == {str}:
                inputs = [{"question": args[0], "context": args[1]}]
            else:
                inputs = list(args)
        # Generic compatibility with sklearn and Keras
        # Batched data
        elif "X" in kwargs:
            inputs = kwargs["X"]
        elif "data" in kwargs:
            inputs = kwargs["data"]
        elif "question" in kwargs and "context" in kwargs:
            if isinstance(kwargs["question"], list) and isinstance(kwargs["context"], str):
                inputs = [{"question": Q, "context": kwargs["context"]} for Q in kwargs["question"]]
            elif isinstance(kwargs["question"], list) and isinstance(kwargs["context"], list):
                if len(kwargs["question"]) != len(kwargs["context"]):
                    raise ValueError("Questions and contexts don't have the same lengths")

                inputs = [{"question": Q, "context": C} for Q, C in zip(kwargs["question"], kwargs["context"])]
            elif isinstance(kwargs["question"], str) and isinstance(kwargs["context"], str):
                inputs = [{"question": kwargs["question"], "context": kwargs["context"]}]
            else:
                raise ValueError("Arguments can't be understood")
        else:
            raise ValueError("Unknown arguments {}".format(kwargs))
        # Normalize inputs
        if isinstance(inputs, dict):
            inputs = [inputs]
        elif isinstance(inputs, Iterable):
            # Copy to avoid overriding arguments
            inputs = [i for i in inputs]
        else:
            raise ValueError("Invalid arguments {}".format(inputs))

        for i, item in enumerate(inputs):
            inputs[i] = self.normalize(item)

        return inputs


@add_end_docstrings(PIPELINE_INIT_ARGS)
class QuestionAnsweringPipeline(Pipeline):
    """
    Question Answering pipeline using any :obj:`ModelForQuestionAnswering`. See the `question answering examples
    <../task_summary.html#question-answering>`__ for more information.
    This question answering pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
    task identifier: :obj:`"question-answering"`.
    The models that this pipeline can use are models that have been fine-tuned on a question answering task. See the
    up-to-date list of available models on `huggingface.co/models
    <https://huggingface.co/models?filter=question-answering>`__.
    """

    default_input_names = "question,context"

    def __init__(
        self,
        model: Union["PreTrainedModel", "TFPreTrainedModel"],
        tokenizer: PreTrainedTokenizer,
        modelcard: Optional[ModelCard] = None,
        framework: Optional[str] = None,
        device: int = -1,
        task: str = "",
        **kwargs
    ):
        super().__init__(
            model=model,
            tokenizer=tokenizer,
            modelcard=modelcard,
            framework=framework,
            device=device,
            task=task,
            **kwargs,
        )
        self._args_parser = QuestionAnsweringArgumentHandler()
        self.check_model_type(
            TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING if self.framework == "tf" else MODEL_FOR_QUESTION_ANSWERING_MAPPING
        )

    @staticmethod
    def create_sample(
        question: Union[str, List[str]], context: Union[str, List[str]]
    ) -> Union[SquadExample, List[SquadExample]]:
        """
        QuestionAnsweringPipeline leverages the :class:`~transformers.SquadExample` internally. This helper method
        encapsulates all the logic for converting question(s) and context(s) to :class:`~transformers.SquadExample`.
        We currently support extractive question answering.
        Arguments:
            question (:obj:`str` or :obj:`List[str]`): The question(s) asked.
            context (:obj:`str` or :obj:`List[str]`): The context(s) in which we will look for the answer.
        Returns:
            One or a list of :class:`~transformers.SquadExample`: The corresponding :class:`~transformers.SquadExample`
            grouping question and context.
        """
        if isinstance(question, list):
            return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)]
        else:
            return SquadExample(None, question, context, None, None, None)

    def __call__(self, *args, **kwargs):
        """
        Answer the question(s) given as inputs by using the context(s).

        Args:
            args (:class:`~transformers.SquadExample` or a list of :class:`~transformers.SquadExample`):
                One or several :class:`~transformers.SquadExample` containing the question and context.
            X (:class:`~transformers.SquadExample` or a list of :class:`~transformers.SquadExample`, `optional`):
                One or several :class:`~transformers.SquadExample` containing the question and context (will be treated
                the same way as if passed as the first positional argument).
            data (:class:`~transformers.SquadExample` or a list of :class:`~transformers.SquadExample`, `optional`):
                One or several :class:`~transformers.SquadExample` containing the question and context (will be treated
                the same way as if passed as the first positional argument).
            question (:obj:`str` or :obj:`List[str]`):
                One or several question(s) (must be used in conjunction with the :obj:`context` argument).
            context (:obj:`str` or :obj:`List[str]`):
                One or several context(s) associated with the question(s) (must be used in conjunction with the
                :obj:`question` argument).
            topk (:obj:`int`, `optional`, defaults to 1):
                The number of answers to return (will be chosen by order of likelihood).
            doc_stride (:obj:`int`, `optional`, defaults to 128):
                If the context is too long to fit with the question for the model, it will be split in several chunks
                with some overlap. This argument controls the size of that overlap.
            max_answer_len (:obj:`int`, `optional`, defaults to 15):
                The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
            max_seq_len (:obj:`int`, `optional`, defaults to 384):
                The maximum length of the total sentence (context + question) after tokenization. The context will be
                split in several chunks (using :obj:`doc_stride`) if needed.
            max_question_len (:obj:`int`, `optional`, defaults to 64):
                The maximum length of the question after tokenization. It will be truncated if needed.
            handle_impossible_answer (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not we accept impossible as an answer.

        Return:
            A :obj:`dict` or a list of :obj:`dict`: Each result comes as a dictionary with the following keys:

            - **score** (:obj:`float`) -- The probability associated to the answer.
            - **start** (:obj:`int`) -- The start index of the answer (in the tokenized version of the input).
            - **end** (:obj:`int`) -- The end index of the answer (in the tokenized version of the input).
            - **answer** (:obj:`str`) -- The answer to the question.
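
        Example (illustrative; assumes the ``qa`` pipeline from the class docstring sketch)::

            qa(
                question="Where do I live?",
                context="My name is Wolfgang and I live in Berlin.",
                topk=2,  # return the two most likely answer spans
            )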
        """
        # Set default values
        kwargs.setdefault("padding", "longest")
        kwargs.setdefault("topk", 1)
        kwargs.setdefault("doc_stride", 128)
        kwargs.setdefault("max_answer_len", 15)
        kwargs.setdefault("max_seq_len", 384)
        kwargs.setdefault("max_question_len", 64)
        kwargs.setdefault("handle_impossible_answer", False)

        if kwargs["topk"] < 1:
            raise ValueError("topk parameter should be >= 1 (got {})".format(kwargs["topk"]))

        if kwargs["max_answer_len"] < 1:
            raise ValueError("max_answer_len parameter should be >= 1 (got {})".format(kwargs["max_answer_len"]))

        # Convert inputs to features
        examples = self._args_parser(*args, **kwargs)
        if not self.tokenizer.is_fast:
            features_list = [
                squad_convert_examples_to_features(
                    examples=[example],
                    tokenizer=self.tokenizer,
                    max_seq_length=kwargs["max_seq_len"],
                    doc_stride=kwargs["doc_stride"],
                    max_query_length=kwargs["max_question_len"],
                    padding_strategy=PaddingStrategy.MAX_LENGTH.value,
                    is_training=False,
                    tqdm_enabled=False,
                )
                for example in examples
            ]
        else:
            features_list = []
            for example in examples:
                # Define the side we want to truncate / pad and the text/pair sorting
                question_first = bool(self.tokenizer.padding_side == "right")

                encoded_inputs = self.tokenizer(
                    text=example.question_text if question_first else example.context_text,
                    text_pair=example.context_text if question_first else example.question_text,
                    padding=kwargs["padding"],
                    truncation="only_second" if question_first else "only_first",
                    max_length=kwargs["max_seq_len"],
                    stride=kwargs["doc_stride"],
                    return_tensors="np",
                    return_token_type_ids=True,
                    return_overflowing_tokens=True,
                    return_offsets_mapping=True,
                    return_special_tokens_mask=True,
                )

                # When the input is too long, it's converted in a batch of inputs with overflowing tokens
                # and a stride of overlap between the inputs. If a batch of inputs is given, a special output
                # "overflow_to_sample_mapping" indicate which member of the encoded batch belong to which original batch sample.
                # Here we tokenize examples one-by-one so we don't need to use "overflow_to_sample_mapping".
                # "num_span" is the number of output samples generated from the overflowing tokens.
                num_spans = len(encoded_inputs["input_ids"])

                # p_mask: mask with 1 for token that cannot be in the answer (0 for token which can be in an answer)
                # We put 0 on the tokens from the context and 1 everywhere else (question and special tokens)
                p_mask = np.asarray(
                    [
                        [tok != 1 if question_first else tok != 0 for tok in encoded_inputs.sequence_ids(span_id)]
                        for span_id in range(num_spans)
                    ]
                )

                # keep the cls_token unmasked (some models use it to indicate unanswerable questions)
                if self.tokenizer.cls_token_id is not None:
                    cls_index = np.nonzero(encoded_inputs["input_ids"] == self.tokenizer.cls_token_id)
                    p_mask[cls_index] = 0

                features = []
                for span_idx in range(num_spans):
                    features.append(
                        SquadFeatures(
                            input_ids=encoded_inputs["input_ids"][span_idx],
                            attention_mask=encoded_inputs["attention_mask"][span_idx],
                            token_type_ids=encoded_inputs["token_type_ids"][span_idx],
                            p_mask=p_mask[span_idx].tolist(),
                            encoding=encoded_inputs[span_idx],
                            # We don't use the rest of the values - and actually
                            # for Fast tokenizer we could totally avoid using SquadFeatures and SquadExample
                            cls_index=None,
                            token_to_orig_map={},
                            example_index=0,
                            unique_id=0,
                            paragraph_len=0,
                            token_is_max_context=0,
                            tokens=[],
                            start_position=0,
                            end_position=0,
                            is_impossible=False,
                            qas_id=None,
                        )
                    )
                features_list.append(features)

        all_answers = []
        for features, example in zip(features_list, examples):
            model_input_names = self.tokenizer.model_input_names + ["input_ids"]
            fw_args = {k: [feature.__dict__[k] for feature in features] for k in model_input_names}
            # Manage tensor allocation on correct device
            with self.device_placement():
                if self.framework == "tf":
                    fw_args = {k: tf.constant(v) for (k, v) in fw_args.items()}
                    start, end = self.model(fw_args)[:2]
                    start, end = start.numpy(), end.numpy()
                else:
                    with torch.no_grad():
                        # Retrieve the score for the context tokens only (removing question tokens)
                        fw_args = {k: torch.tensor(v, device=self.device) for (k, v) in fw_args.items()}
                        start, end = self.model(**fw_args)[:2]
                        start, end = start.cpu().numpy(), end.cpu().numpy()

            min_null_score = 1000000  # large and positive
            answers = []
            for (feature, start_, end_) in zip(features, start, end):
                # Ensure padded tokens & question tokens cannot belong to the set of candidate answers.
                undesired_tokens = np.abs(np.array(feature.p_mask) - 1) & feature.attention_mask
                # Generate mask
                undesired_tokens_mask = undesired_tokens == 0.0

                # Make sure non-context indexes in the tensor cannot contribute to the softmax
                start_ = np.where(undesired_tokens_mask, -10000.0, start_)
                end_ = np.where(undesired_tokens_mask, -10000.0, end_)
                # Normalize logits and spans to retrieve the answer
                start_ = np.exp(start_ - np.log(np.sum(np.exp(start_), axis=-1, keepdims=True)))
                end_ = np.exp(end_ - np.log(np.sum(np.exp(end_), axis=-1, keepdims=True)))

                if kwargs["handle_impossible_answer"]:
                    min_null_score = min(min_null_score, (start_[0] * end_[0]).item())

                # Mask CLS
                start_[0] = end_[0] = 0.0

                starts, ends, scores = self.decode(start_, end_, kwargs["topk"], kwargs["max_answer_len"])
                if not self.tokenizer.is_fast:
                    char_to_word = np.array(example.char_to_word_offset)

                    # Convert the answer (tokens) back to the original text
                    # Score: score from the model
                    # Start: Index of the first character of the answer in the context string
                    # End: Index of the character following the last character of the answer in the context string
                    # Answer: Plain text of the answer
                    answers += [
                        {
                            "score": score.item(),
                            "start": np.where(char_to_word == feature.token_to_orig_map[s])[0][0].item(),
                            "end": np.where(char_to_word == feature.token_to_orig_map[e])[0][-1].item(),
                            "answer": " ".join(
                                example.doc_tokens[feature.token_to_orig_map[s] : feature.token_to_orig_map[e] + 1]
                            ),
                        }
                        for s, e, score in zip(starts, ends, scores)
                    ]
                else:
                    # Convert the answer (tokens) back to the original text
                    # Score: score from the model
                    # Start: Index of the first character of the answer in the context string
                    # End: Index of the character following the last character of the answer in the context string
                    # Answer: Plain text of the answer
                    question_first = bool(self.tokenizer.padding_side == "right")
                    enc = feature.encoding

                    # Sometimes the max probability token is in the middle of a word so:
                    # - we start by finding the right word containing the token with `token_to_word`
                    # - then we convert this word in a character span with `word_to_chars`
                    answers += [
                        {
                            "score": score.item(),
                            "start": enc.word_to_chars(
                                enc.token_to_word(s), sequence_index=1 if question_first else 0
                            )[0],
                            "end": enc.word_to_chars(enc.token_to_word(e), sequence_index=1 if question_first else 0)[
                                1
                            ],
                            "answer": example.context_text[
                                enc.word_to_chars(enc.token_to_word(s), sequence_index=1 if question_first else 0)[
                                    0
                                ] : enc.word_to_chars(enc.token_to_word(e), sequence_index=1 if question_first else 0)[
                                    1
                                ]
                            ],
                        }
                        for s, e, score in zip(starts, ends, scores)
                    ]
            if kwargs["handle_impossible_answer"]:
                answers.append({"score": min_null_score, "start": 0, "end": 0, "answer": ""})

            answers = sorted(answers, key=lambda x: x["score"], reverse=True)[: kwargs["topk"]]
            all_answers += answers

        if len(all_answers) == 1:
            return all_answers[0]
        return all_answers
    def decode(self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int) -> Tuple:
        """
        Take the output of any :obj:`ModelForQuestionAnswering` and will generate probabilities for each span to be the
        actual answer.
        In addition, it filters out some unwanted/impossible cases like answer len being greater than max_answer_len or
        answer end position being before the starting position. The method supports outputting the k-best answers
        through the topk argument.
        Args:
            start (:obj:`np.ndarray`): Individual start probabilities for each token.
            end (:obj:`np.ndarray`): Individual end probabilities for each token.
            topk (:obj:`int`): Indicates how many possible answer span(s) to extract from the model output.
            max_answer_len (:obj:`int`): Maximum size of the answer to extract from the model's output.
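
        Example (illustrative numbers only, mirroring the masking below)::

            import numpy as np

            start, end = np.array([0.1, 0.6, 0.3]), np.array([0.2, 0.2, 0.6])
            outer = np.matmul(np.expand_dims(start[None], -1), np.expand_dims(end[None], 1))
            # np.triu drops spans whose end comes before their start; np.tril(..., max_answer_len - 1)
            # drops spans of length >= max_answer_len (here max_answer_len == 2)
            candidates = np.tril(np.triu(outer), 1)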
        """
        # Ensure we have batch axis
        if start.ndim == 1:
            start = start[None]

        if end.ndim == 1:
            end = end[None]

        # Compute the score of each tuple(start, end) to be the real answer
        outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))

        # Remove candidate with end < start and end - start > max_answer_len
        candidates = np.tril(np.triu(outer), max_answer_len - 1)

        #  Inspired by Chen & al. (https://github.com/facebookresearch/DrQA)
        scores_flat = candidates.flatten()
        if topk == 1:
            idx_sort = [np.argmax(scores_flat)]
        elif len(scores_flat) < topk:
            idx_sort = np.argsort(-scores_flat)
        else:
            idx = np.argpartition(-scores_flat, topk)[0:topk]
            idx_sort = idx[np.argsort(-scores_flat[idx])]

        start, end = np.unravel_index(idx_sort, candidates.shape)[1:]
        return start, end, candidates[0, start, end]

    def span_to_answer(self, text: str, start: int, end: int) -> Dict[str, Union[str, int]]:
        """
        When decoding from token probabilities, this method maps token indexes to actual word in the initial context.
        Args:
            text (:obj:`str`): The actual context to extract the answer from.
            start (:obj:`int`): The answer starting token index.
            end (:obj:`int`): The answer end token index.
        Returns:
            Dictionary like :obj:`{'answer': str, 'start': int, 'end': int}`
        """
        words = []
        token_idx = char_start_idx = char_end_idx = chars_idx = 0

        for i, word in enumerate(text.split(" ")):
            token = self.tokenizer.tokenize(word)

            # Append words if they are in the span
            if start <= token_idx <= end:
                if token_idx == start:
                    char_start_idx = chars_idx

                if token_idx == end:
                    char_end_idx = chars_idx + len(word)

                words += [word]

            # Stop if we went over the end of the answer
            if token_idx > end:
                break

            # Append the subtokenization length to the running index
            token_idx += len(token)
            chars_idx += len(word) + 1

        # Join text with spaces
        return {
            "answer": " ".join(words),
            "start": max(0, char_start_idx),
            "end": min(len(text), char_end_idx),
        }


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Pipeline):
    """
    Summarize news articles and other documents.

    This summarizing pipeline can currently be loaded from :func:`~transformers.pipeline` using the following task
    identifier: :obj:`"summarization"`.
    The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is
    currently, '`bart-large-cnn`', '`t5-small`', '`t5-base`', '`t5-large`', '`t5-3b`', '`t5-11b`'. See the up-to-date
    list of available models on `huggingface.co/models <https://huggingface.co/models?filter=summarization>`__.

    Usage::

        # use bart in pytorch
        summarizer = pipeline("summarization")
        summarizer("Sam Shleifer writes the best docstring examples in the whole world.", min_length=5, max_length=20)

        # use t5 in tf
        summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf")
        summarizer("Sam Shleifer writes the best docstring examples in the whole world.", min_length=5, max_length=20)
    """

    def __init__(self, *args, **kwargs):
        kwargs.update(task="summarization")
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )
    def __call__(
        self, *documents, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
    ):
        r"""
        Summarize the text(s) given as inputs.

        Args:
            documents (`str` or :obj:`List[str]`):
                One or several articles (or one list of articles) to summarize.
            return_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to include the decoded texts in the outputs.
            return_tensors (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to clean up the potential extra spaces in the text output.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework `here <./model.html#generative-models>`__).

        Return:
            A list or a list of lists of :obj:`dict`: Each result comes as a dictionary with the following keys:

            - **summary_text** (:obj:`str`, present when ``return_text=True``) -- The summary of the corresponding
              input.
            - **summary_token_ids** (:obj:`torch.Tensor` or :obj:`tf.Tensor`, present when ``return_tensors=True``) --
              The token ids of the summary.
        """
        assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"
        assert len(documents) > 0, "Please provide a document to summarize"

        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""

        if isinstance(documents[0], list):
            assert (
                self.tokenizer.pad_token_id is not None
            ), "Please make sure that the tokenizer has a pad_token_id when using a batch input"

            documents = ([prefix + document for document in documents[0]],)
            padding = True

        elif isinstance(documents[0], str):
            documents = (prefix + documents[0],)
            padding = False
        else:
            raise ValueError(
                "`documents[0]`: {} has the wrong format. It should be either of type `str` or of type `list`".format(
                    documents[0]
                )
            )

        with self.device_placement():
            inputs = self._parse_and_tokenize(*documents, padding=padding)

            if self.framework == "pt":
                inputs = self.ensure_tensor_on_device(**inputs)
                input_length = inputs["input_ids"].shape[-1]
            elif self.framework == "tf":
                input_length = tf.shape(inputs["input_ids"])[-1].numpy()

            min_length = generate_kwargs.get("min_length", self.model.config.min_length)
            if input_length < min_length // 2:
                logger.warning(
                    "Your min_length is set to {}, but your input_length is only {}. You might consider decreasing min_length manually, e.g. summarizer('...', min_length=10)".format(
                        min_length, input_length
                    )
                )

            max_length = generate_kwargs.get("max_length", self.model.config.max_length)
            if input_length < max_length:
                logger.warning(
                    "Your max_length is set to {}, but your input_length is only {}. You might consider decreasing max_length manually, e.g. summarizer('...', max_length=50)".format(
                        max_length, input_length
                    )
                )

            summaries = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **generate_kwargs,
            )

            results = []
            for summary in summaries:
                record = {}
                if return_tensors:
                    record["summary_token_ids"] = summary
                if return_text:
                    record["summary_text"] = self.tokenizer.decode(
                        summary,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                results.append(record)
            return results
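

# A minimal usage sketch for batch summarization (illustrative helper, never called
# at import time; assumes the default checkpoint can be downloaded). Passing a list
# of documents takes the batch path in `__call__` above, which requires the
# tokenizer to define a `pad_token_id`.
def _summarization_batch_example():
    summarizer = pipeline("summarization")
    articles = [
        "The tower is 324 metres tall, about the same height as an 81-storey building.",
        "The quick brown fox jumps over the lazy dog near the river bank at dawn.",
    ]
    return summarizer(articles, min_length=5, max_length=20)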


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Pipeline):
    """
    Translates from one language to another.

    This translation pipeline can currently be loaded from :func:`~transformers.pipeline` using the following task
    identifier: :obj:`"translation_xx_to_yy"`.

    The models that this pipeline can use are models that have been fine-tuned on a translation task. See the
    up-to-date list of available models on `huggingface.co/models
    <https://huggingface.co/models?filter=translation>`__.

    Usage::

        en_fr_translator = pipeline("translation_en_to_fr")
        en_fr_translator("How old are you?")
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def __call__(
        self, *args, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
    ):
        r"""
        Translate the text(s) given as inputs.

        Args:
            args (:obj:`str` or :obj:`List[str]`):
                Texts to be translated.
            return_tensors (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            return_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to include the decoded texts in the outputs.
            clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to clean up the potential extra spaces in the text output.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework `here <./model.html#generative-models>`__).

        Return:
            A list or a list of lists of :obj:`dict`: Each result comes as a dictionary with the following keys:

            - **translation_text** (:obj:`str`, present when ``return_text=True``) -- The translation.
            - **translation_token_ids** (:obj:`torch.Tensor` or :obj:`tf.Tensor`, present when ``return_tensors=True``)
              -- The token ids of the translation.
        """
        assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"

        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""

        if isinstance(args[0], list):
            assert (
                self.tokenizer.pad_token_id is not None
            ), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
            args = ([prefix + text for text in args[0]],)
            padding = True

        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                "`args[0]`: {} has the wrong format. It should be either of type `str` or of type `list`".format(
                    args[0]
                )
            )

        with self.device_placement():
            inputs = self._parse_and_tokenize(*args, padding=padding)

            if self.framework == "pt":
                inputs = self.ensure_tensor_on_device(**inputs)
                input_length = inputs["input_ids"].shape[-1]

            elif self.framework == "tf":
                input_length = tf.shape(inputs["input_ids"])[-1].numpy()

            max_length = generate_kwargs.get("max_length", self.model.config.max_length)
            if input_length > 0.9 * max_length:
                logger.warning(
                    "Your input_length: {} is bigger than 0.9 * max_length: {}. You might consider increasing your max_length manually, e.g. translator('...', max_length=400)".format(
                        input_length, max_length
                    )
                )

            translations = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **generate_kwargs,
            )
            results = []
            for translation in translations:
                record = {}
                if return_tensors:
                    record["translation_token_ids"] = translation
                if return_text:
                    record["translation_text"] = self.tokenizer.decode(
                        translation,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                results.append(record)
            return results
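

# A minimal usage sketch for translation (illustrative helper, never called at
# import time). Forwarding an explicit `max_length` through `generate_kwargs`
# avoids the warning emitted above when the input exceeds 0.9 * max_length.
def _translation_example():
    en_fr_translator = pipeline("translation_en_to_fr")
    return en_fr_translator("How old are you?", max_length=40)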


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    """
    Pipeline for text to text generation using seq2seq models.

    This Text2TextGenerationPipeline pipeline can currently be loaded from :func:`~transformers.pipeline` using the
    following task identifier: :obj:`"text2text-generation"`.

    The models that this pipeline can use are models that have been fine-tuned on a sequence-to-sequence task. See the
    up-to-date list of available models on `huggingface.co/models <https://huggingface.co/models?filter=seq2seq>`__.

    Usage::

        text2text_generator = pipeline("text2text-generation")
        text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def __call__(
        self, *args, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
    ):
        r"""
        Generate the output text(s) using text(s) given as inputs.

        Args:
            args (:obj:`str` or :obj:`List[str]`):
                Input text for the encoder.
            return_tensors (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            return_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to include the decoded texts in the outputs.
            clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to clean up the potential extra spaces in the text output.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework `here <./model.html#generative-models>`__).

        Return:
            A list or a list of lists of :obj:`dict`: Each result comes as a dictionary with the following keys:

            - **generated_text** (:obj:`str`, present when ``return_text=True``) -- The generated text.
            - **generated_token_ids** (:obj:`torch.Tensor` or :obj:`tf.Tensor`, present when ``return_tensors=True``)
              -- The token ids of the generated text.
        """
        assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"

        if isinstance(args[0], list):
            assert (
                self.tokenizer.pad_token_id is not None
            ), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
            padding = True

        elif isinstance(args[0], str):
            padding = False
        else:
            raise ValueError(
                "`args[0]`: {} has the wrong format. It should be either of type `str` or of type `list`".format(
                    args[0]
                )
            )

        with self.device_placement():
            inputs = self._parse_and_tokenize(*args, padding=padding)

            if self.framework == "pt":
                inputs = self.ensure_tensor_on_device(**inputs)

            generations = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **generate_kwargs,
            )
            results = []
            for generation in generations:
                record = {}
                if return_tensors:
                    record["generated_token_ids"] = generation
                if return_text:
                    record["generated_text"] = self.tokenizer.decode(
                        generation,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                results.append(record)
            return results
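

# A minimal usage sketch (illustrative helper): requesting both the decoded text
# and the raw token ids from the text2text pipeline, as documented in `__call__`
# above.
def _text2text_example():
    text2text_generator = pipeline("text2text-generation")
    return text2text_generator(
        "question: What is 42 ? context: 42 is the answer to life, the universe and everything",
        return_text=True,
        return_tensors=True,
    )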


class Conversation:
    """
    Utility class containing a conversation and its history. This class is meant to be used as an input to the
    :class:`~transformers.ConversationalPipeline`. The conversation contains a number of utility functions to manage
    the addition of new user input and generated model responses. A conversation needs to contain an unprocessed user
    input before being passed to the :class:`~transformers.ConversationalPipeline`. This user input is either created
    when the class is instantiated, or by calling :obj:`conversation.add_user_input("input")` after a conversation
    turn.

    Arguments:
        text (:obj:`str`, `optional`):
            The initial user input to start the conversation. If not provided, a user input needs to be provided
            manually using the :meth:`~transformers.Conversation.add_user_input` method before the conversation can
            begin.
        conversation_id (:obj:`uuid.UUID`, `optional`):
            Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the
            conversation.

    Usage::

        conversation = Conversation("Going to the movies tonight - any suggestions?")

        # Steps usually performed by the model when generating a response:
        # 1. Mark the user input as processed (moved to the history)
        conversation.mark_processed()
        # 2. Append a model response
        conversation.append_response("The Big Lebowski.")

        conversation.add_user_input("Is it good?")
    """

    def __init__(self, text: str = None, conversation_id: UUID = None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        self.uuid: UUID = conversation_id
        self.past_user_inputs: List[str] = []
        self.generated_responses: List[str] = []
        self.history: List[int] = []
        self.new_user_input: Optional[str] = text

    def add_user_input(self, text: str, overwrite: bool = False):
        """
        Add a user input to the conversation for the next round. This populates the internal :obj:`new_user_input`
        field.

        Args:
            text (:obj:`str`): The user input for the next conversation round.
            overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not existing and unprocessed user input should be overwritten when this function is called.
        """
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    'User input added while unprocessed input was existing: "{}" was overwritten with: "{}".'.format(
                        self.new_user_input, text
                    )
                )
                self.new_user_input = text
            else:
                logger.warning(
                    'User input added while unprocessed input was existing: "{}" new input ignored: "{}". '
                    "Set `overwrite` to True to overwrite unprocessed user input".format(self.new_user_input, text)
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """
        Mark the conversation as processed (moves the content of :obj:`new_user_input` to :obj:`past_user_inputs`) and
        empties the :obj:`new_user_input` field.
        """
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        """
        Append a response to the list of generated responses.

        Args:
            response (:obj:`str`): The model generated response.
        """
        self.generated_responses.append(response)

    def set_history(self, history: List[int]):
        """
        Updates the value of the history of the conversation. The history is represented by a list of :obj:`token_ids`.
        The history is used by the model to generate responses based on the previous conversation turns.

        Args:
            history (:obj:`List[int]`): History of tokens provided and generated for this conversation.
        """
        self.history = history

    def __repr__(self):
        """
        Generates a string representation of the conversation.

        Return:
            :obj:`str`:

            Example::

                Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114
                user >> Going to the movies tonight - any suggestions?
                bot >> The Big Lebowski
        """
        output = "Conversation id: {} \n".format(self.uuid)
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            output += "user >> {} \n".format(user_input)
            output += "bot >> {} \n".format(generated_response)
        if self.new_user_input is not None:
            output += "user >> {} \n".format(self.new_user_input)
        return output
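

# Illustrative sketch of a full conversation turn without a model in the loop
# (hypothetical helper): `mark_processed` and `append_response` are normally called
# by the ConversationalPipeline; here they are called by hand to show the state
# transitions of a Conversation object.
def _conversation_lifecycle_example() -> Conversation:
    conversation = Conversation("Going to the movies tonight - any suggestions?")
    conversation.mark_processed()  # the user input moves to past_user_inputs
    conversation.append_response("The Big Lebowski.")
    conversation.add_user_input("Is it good?")  # queued for the next round
    return conversation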


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (:obj:`int`, `optional`, defaults to 32):
            The minimum length (in number of tokens) for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    """
    Multi-turn conversational pipeline.

    This conversational pipeline can currently be loaded from :func:`~transformers.pipeline` using the following task
    identifier: :obj:`"conversational"`.

    The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task,
    currently: `'microsoft/DialoGPT-small'`, `'microsoft/DialoGPT-medium'`, `'microsoft/DialoGPT-large'`. See the
    up-to-date list of available models on `huggingface.co/models
    <https://huggingface.co/models?filter=conversational>`__.

    Usage::

        conversational_pipeline = pipeline("conversational")

        conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
        conversation_2 = Conversation("What's the last book you have read?")

        conversational_pipeline([conversation_1, conversation_2])

        conversation_1.add_user_input("Is it an action movie?")
        conversation_2.add_user_input("What is the genre of this book?")

        conversational_pipeline([conversation_1, conversation_2])
    """

    def __init__(self, min_length_for_response=32, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # We need at least an eos_token
        assert self.tokenizer.eos_token_id is not None, "ConversationalPipeline tokenizer should have an EOS token set"
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        self.min_length_for_response = min_length_for_response

    def __call__(
        self,
        conversations: Union[Conversation, List[Conversation]],
        clean_up_tokenization_spaces=True,
        **generate_kwargs
    ):
        r"""
        Generate responses for the conversation(s) given as inputs.

        Args:
            conversations (a :class:`~transformers.Conversation` or a list of :class:`~transformers.Conversation`):
                Conversations to generate responses for.
            clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to clean up the potential extra spaces in the text output.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework `here <./model.html#generative-models>`__).

        Returns:
            :class:`~transformers.Conversation` or a list of :class:`~transformers.Conversation`: Conversation(s) with
            updated generated responses for those containing a new user input.
        """

        if isinstance(conversations, Conversation):
            conversations = [conversations]
        # Input validation
        if isinstance(conversations, list):
            for conversation in conversations:
                assert isinstance(
                    conversation, Conversation
                ), "ConversationalPipeline expects a Conversation or list of Conversations as an input"
                if conversation.new_user_input is None:
                    raise ValueError(
                        "Conversation with UUID {} does not contain new user input to process. "
                        "Add user inputs with the conversation's `add_user_input` method".format(conversation.uuid)
                    )
            assert (
                self.tokenizer.pad_token_id is not None or self.tokenizer.eos_token_id is not None
            ), "Please make sure that the tokenizer has a pad_token_id or eos_token_id when using a batch input"
        else:
            raise ValueError("ConversationalPipeline expects a Conversation or list of Conversations as an input")

        with self.device_placement():

            inputs = self._parse_and_tokenize([conversation.new_user_input for conversation in conversations])
            histories = [conversation.history for conversation in conversations]
            max_length = generate_kwargs.get("max_length", self.model.config.max_length)
            inputs = self._concat_inputs_history(inputs, histories, max_length)

            if self.framework == "pt":
                inputs = self.ensure_tensor_on_device(**inputs)
                input_length = inputs["input_ids"].shape[-1]

            elif self.framework == "tf":
                input_length = tf.shape(inputs["input_ids"])[-1].numpy()

            if input_length > 0.9 * max_length:
                logger.warning(
                    "Longest conversation length: {} is bigger than 0.9 * max_length: {}. "
                    "You might consider trimming the early phase of the conversation".format(input_length, max_length)
                )
            generated_responses = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **generate_kwargs,
            )

            if self.model.config.is_encoder_decoder:
                if self.framework == "pt":
                    history = torch.cat((inputs["input_ids"], generated_responses[:, 1:]), 1)
                elif self.framework == "tf":
                    history = tf.concat([inputs["input_ids"], generated_responses[:, 1:]], 1)
            else:
                history = generated_responses

            history = self._clean_padding_history(history)
            if self.model.config.is_encoder_decoder:
                start_position = 1
            else:
                start_position = input_length

            output = []
            for conversation_index, conversation in enumerate(conversations):
                conversation.mark_processed()
                conversation.generated_responses.append(
                    self.tokenizer.decode(
                        generated_responses[conversation_index][start_position:],
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                )
                conversation.set_history(history[conversation_index])
                output.append(conversation)
            if len(output) == 1:
                return output[0]
            else:
                return output

    def _parse_and_tokenize(self, inputs, **kwargs):
        """
        Parse arguments and tokenize, adding an EOS token at the end of the user input
        """
        # Parse arguments
        inputs = self.tokenizer(inputs, add_special_tokens=False, padding=False).get("input_ids", [])
        for token_ids in inputs:
            token_ids.append(self.tokenizer.eos_token_id)
        return inputs

    def _clean_padding_history(self, generated_tensor) -> List[List[int]]:
        """
        Cleans the padding history. Padding may be generated in two places when multiple conversations are provided as
        an input:

            - at the end of the concatenated history and new user input, so that all inputs to the model have the
              same length
            - at the end of the generated response, as some responses will be longer than others

        This method cleans up these padding tokens so that the history for each conversation is not impacted by the
        batching process.
        """
        outputs = []
        for sequence in generated_tensor:
            sequence_tokens = []
            is_previous_pad = False
            for token in sequence:
                if token == self.tokenizer.pad_token_id:
                    if self.tokenizer.pad_token_id != self.tokenizer.eos_token_id:
                        continue
                    if is_previous_pad:
                        continue
                    else:
                        is_previous_pad = True
                else:
                    is_previous_pad = False
                if self.framework == "pt":
                    sequence_tokens.append(token.item())
                else:
                    sequence_tokens.append(int(token.numpy()))

            outputs.append(sequence_tokens)
        return outputs

    def _concat_inputs_history(self, inputs: List[List[int]], histories: List[Optional[List[int]]], max_length: int):
        """
        Builds an input prepended by the history for this conversation, allowing multi-turn conversation with context
        """
        outputs = []
        for new_input, history in zip(inputs, histories):
            if history is not None:
                new_input = history + new_input
            if len(new_input) > max_length - self.min_length_for_response:
                cutoff_eos_index = 0
                while len(new_input) - cutoff_eos_index > max_length - self.min_length_for_response:
                    if cutoff_eos_index >= len(new_input):
                        break
                    cutoff_eos_index = new_input[cutoff_eos_index:].index(self.tokenizer.eos_token_id)
                    if cutoff_eos_index == 0 or cutoff_eos_index == len(new_input) - 1:
                        break
                    else:
                        new_input = new_input[cutoff_eos_index + 1 :]
            outputs.append(new_input)
        padded_outputs = self.tokenizer.pad(
            {"input_ids": outputs}, padding="longest", return_attention_mask=True, return_tensors=self.framework
        )
        return padded_outputs
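

# Illustrative sketch of the history-truncation rule implemented in
# `_concat_inputs_history` above (hypothetical helper, pure Python): when the
# history plus the new input exceeds the token budget, whole turns are dropped from
# the front, cutting only at EOS-token boundaries.
def _toy_truncate_history(token_ids: List[int], eos_token_id: int, budget: int) -> List[int]:
    while len(token_ids) > budget and eos_token_id in token_ids[:-1]:
        # Drop everything up to and including the first EOS token (one old turn).
        token_ids = token_ids[token_ids.index(eos_token_id) + 1 :]
    return token_ids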


# Register all the supported tasks here
SUPPORTED_TASKS = {
    "feature-extraction": {
        "impl": FeatureExtractionPipeline,
        "tf": TFAutoModel if is_tf_available() else None,
        "pt": AutoModel if is_torch_available() else None,
        "default": {"model": {"pt": "distilbert-base-cased", "tf": "distilbert-base-cased"}},
    },
    "sentiment-analysis": {
        "impl": TextClassificationPipeline,
        "tf": TFAutoModelForSequenceClassification if is_tf_available() else None,
        "pt": AutoModelForSequenceClassification if is_torch_available() else None,
        "default": {
            "model": {
                "pt": "distilbert-base-uncased-finetuned-sst-2-english",
                "tf": "distilbert-base-uncased-finetuned-sst-2-english",
            },
        },
    },
    "ner": {
        "impl": TokenClassificationPipeline,
        "tf": TFAutoModelForTokenClassification if is_tf_available() else None,
        "pt": AutoModelForTokenClassification if is_torch_available() else None,
        "default": {
            "model": {
                "pt": "dbmdz/bert-large-cased-finetuned-conll03-english",
                "tf": "dbmdz/bert-large-cased-finetuned-conll03-english",
            },
        },
    },
    "question-answering": {
        "impl": QuestionAnsweringPipeline,
        "tf": TFAutoModelForQuestionAnswering if is_tf_available() else None,
        "pt": AutoModelForQuestionAnswering if is_torch_available() else None,
        "default": {
            "model": {"pt": "distilbert-base-cased-distilled-squad", "tf": "distilbert-base-cased-distilled-squad"},
        },
    },
    "fill-mask": {
        "impl": FillMaskPipeline,
        "tf": TFAutoModelForMaskedLM if is_tf_available() else None,
        "pt": AutoModelForMaskedLM if is_torch_available() else None,
        "default": {"model": {"pt": "distilroberta-base", "tf": "distilroberta-base"}},
    },
    "summarization": {
        "impl": SummarizationPipeline,
        "tf": TFAutoModelForSeq2SeqLM if is_tf_available() else None,
        "pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
        "default": {"model": {"pt": "sshleifer/distilbart-cnn-12-6", "tf": "t5-small"}},
    },
    # This task is a special case as it's parametrized by SRC, TGT languages.
    "translation": {
        "impl": TranslationPipeline,
        "tf": TFAutoModelForSeq2SeqLM if is_tf_available() else None,
        "pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
        "default": {
            ("en", "fr"): {"model": {"pt": "t5-base", "tf": "t5-base"}},
            ("en", "de"): {"model": {"pt": "t5-base", "tf": "t5-base"}},
            ("en", "ro"): {"model": {"pt": "t5-base", "tf": "t5-base"}},
        },
    },
    "text2text-generation": {
        "impl": Text2TextGenerationPipeline,
        "tf": TFAutoModelForSeq2SeqLM if is_tf_available() else None,
        "pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
        "default": {"model": {"pt": "t5-base", "tf": "t5-base"}},
    },
    "text-generation": {
        "impl": TextGenerationPipeline,
        "tf": TFAutoModelForCausalLM if is_tf_available() else None,
        "pt": AutoModelForCausalLM if is_torch_available() else None,
        "default": {"model": {"pt": "gpt2", "tf": "gpt2"}},
    },
    "zero-shot-classification": {
        "impl": ZeroShotClassificationPipeline,
        "tf": TFAutoModelForSequenceClassification if is_tf_available() else None,
        "pt": AutoModelForSequenceClassification if is_torch_available() else None,
        "default": {
            "model": {"pt": "facebook/bart-large-mnli", "tf": "roberta-large-mnli"},
            "config": {"pt": "facebook/bart-large-mnli", "tf": "roberta-large-mnli"},
            "tokenizer": {"pt": "facebook/bart-large-mnli", "tf": "roberta-large-mnli"},
        },
    },
    "conversational": {
        "impl": ConversationalPipeline,
        "tf": TFAutoModelForCausalLM if is_tf_available() else None,
        "pt": AutoModelForCausalLM if is_torch_available() else None,
        "default": {"model": {"pt": "microsoft/DialoGPT-medium", "tf": "microsoft/DialoGPT-medium"}},
    },
}
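

# Illustrative sketch (hypothetical helper): resolving the default model identifier
# for a task and framework straight from the registry above. Note that the
# parametrized "translation" task keys its defaults by (src, tgt) language pair
# rather than directly by "model".
def _default_model_for(task: str, framework: str = "pt") -> str:
    return SUPPORTED_TASKS[task]["default"]["model"][framework]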


def check_task(task: str) -> Tuple[Dict, Any]:
    """
    Checks an incoming task string, validates that it is correct, and returns the default pipeline and model classes,
    and the default models if they exist.

    Args:
        task (:obj:`str`):
            The task defining which pipeline will be returned. Currently accepted tasks are:

            - :obj:`"feature-extraction"`
            - :obj:`"sentiment-analysis"`
            - :obj:`"ner"`
            - :obj:`"question-answering"`
            - :obj:`"fill-mask"`
            - :obj:`"summarization"`
            - :obj:`"translation_xx_to_yy"`
            - :obj:`"translation"`
            - :obj:`"text-generation"`
            - :obj:`"conversational"`

    Returns:
        (task_defaults :obj:`dict`, task_options :obj:`tuple` or :obj:`None`): The actual dictionary required to
        initialize the pipeline, and some extra task options for parametrized tasks like "translation_XX_to_YY".


    """
    if task in SUPPORTED_TASKS:
        targeted_task = SUPPORTED_TASKS[task]
        return targeted_task, None

    if task.startswith("translation"):
        tokens = task.split("_")
        if len(tokens) == 4 and tokens[0] == "translation" and tokens[2] == "to":
            targeted_task = SUPPORTED_TASKS["translation"]
            return targeted_task, (tokens[1], tokens[3])
        raise KeyError("Invalid translation task {}, use 'translation_XX_to_YY' format".format(task))

    raise KeyError(
        "Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys()) + ["translation_XX_to_YY"])
    )
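

# A minimal sketch of how `check_task` is consumed (hypothetical helper): plain task
# names return no extra options, while "translation_XX_to_YY" identifiers also yield
# the (src, tgt) language pair.
def _check_task_example():
    targeted_task, options = check_task("summarization")  # options is None
    _, (src, tgt) = check_task("translation_en_to_fr")  # src == "en", tgt == "fr"
    return targeted_task, options, src, tgt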


def pipeline(
    task: str,
    model: Optional[Any] = None,
    config: Optional[Union[str, PretrainedConfig]] = None,
    tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
    framework: Optional[str] = None,
    revision: Optional[str] = None,
    use_fast: bool = True,
    **kwargs
) -> Pipeline:
    """
    Utility factory method to build a :class:`~transformers.Pipeline`.

    Pipelines are made of:

        - A :doc:`tokenizer <tokenizer>` in charge of mapping raw textual input to tokens.
        - A :doc:`model <model>` to make predictions from the inputs.
        - Some (optional) post processing for enhancing model's output.

    Args:
        task (:obj:`str`):
            The task defining which pipeline will be returned. Currently accepted tasks are:

            - :obj:`"feature-extraction"`: will return a :class:`~transformers.FeatureExtractionPipeline`.
            - :obj:`"sentiment-analysis"`: will return a :class:`~transformers.TextClassificationPipeline`.
            - :obj:`"ner"`: will return a :class:`~transformers.TokenClassificationPipeline`.
            - :obj:`"question-answering"`: will return a :class:`~transformers.QuestionAnsweringPipeline`.
            - :obj:`"fill-mask"`: will return a :class:`~transformers.FillMaskPipeline`.
            - :obj:`"summarization"`: will return a :class:`~transformers.SummarizationPipeline`.
            - :obj:`"translation_xx_to_yy"`: will return a :class:`~transformers.TranslationPipeline`.
2867
            - :obj:`"text2text-generation"`: will return a :class:`~transformers.Text2TextGenerationPipeline`.
Sylvain Gugger's avatar
Sylvain Gugger committed
2868
            - :obj:`"text-generation"`: will return a :class:`~transformers.TextGenerationPipeline`.
            - :obj:`"zero-shot-classification:`: will return a :class:`~transformers.ZeroShotClassificationPipeline`.
            - :obj:`"conversation"`: will return a :class:`~transformers.ConversationalPipeline`.
        model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`):
            The model that will be used by the pipeline to make predictions. This can be a model identifier or an
            actual instance of a pretrained model inheriting from :class:`~transformers.PreTrainedModel` (for PyTorch)
            or :class:`~transformers.TFPreTrainedModel` (for TensorFlow).

            If not provided, the default for the :obj:`task` will be loaded.
        config (:obj:`str` or :obj:`~transformers.PretrainedConfig`, `optional`):
            The configuration that will be used by the pipeline to instantiate the model. This can be a model
            identifier or an actual pretrained model configuration inheriting from
            :class:`~transformers.PretrainedConfig`.

            If not provided, the default configuration file for the requested model will be used. That means that if
            :obj:`model` is given, its default configuration will be used. However, if :obj:`model` is not supplied,
            this :obj:`task`'s default model's config is used instead.
        tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`):
            The tokenizer that will be used by the pipeline to encode data for the model. This can be a model
            identifier or an actual pretrained tokenizer inheriting from :class:`~transformers.PreTrainedTokenizer`.

            If not provided, the default tokenizer for the given :obj:`model` will be loaded (if it is a string). If
            :obj:`model` is not specified or not a string, then the default tokenizer for :obj:`config` is loaded (if
            it is a string). However, if :obj:`config` is also not given or not a string, then the default tokenizer
            for the given :obj:`task` will be loaded.
        framework (:obj:`str`, `optional`):
            The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
            must be installed.

            If no framework is specified, will default to the one currently installed. If no framework is specified and
            both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no model
            is provided.
        revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
            When passing a task name or a string model identifier: The specific model version to use. It can be a
            branch name, a tag name, or a commit id, since we use a git-based system for storing models and other
            artifacts on huggingface.co, so ``revision`` can be any identifier allowed by git.
        use_fast (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to use a Fast tokenizer if possible (a :class:`~transformers.PreTrainedTokenizerFast`).
        kwargs:
            Additional keyword arguments passed along to the specific pipeline init (see the documentation for the
            corresponding pipeline class for possible values).

    Returns:
        :class:`~transformers.Pipeline`: A suitable pipeline for the task.

    Examples::

        >>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer

        >>> # Sentiment analysis pipeline
        >>> pipeline('sentiment-analysis')

        >>> # Question answering pipeline, specifying the checkpoint identifier
        >>> pipeline('question-answering', model='distilbert-base-cased-distilled-squad', tokenizer='bert-base-cased')

        >>> # Named entity recognition pipeline, passing in a specific model and tokenizer
        >>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
        >>> pipeline('ner', model=model, tokenizer=tokenizer)
    """
    # Retrieve the task
    targeted_task, task_options = check_task(task)

    # Use default model/config/tokenizer for the task if no model is provided
    if model is None:
        # At that point framework might still be undetermined
        model = get_default_model(targeted_task, framework, task_options)

    framework = framework or get_framework(model)

    task_class, model_class = targeted_task["impl"], targeted_task[framework]

    # Try to infer tokenizer from model or config name (if provided as str)
    if tokenizer is None:
        if isinstance(model, str):
            tokenizer = model
        elif isinstance(config, str):
            tokenizer = config
        else:
            # Impossible to guess which tokenizer to use here
            raise Exception(
                "Impossible to guess which tokenizer to use. "
                "Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer."
            )

    modelcard = None
    # Try to infer modelcard from model or config name (if provided as str)
    if isinstance(model, str):
        modelcard = model
    elif isinstance(config, str):
        modelcard = config

    # Instantiate tokenizer if needed
    if isinstance(tokenizer, (str, tuple)):
        if isinstance(tokenizer, tuple):
            # For tuple we have (tokenizer name, {kwargs})
            use_fast = tokenizer[1].pop("use_fast", use_fast)
            tokenizer = AutoTokenizer.from_pretrained(
                tokenizer[0], use_fast=use_fast, revision=revision, **tokenizer[1]
            )
        else:
            tokenizer = AutoTokenizer.from_pretrained(tokenizer, revision=revision, use_fast=use_fast)

    # Instantiate config if needed
    if isinstance(config, str):
        config = AutoConfig.from_pretrained(config, revision=revision)

    # Instantiate modelcard if needed
    if isinstance(modelcard, str):
        modelcard = ModelCard.from_pretrained(modelcard, revision=revision)

    # Instantiate model if needed
    if isinstance(model, str):
        # Handle transparent TF/PT model conversion
        model_kwargs = {}
        if framework == "pt" and model.endswith(".h5"):
            model_kwargs["from_tf"] = True
            logger.warning(
                "Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. "
                "Trying to load the model with PyTorch."
            )
        elif framework == "tf" and model.endswith(".bin"):
            model_kwargs["from_pt"] = True
            logger.warning(
                "Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. "
                "Trying to load the model with Tensorflow."
            )
        model = model_class.from_pretrained(model, config=config, revision=revision, **model_kwargs)
        if task == "translation" and model.config.task_specific_params:
            for key in model.config.task_specific_params:
                if key.startswith("translation"):
                    task = key
                    warnings.warn(
                        '"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{}"'.format(
                            task
                        ),
                        UserWarning,
                    )
                    break

    return task_class(model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, task=task, **kwargs)
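

# A minimal sketch of the resolution order documented above (illustrative helper,
# assumes the listed checkpoint is reachable on huggingface.co): when only a model
# identifier string is given, the tokenizer and config are inferred from it, and
# `revision` pins every artifact that gets downloaded.
def _pipeline_resolution_example() -> Pipeline:
    return pipeline(
        "sentiment-analysis",
        model="distilbert-base-uncased-finetuned-sst-2-english",
        revision="main",
    )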