# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import csv
import importlib
import json
import os
import pickle
import sys
import traceback
import types
import warnings
from abc import ABC, abstractmethod
from collections import UserDict
from contextlib import contextmanager
from os.path import abspath, exists
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

from ..dynamic_module_utils import custom_object_save
from ..feature_extraction_utils import PreTrainedFeatureExtractor
from ..image_processing_utils import BaseImageProcessor
from ..modelcard import ModelCard
from ..models.auto.configuration_auto import AutoConfig
from ..tokenization_utils import PreTrainedTokenizer
from ..utils import (
    ModelOutput,
    add_end_docstrings,
    infer_framework,
    is_tf_available,
    is_torch_available,
    is_torch_cuda_available,
    is_torch_mlu_available,
    is_torch_npu_available,
    is_torch_xpu_available,
    logging,
)


GenericTensor = Union[List["GenericTensor"], "torch.Tensor", "tf.Tensor"]

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TFAutoModel

if is_torch_available():
    import torch
    from torch.utils.data import DataLoader, Dataset

    from ..models.auto.modeling_auto import AutoModel

    # Re-export for backward compatibility
    from .pt_utils import KeyDataset
else:
    Dataset = None
    KeyDataset = None

if TYPE_CHECKING:
    from ..modeling_tf_utils import TFPreTrainedModel
    from ..modeling_utils import PreTrainedModel


logger = logging.get_logger(__name__)


def no_collate_fn(items):
    if len(items) != 1:
        raise ValueError("This collate_fn is meant to be used with batch_size=1")
    return items[0]


def _pad(items, key, padding_value, padding_side):
    batch_size = len(items)
    if isinstance(items[0][key], torch.Tensor):
        # Others include `attention_mask` etc...
        shape = items[0][key].shape
        dim = len(shape)
        if key in ["pixel_values", "image"]:
            # This is probably an image, so padding shouldn't be necessary
            # B, C, H, W
            return torch.cat([item[key] for item in items], dim=0)
        elif dim == 4 and key == "input_features":
            # this is probably a mel spectrogram batched
            return torch.cat([item[key] for item in items], dim=0)
        max_length = max(item[key].shape[1] for item in items)
        min_length = min(item[key].shape[1] for item in items)
        dtype = items[0][key].dtype

        if dim == 2:
            if max_length == min_length:
                # Bypass for `ImageGPT` which doesn't provide a padding value, yet
                # we can consistently pad since the size should be matching
                return torch.cat([item[key] for item in items], dim=0)
            tensor = torch.zeros((batch_size, max_length), dtype=dtype) + padding_value
        elif dim == 3:
            tensor = torch.zeros((batch_size, max_length, shape[-1]), dtype=dtype) + padding_value
        elif dim == 4:
            tensor = torch.zeros((batch_size, max_length, shape[-2], shape[-1]), dtype=dtype) + padding_value

        for i, item in enumerate(items):
            if dim == 2:
                if padding_side == "left":
                    tensor[i, -len(item[key][0]) :] = item[key][0].clone()
                else:
                    tensor[i, : len(item[key][0])] = item[key][0].clone()
            elif dim == 3:
                if padding_side == "left":
                    tensor[i, -len(item[key][0]) :, :] = item[key][0].clone()
                else:
                    tensor[i, : len(item[key][0]), :] = item[key][0].clone()
            elif dim == 4:
                if padding_side == "left":
                    tensor[i, -len(item[key][0]) :, :, :] = item[key][0].clone()
                else:
                    tensor[i, : len(item[key][0]), :, :] = item[key][0].clone()

        return tensor
    else:
        return [item[key] for item in items]


def pad_collate_fn(tokenizer, feature_extractor):
    # Tokenizer
    t_padding_side = None
    # Feature extractor
    f_padding_side = None
    if tokenizer is None and feature_extractor is None:
        raise ValueError("Pipeline without tokenizer or feature_extractor cannot do batching")
    if tokenizer is not None:
        if tokenizer.pad_token_id is None:
            raise ValueError(
                "Pipeline with tokenizer without pad_token cannot do batching. You can try to set it with "
                "`pipe.tokenizer.pad_token_id = model.config.eos_token_id`."
            )
        else:
            t_padding_value = tokenizer.pad_token_id
            t_padding_side = tokenizer.padding_side
    if feature_extractor is not None:
        # Feature extractor can be images, where no padding is expected
        f_padding_value = getattr(feature_extractor, "padding_value", None)
        f_padding_side = getattr(feature_extractor, "padding_side", None)

    if t_padding_side is not None and f_padding_side is not None and t_padding_side != f_padding_side:
        raise ValueError(
            f"The feature extractor and tokenizer don't agree on padding side {t_padding_side} != {f_padding_side}"
        )
    padding_side = "right"
    if t_padding_side is not None:
        padding_side = t_padding_side
    if f_padding_side is not None:
        padding_side = f_padding_side

    def inner(items):
        keys = set(items[0].keys())
        for item in items:
            if set(item.keys()) != keys:
                raise ValueError(
                    f"The elements of the batch contain different keys. Cannot batch them ({set(item.keys())} !="
                    f" {keys})"
                )
        # input_values, input_pixels, input_ids, ...
        padded = {}
        for key in keys:
            if key in {"input_ids"}:
                # ImageGPT uses a feature extractor
                if tokenizer is None and feature_extractor is not None:
                    _padding_value = f_padding_value
                else:
                    _padding_value = t_padding_value
            elif key in {"input_values", "pixel_values", "input_features"}:
                _padding_value = f_padding_value
            elif key in {"p_mask", "special_tokens_mask"}:
                _padding_value = 1
            elif key in {"attention_mask", "token_type_ids"}:
                _padding_value = 0
            else:
                # This is likely another random key, maybe even user provided
                _padding_value = 0
            padded[key] = _pad(items, key, _padding_value, padding_side)
        return padded

    return inner
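# A minimal usage sketch for `pad_collate_fn` (hypothetical tensors; assumes a
# tokenizer whose pad token is set):
#
#     collate = pad_collate_fn(tokenizer, None)
#     batch = collate(
#         [
#             {"input_ids": torch.tensor([[1, 2, 3]])},
#             {"input_ids": torch.tensor([[4, 5]])},
#         ]
#     )
#     # batch["input_ids"] has shape (2, 3); the shorter row is padded with
#     # `tokenizer.pad_token_id` on the side given by `tokenizer.padding_side`.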


def infer_framework_load_model(
    model,
    config: AutoConfig,
    model_classes: Optional[Dict[str, Tuple[type]]] = None,
    task: Optional[str] = None,
    framework: Optional[str] = None,
    **model_kwargs,
):
    """
    Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model).

    If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is
    actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to
    instantiate the model twice, this model is returned for use by the pipeline.

    If both frameworks are installed and available for `model`, PyTorch is selected.

    Args:
        model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]):
            The model to infer the framework from. If `str`, a checkpoint name.
        config ([`AutoConfig`]):
            The config associated with the model to help using the correct class.
        model_classes (dictionary `str` to `type`, *optional*):
            A mapping from framework name to model class.
        task (`str`):
            The task defining which pipeline will be returned.
        model_kwargs:
            Additional dictionary of keyword arguments passed along to the model's `from_pretrained(...,
            **model_kwargs)` function.

    Returns:
        `Tuple`: A tuple (framework, model).
    """
    if not is_tf_available() and not is_torch_available():
        raise RuntimeError(
            "At least one of TensorFlow 2.0 or PyTorch should be installed. "
            "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
            "To install PyTorch, read the instructions at https://pytorch.org/."
        )
    if isinstance(model, str):
        model_kwargs["_from_pipeline"] = task
        class_tuple = ()
        look_pt = is_torch_available() and framework in {"pt", None}
        look_tf = is_tf_available() and framework in {"tf", None}
        if model_classes:
            if look_pt:
                class_tuple = class_tuple + model_classes.get("pt", (AutoModel,))
            if look_tf:
                class_tuple = class_tuple + model_classes.get("tf", (TFAutoModel,))
        if config.architectures:
            classes = []
            for architecture in config.architectures:
                transformers_module = importlib.import_module("transformers")
                if look_pt:
                    _class = getattr(transformers_module, architecture, None)
                    if _class is not None:
                        classes.append(_class)
                if look_tf:
                    _class = getattr(transformers_module, f"TF{architecture}", None)
                    if _class is not None:
                        classes.append(_class)
            class_tuple = class_tuple + tuple(classes)

        if len(class_tuple) == 0:
            raise ValueError(f"Pipeline cannot infer suitable model classes from {model}")

        all_traceback = {}
        for model_class in class_tuple:
            kwargs = model_kwargs.copy()
            if framework == "pt" and model.endswith(".h5"):
                kwargs["from_tf"] = True
                logger.warning(
                    "Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. "
                    "Trying to load the model with PyTorch."
                )
            elif framework == "tf" and model.endswith(".bin"):
                kwargs["from_pt"] = True
                logger.warning(
                    "Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. "
                    "Trying to load the model with TensorFlow."
                )

            try:
                model = model_class.from_pretrained(model, **kwargs)
                if hasattr(model, "eval"):
                    model = model.eval()
                # Stop loading on the first successful load.
                break
            except (OSError, ValueError):
                all_traceback[model_class.__name__] = traceback.format_exc()
                continue

        if isinstance(model, str):
            error = ""
            for class_name, trace in all_traceback.items():
                error += f"while loading with {class_name}, an error is thrown:\n{trace}\n"
            raise ValueError(
                f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
            )

    if framework is None:
        framework = infer_framework(model.__class__)
    return framework, model
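# A minimal sketch of a direct call (hypothetical checkpoint name; pipelines
# normally reach this through `infer_framework_from_model`):
#
#     config = AutoConfig.from_pretrained("distilbert-base-uncased")
#     framework, model = infer_framework_load_model("distilbert-base-uncased", config)
#     # framework is "pt" when the checkpoint was loaded with a PyTorch class,
#     # "tf" when it was loaded with a TensorFlow class.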


def infer_framework_from_model(
    model,
    model_classes: Optional[Dict[str, Tuple[type]]] = None,
    task: Optional[str] = None,
    framework: Optional[str] = None,
    **model_kwargs,
):
    """
    Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model).

    If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is
    actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to
    instantiate the model twice, this model is returned for use by the pipeline.

    If both frameworks are installed and available for `model`, PyTorch is selected.

    Args:
        model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]):
            The model to infer the framework from. If `str`, a checkpoint name.
        model_classes (dictionary `str` to `type`, *optional*):
            A mapping from framework name to model class.
        task (`str`):
            The task defining which pipeline will be returned.
        model_kwargs:
            Additional dictionary of keyword arguments passed along to the model's `from_pretrained(...,
            **model_kwargs)` function.

    Returns:
        `Tuple`: A tuple (framework, model).
    """
    if isinstance(model, str):
        config = AutoConfig.from_pretrained(model, _from_pipeline=task, **model_kwargs)
    else:
        config = model.config
    return infer_framework_load_model(
        model, config, model_classes=model_classes, _from_pipeline=task, task=task, framework=framework, **model_kwargs
    )


def get_framework(model, revision: Optional[str] = None):
    """
    Select framework (TensorFlow or PyTorch) to use.

    Args:
        model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]):
            If both frameworks are installed, picks the one corresponding to the model passed (either a model class or
            the model name). If no specific model is provided, defaults to using PyTorch.
    """
    warnings.warn(
        "`get_framework` is deprecated and will be removed in v5, use `infer_framework_from_model` instead.",
        FutureWarning,
    )
    if not is_tf_available() and not is_torch_available():
        raise RuntimeError(
            "At least one of TensorFlow 2.0 or PyTorch should be installed. "
            "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
            "To install PyTorch, read the instructions at https://pytorch.org/."
        )
    if isinstance(model, str):
        if is_torch_available() and not is_tf_available():
            model = AutoModel.from_pretrained(model, revision=revision)
        elif is_tf_available() and not is_torch_available():
            model = TFAutoModel.from_pretrained(model, revision=revision)
        else:
            try:
                model = AutoModel.from_pretrained(model, revision=revision)
            except OSError:
                model = TFAutoModel.from_pretrained(model, revision=revision)

    framework = infer_framework(model.__class__)
    return framework


def get_default_model_and_revision(
    targeted_task: Dict, framework: Optional[str], task_options: Optional[Any]
) -> Union[str, Tuple[str, str]]:
    """
    Select a default model to use for a given task. Defaults to pytorch if ambiguous.

    Args:
        targeted_task (`Dict`):
           Dictionary representing the given task, that should contain default models.

        framework (`str`, None):
           "pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet.

        task_options (`Any`, None):
           Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for
           translation task.

    Returns:
        `str`: The model string representing the default model for this pipeline.
    """
    if is_torch_available() and not is_tf_available():
        framework = "pt"
    elif is_tf_available() and not is_torch_available():
        framework = "tf"

    defaults = targeted_task["default"]
    if task_options:
        if task_options not in defaults:
            raise ValueError(f"The task does not provide any default models for options {task_options}")
        default_models = defaults[task_options]["model"]
    elif "model" in defaults:
        default_models = targeted_task["default"]["model"]
    else:
        # XXX This error message needs to be updated to be more generic if more tasks are going to become
        # parametrized
        raise ValueError('The task defaults can\'t be correctly selected. You probably meant "translation_XX_to_YY"')

    if framework is None:
        framework = "pt"

    return default_models[framework]
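# A minimal sketch of the `targeted_task` structure this helper consumes
# (hypothetical checkpoint name and revision):
#
#     targeted_task = {"default": {"model": {"pt": ("distilbert-base-cased", "main")}}}
#     get_default_model_and_revision(targeted_task, "pt", None)
#     # -> ("distilbert-base-cased", "main")
#
# For parametrized tasks such as translation, the defaults are instead keyed by
# the task options, e.g. `{"default": {("en", "fr"): {"model": {...}}}}`.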


class PipelineException(Exception):
    """
    Raised by a [`Pipeline`] when handling __call__.

    Args:
        task (`str`): The task of the pipeline.
        model (`str`): The model used by the pipeline.
        reason (`str`): The error message to display.
    """

    def __init__(self, task: str, model: str, reason: str):
        super().__init__(reason)

        self.task = task
        self.model = model


class ArgumentHandler(ABC):
    """
    Base interface for handling arguments for each [`~pipelines.Pipeline`].
    """

    @abstractmethod
    def __call__(self, *args, **kwargs):
        raise NotImplementedError()


class PipelineDataFormat:
    """
    Base class for all the pipeline supported data formats, both for reading and writing. Supported data formats
    currently include:

    - JSON
    - CSV
    - stdin/stdout (pipe)

    `PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets columns to
    pipelines keyword arguments through the `dataset_kwarg_1=dataset_column_1` format.

    Args:
        output_path (`str`): Where to save the outgoing data.
        input_path (`str`): Where to look for the input data.
        column (`str`): The column to read.
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite the `output_path`.
    """

    SUPPORTED_FORMATS = ["json", "csv", "pipe"]

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite: bool = False,
    ):
        self.output_path = output_path
        self.input_path = input_path
        self.column = column.split(",") if column is not None else [""]
        self.is_multi_columns = len(self.column) > 1

        if self.is_multi_columns:
            self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column]

        if output_path is not None and not overwrite:
            if exists(abspath(self.output_path)):
                raise OSError(f"{self.output_path} already exists on disk")

        if input_path is not None:
            if not exists(abspath(self.input_path)):
                raise OSError(f"{self.input_path} doesn't exist on disk")

    @abstractmethod
    def __iter__(self):
        raise NotImplementedError()

    @abstractmethod
    def save(self, data: Union[dict, List[dict]]):
        """
Sylvain Gugger's avatar
Sylvain Gugger committed
498
        Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`].
499
500

        Args:
501
            data (`dict` or list of `dict`): The data to store.
502
503
504
505
506
507
508
509
        """
        raise NotImplementedError()

    def save_binary(self, data: Union[dict, List[dict]]) -> str:
        """
        Save the provided data object as a pickle-formatted binary data on the disk.

        Args:
510
            data (`dict` or list of `dict`): The data to store.
511
512

        Returns:
513
            `str`: Path where the data has been saved.
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
        """
        path, _ = os.path.splitext(self.output_path)
        binary_path = os.path.extsep.join((path, "pickle"))

        with open(binary_path, "wb+") as f_output:
            pickle.dump(data, f_output)

        return binary_path

    @staticmethod
    def from_str(
        format: str,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ) -> "PipelineDataFormat":
        """
Sylvain Gugger's avatar
Sylvain Gugger committed
532
        Creates an instance of the right subclass of [`~pipelines.PipelineDataFormat`] depending on `format`.
533
534

        Args:
535
            format (`str`):
536
537
                The format of the desired pipeline. Acceptable values are `"json"`, `"csv"` or `"pipe"`.
            output_path (`str`, *optional*):
538
                Where to save the outgoing data.
539
            input_path (`str`, *optional*):
540
                Where to look for the input data.
541
            column (`str`, *optional*):
542
                The column to read.
543
544
            overwrite (`bool`, *optional*, defaults to `False`):
                Whether or not to overwrite the `output_path`.
545
546

        Returns:
547
            [`~pipelines.PipelineDataFormat`]: The proper data format.
548
549
550
551
552
553
554
555
        """
        if format == "json":
            return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        elif format == "csv":
            return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        elif format == "pipe":
            return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        else:
            raise KeyError(f"Unknown reader {format} (Available reader are json/csv/pipe)")


class CsvPipelineDataFormat(PipelineDataFormat):
    """
    Support for pipelines using CSV data format.

    Args:
        output_path (`str`): Where to save the outgoing data.
        input_path (`str`): Where to look for the input data.
        column (`str`): The column to read.
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite the `output_path`.
    """

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ):
        super().__init__(output_path, input_path, column, overwrite=overwrite)

    def __iter__(self):
        with open(self.input_path, "r") as f:
            reader = csv.DictReader(f)
            for row in reader:
                if self.is_multi_columns:
                    yield {k: row[c] for k, c in self.column}
                else:
                    yield row[self.column[0]]

    def save(self, data: List[dict]):
        """
Sylvain Gugger's avatar
Sylvain Gugger committed
591
        Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`].
592
593

        Args:
594
            data (`List[dict]`): The data to store.
595
596
597
598
599
600
601
602
603
604
605
606
607
        """
        with open(self.output_path, "w") as f:
            if len(data) > 0:
                writer = csv.DictWriter(f, list(data[0].keys()))
                writer.writeheader()
                writer.writerows(data)


class JsonPipelineDataFormat(PipelineDataFormat):
    """
    Support for pipelines using JSON file format.

    Args:
        output_path (`str`): Where to save the outgoing data.
        input_path (`str`): Where to look for the input data.
        column (`str`): The column to read.
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite the `output_path`.
    """

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ):
        super().__init__(output_path, input_path, column, overwrite=overwrite)

        with open(input_path, "r") as f:
            self._entries = json.load(f)

    def __iter__(self):
        for entry in self._entries:
            if self.is_multi_columns:
                yield {k: entry[c] for k, c in self.column}
            else:
                yield entry[self.column[0]]

    def save(self, data: dict):
        """
        Save the provided data object in a json file.

        Args:
            data (`dict`): The data to store.
        """
        with open(self.output_path, "w") as f:
            json.dump(data, f)


class PipedPipelineDataFormat(PipelineDataFormat):
    """
    Read data from piped input to the python process. For multi-column data, columns should be separated by \t

    If columns are provided, then the output will be a dictionary with {column_x: value_x}

    Args:
        output_path (`str`): Where to save the outgoing data.
        input_path (`str`): Where to look for the input data.
        column (`str`): The column to read.
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite the `output_path`.
    """

    def __iter__(self):
        for line in sys.stdin:
            # Split for multi-columns
            if "\t" in line:
                line = line.split("\t")
                if self.column:
                    # Dictionary to map arguments
                    yield {kwargs: l for (kwargs, _), l in zip(self.column, line)}
                else:
                    yield tuple(line)

            # No dictionary to map arguments
            else:
                yield line
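    # A minimal sketch of the multi-column mapping above (hypothetical column
    # spec): with `column="context=c1,question=c2"`, a stdin line "a\tb" is
    # yielded as `{"context": "a", "question": "b"}`; lines without a tab are
    # yielded unchanged.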

    def save(self, data: dict):
        """
        Print the data.

        Args:
            data (`dict`): The data to store.
        """
        print(data)

    def save_binary(self, data: Union[dict, List[dict]]) -> str:
        if self.output_path is None:
            raise KeyError(
                "When using piped input on pipeline outputting large object requires an output file path. "
                "Please provide such output path through --output argument."
            )

        return super().save_binary(data)


class _ScikitCompat(ABC):
    """
    Interface layer for the Scikit and Keras compatibility.
    """

    @abstractmethod
    def transform(self, X):
        raise NotImplementedError()

    @abstractmethod
    def predict(self, X):
        raise NotImplementedError()


def build_pipeline_init_args(
    has_tokenizer: bool = False,
    has_feature_extractor: bool = False,
    has_image_processor: bool = False,
    supports_binary_output: bool = True,
) -> str:
    docstring = r"""
    Arguments:
        model ([`PreTrainedModel`] or [`TFPreTrainedModel`]):
            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
            [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow."""
    if has_tokenizer:
        docstring += r"""
        tokenizer ([`PreTrainedTokenizer`]):
            The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
            [`PreTrainedTokenizer`]."""
    if has_feature_extractor:
        docstring += r"""
        feature_extractor ([`SequenceFeatureExtractor`]):
            The feature extractor that will be used by the pipeline to encode data for the model. This object inherits from
            [`SequenceFeatureExtractor`]."""
    if has_image_processor:
        docstring += r"""
        image_processor ([`BaseImageProcessor`]):
            The image processor that will be used by the pipeline to encode data for the model. This object inherits from
            [`BaseImageProcessor`]."""
    docstring += r"""
        modelcard (`str` or [`ModelCard`], *optional*):
            Model card attributed to the model for this pipeline.
        framework (`str`, *optional*):
            The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
            installed.

            If no framework is specified, will default to the one currently installed. If no framework is specified and
            both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is
            provided.
        task (`str`, defaults to `""`):
            A task-identifier for the pipeline.
        num_workers (`int`, *optional*, defaults to 8):
            When the pipeline will use *DataLoader* (when passing a dataset, on GPU for a PyTorch model), the number of
            workers to be used.
        batch_size (`int`, *optional*, defaults to 1):
            When the pipeline will use *DataLoader* (when passing a dataset, on GPU for a PyTorch model), the size of
            the batch to use. For inference this is not always beneficial, please read [Batching with
            pipelines](https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching).
        args_parser ([`~pipelines.ArgumentHandler`], *optional*):
            Reference to the object in charge of parsing supplied pipeline parameters.
        device (`int`, *optional*, defaults to -1):
            Device ordinal for CPU/GPU support. Setting this to -1 will leverage CPU, a positive integer will run the
            model on the associated CUDA device id. You can pass a native `torch.device` or a `str` too.
        torch_dtype (`str` or `torch.dtype`, *optional*):
            Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model
            (`torch.float16`, `torch.bfloat16`, ... or `"auto"`)."""
    if supports_binary_output:
        docstring += r"""
        binary_output (`bool`, *optional*, defaults to `False`):
            Flag indicating if the output of the pipeline should happen in a serialized format (i.e., pickle) or as
            the raw output data, e.g. text."""
    return docstring


PIPELINE_INIT_ARGS = build_pipeline_init_args(
    has_tokenizer=True, has_feature_extractor=True, has_image_processor=True, supports_binary_output=True
)
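# A minimal sketch of how the generated docstring is consumed (this mirrors the
# decorated classes below; `MyPipeline` is a hypothetical subclass):
#
#     @add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
#     class MyPipeline(Pipeline):
#         ...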


if is_torch_available():
    from transformers.pipelines.pt_utils import (
        PipelineChunkIterator,
        PipelineDataset,
        PipelineIterator,
        PipelinePackIterator,
    )


@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True, has_feature_extractor=True, has_image_processor=True))
class Pipeline(_ScikitCompat):
    """
    The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across
    different pipelines.

    Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following
    operations:

        Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output

    Pipeline supports running on CPU or GPU through the device argument (see below).

    Some pipelines, like for instance [`FeatureExtractionPipeline`] (`'feature-extraction'`), output large tensor
    objects as nested-lists. In order to avoid dumping such large structures as textual data we provide the
    `binary_output` constructor argument. If set to `True`, the output will be stored in the pickle format.
    """

    default_input_names = None

    def __init__(
        self,
        model: Union["PreTrainedModel", "TFPreTrainedModel"],
        tokenizer: Optional[PreTrainedTokenizer] = None,
        feature_extractor: Optional[PreTrainedFeatureExtractor] = None,
        image_processor: Optional[BaseImageProcessor] = None,
        modelcard: Optional[ModelCard] = None,
        framework: Optional[str] = None,
        task: str = "",
        args_parser: ArgumentHandler = None,
        device: Union[int, "torch.device"] = None,
        torch_dtype: Optional[Union[str, "torch.dtype"]] = None,
        binary_output: bool = False,
        **kwargs,
    ):
        if framework is None:
            framework, model = infer_framework_load_model(model, config=model.config)

        self.task = task
        self.model = model
        self.tokenizer = tokenizer
        self.feature_extractor = feature_extractor
        self.image_processor = image_processor
        self.modelcard = modelcard
        self.framework = framework

        # `accelerate` device map
        hf_device_map = getattr(self.model, "hf_device_map", None)

        if hf_device_map is not None and device is not None:
            raise ValueError(
                "The model has been loaded with `accelerate` and therefore cannot be moved to a specific device. Please "
                "discard the `device` argument when creating your pipeline object."
            )

        if device is None:
            if hf_device_map is not None:
                # Take the first device used by `accelerate`.
                device = next(iter(hf_device_map.values()))
            else:
                device = -1

        if is_torch_available() and self.framework == "pt":
            if isinstance(device, torch.device):
                if device.type == "xpu" and not is_torch_xpu_available(check_device=True):
                    raise ValueError(f'{device} is not available, you should use device="cpu" instead')
                self.device = device
            elif isinstance(device, str):
                if "xpu" in device and not is_torch_xpu_available(check_device=True):
                    raise ValueError(f'{device} is not available, you should use device="cpu" instead')
                self.device = torch.device(device)
            elif device < 0:
                self.device = torch.device("cpu")
            elif is_torch_mlu_available():
                self.device = torch.device(f"mlu:{device}")
            elif is_torch_cuda_available():
                self.device = torch.device(f"cuda:{device}")
            elif is_torch_npu_available():
                self.device = torch.device(f"npu:{device}")
            elif is_torch_xpu_available(check_device=True):
                self.device = torch.device(f"xpu:{device}")
            else:
                raise ValueError(f"{device} unrecognized or not available.")
        else:
            self.device = device if device is not None else -1

        self.binary_output = binary_output

        # We shouldn't call `model.to()` for models loaded with accelerate
        if (
            self.framework == "pt"
            and self.device is not None
            and not (isinstance(self.device, int) and self.device < 0)
            and hf_device_map is None
        ):
            self.model.to(self.device)

        # Update config and generation_config with task specific parameters
        task_specific_params = self.model.config.task_specific_params
        if task_specific_params is not None and task in task_specific_params:
            self.model.config.update(task_specific_params.get(task))
            if self.model.can_generate():
                self.model.generation_config.update(**task_specific_params.get(task))

        self.call_count = 0
        self._batch_size = kwargs.pop("batch_size", None)
        self._num_workers = kwargs.pop("num_workers", None)
        self._preprocess_params, self._forward_params, self._postprocess_params = self._sanitize_parameters(**kwargs)

        # Pipelines calling `generate`: if the tokenizer has a pad token but the model doesn't, set it in the
        # forward params so that `generate` is aware of the pad token.
        if (
            self.tokenizer is not None
            and self.model.can_generate()
            and self.tokenizer.pad_token_id is not None
            and self.model.generation_config.pad_token_id is None
        ):
            self._forward_params["pad_token_id"] = self.tokenizer.pad_token_id

        if self.image_processor is None and self.feature_extractor is not None:
            if isinstance(self.feature_extractor, BaseImageProcessor):
                # Backward compatible change, if users called
                # ImageSegmentationPipeline(.., feature_extractor=MyFeatureExtractor())
                # then we should keep working
                self.image_processor = self.feature_extractor

    def save_pretrained(self, save_directory: str, safe_serialization: bool = True):
        """
        Save the pipeline's model and tokenizer.

        Args:
            save_directory (`str`):
                A path to the directory where the pipeline will be saved. It will be created if it doesn't exist.
            safe_serialization (`bool`):
                Whether to save the model using `safetensors` or the traditional way for PyTorch or TensorFlow.
        """
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)

        if hasattr(self, "_registered_impl"):
            # Add info to the config
            pipeline_info = self._registered_impl.copy()
            custom_pipelines = {}
            for task, info in pipeline_info.items():
                if info["impl"] != self.__class__:
                    continue

                info = info.copy()
                module_name = info["impl"].__module__
                last_module = module_name.split(".")[-1]
                # Change classes into their names/full names
                info["impl"] = f"{last_module}.{info['impl'].__name__}"
                info["pt"] = tuple(c.__name__ for c in info["pt"])
                info["tf"] = tuple(c.__name__ for c in info["tf"])

                custom_pipelines[task] = info
            self.model.config.custom_pipelines = custom_pipelines
            # Save the pipeline custom code
            custom_object_save(self, save_directory)

        self.model.save_pretrained(save_directory, safe_serialization=safe_serialization)

        if self.tokenizer is not None:
            self.tokenizer.save_pretrained(save_directory)

        if self.feature_extractor is not None:
            self.feature_extractor.save_pretrained(save_directory)

        if self.image_processor is not None:
            self.image_processor.save_pretrained(save_directory)

        if self.modelcard is not None:
            self.modelcard.save_pretrained(save_directory)

    def transform(self, X):
        """
        Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
        """
        return self(X)

    def predict(self, X):
        """
        Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
        """
        return self(X)

    @property
    def torch_dtype(self) -> Optional["torch.dtype"]:
        """
        Torch dtype of the model (if it's Pytorch model), `None` otherwise.
        """
        return getattr(self.model, "dtype", None)

    @contextmanager
    def device_placement(self):
        """
        Context Manager allowing tensor allocation on the user-specified device in a framework-agnostic way.

        Returns:
            Context manager

        Examples:

        ```python
        # Explicitly ask for tensor allocation on CUDA device :0
        pipe = pipeline(..., device=0)
        with pipe.device_placement():
            # Every framework specific tensor allocation will be done on the request device
            output = pipe(...)
        ```"""
        if self.framework == "tf":
            with tf.device("/CPU:0" if self.device == -1 else f"/device:GPU:{self.device}"):
                yield
        else:
            if self.device.type == "cuda":
                with torch.cuda.device(self.device):
                    yield
            elif self.device.type == "mlu":
                with torch.mlu.device(self.device):
                    yield
            else:
                yield

    def ensure_tensor_on_device(self, **inputs):
        """
        Ensure PyTorch tensors are on the specified device.

        Args:
            inputs (keyword arguments that should be `torch.Tensor`, the rest is ignored):
                The tensors to place on `self.device`.
            Recursive on lists **only**.

        Return:
            `Dict[str, torch.Tensor]`: The same as `inputs` but on the proper device.
        """
        return self._ensure_tensor_on_device(inputs, self.device)

    def _ensure_tensor_on_device(self, inputs, device):
        if isinstance(inputs, ModelOutput):
            return ModelOutput(
                {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()}
            )
        elif isinstance(inputs, dict):
            return {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()}
        elif isinstance(inputs, UserDict):
            return UserDict({name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()})
        elif isinstance(inputs, list):
            return [self._ensure_tensor_on_device(item, device) for item in inputs]
        elif isinstance(inputs, tuple):
            return tuple([self._ensure_tensor_on_device(item, device) for item in inputs])
        elif isinstance(inputs, torch.Tensor):
            return inputs.to(device)
        else:
            return inputs
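    # A minimal sketch of the recursion above (hypothetical nested inputs):
    # tensors are moved, containers are rebuilt, anything else passes through.
    #
    #     pipe._ensure_tensor_on_device(
    #         {"input_ids": torch.tensor([[1, 2]]), "meta": ["kept as-is"]},
    #         torch.device("cpu"),
    #     )
    #     # -> {"input_ids": <tensor on cpu>, "meta": ["kept as-is"]}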

    def check_model_type(self, supported_models: Union[List[str], dict]):
        """
        Check if the model class is supported by the pipeline.

        Args:
            supported_models (`List[str]` or `dict`):
                The list of models supported by the pipeline, or a dictionary with model class values.
        """
        if not isinstance(supported_models, list):  # Create from a model mapping
            supported_models_names = []
            for _, model_name in supported_models.items():
                # Mapping can now contain tuples of models for the same configuration.
                if isinstance(model_name, tuple):
                    supported_models_names.extend(list(model_name))
                else:
                    supported_models_names.append(model_name)
            if hasattr(supported_models, "_model_mapping"):
                for _, model in supported_models._model_mapping._extra_content.items():
                    if isinstance(model_name, tuple):
                        supported_models_names.extend([m.__name__ for m in model])
                    else:
                        supported_models_names.append(model.__name__)
            supported_models = supported_models_names
        if self.model.__class__.__name__ not in supported_models:
            logger.error(
                f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are"
                f" {supported_models}."
            )

    @abstractmethod
    def _sanitize_parameters(self, **pipeline_parameters):
        """
        _sanitize_parameters will be called with any excessive named arguments from either `__init__` or `__call__`
        methods. It should return 3 dictionaries of the resolved parameters used by the various `preprocess`,
        `forward` and `postprocess` methods. Do not fill dictionaries if the caller didn't specify kwargs. This
        lets you keep defaults in function signatures, which is more "natural".

        It is not meant to be called directly, it will be automatically called and the final parameters resolved by
        `__init__` and `__call__`.
        """
        raise NotImplementedError("_sanitize_parameters not implemented")

    @abstractmethod
    def preprocess(self, input_: Any, **preprocess_parameters: Dict) -> Dict[str, GenericTensor]:
        """
        Preprocess will take the `input_` of a specific pipeline and return a dictionary of everything necessary for
        `_forward` to run properly. It should contain at least one tensor, but might have arbitrary other items.
        """
        raise NotImplementedError("preprocess not implemented")

    @abstractmethod
    def _forward(self, input_tensors: Dict[str, GenericTensor], **forward_parameters: Dict) -> ModelOutput:
        """
        _forward will receive the prepared dictionary from `preprocess` and run it on the model. This method might
        involve the GPU or the CPU and should be agnostic to it. Isolating this function is the reason for `preprocess`
        and `postprocess` to exist, so that the hot path, this method, can generally run as fast as possible.

        It is not meant to be called directly, `forward` is preferred. It is basically the same but contains additional
        code surrounding `_forward` making sure tensors and models are on the same device, disabling the training part
        of the code (leading to faster inference).
        """
        raise NotImplementedError("_forward not implemented")

    @abstractmethod
    def postprocess(self, model_outputs: ModelOutput, **postprocess_parameters: Dict) -> Any:
        """
        Postprocess will receive the raw outputs of the `_forward` method, generally tensors, and reformat them into
        something more friendly. Generally it will output a list or a dict of results (containing just strings and
        numbers).
        """
        raise NotImplementedError("postprocess not implemented")

    def get_inference_context(self):
        return torch.no_grad

    def forward(self, model_inputs, **forward_params):
        with self.device_placement():
            if self.framework == "tf":
                model_inputs["training"] = False
                model_outputs = self._forward(model_inputs, **forward_params)
            elif self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    model_outputs = self._forward(model_inputs, **forward_params)
                    model_outputs = self._ensure_tensor_on_device(model_outputs, device=torch.device("cpu"))
            else:
                raise ValueError(f"Framework {self.framework} is not supported")
        return model_outputs

    def get_iterator(
        self, inputs, num_workers: int, batch_size: int, preprocess_params, forward_params, postprocess_params
    ):
        if isinstance(inputs, collections.abc.Sized):
            dataset = PipelineDataset(inputs, self.preprocess, preprocess_params)
        else:
            if num_workers > 1:
                logger.warning(
                    "For iterable dataset using num_workers>1 is likely to result"
                    " in errors since everything is iterable, setting `num_workers=1`"
                    " to guarantee correctness."
                )
                num_workers = 1
            dataset = PipelineIterator(inputs, self.preprocess, preprocess_params)
        if "TOKENIZERS_PARALLELISM" not in os.environ:
            logger.info("Disabling tokenizer parallelism, we're using DataLoader multithreading already")
            os.environ["TOKENIZERS_PARALLELISM"] = "false"
        # TODO hack by collating feature_extractor and image_processor
        feature_extractor = self.feature_extractor if self.feature_extractor is not None else self.image_processor
        collate_fn = no_collate_fn if batch_size == 1 else pad_collate_fn(self.tokenizer, feature_extractor)
        dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, collate_fn=collate_fn)
        model_iterator = PipelineIterator(dataloader, self.forward, forward_params, loader_batch_size=batch_size)
        final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params)
        return final_iterator

    def __call__(self, inputs, *args, num_workers=None, batch_size=None, **kwargs):
        if args:
            logger.warning(f"Ignoring args : {args}")

        if num_workers is None:
            if self._num_workers is None:
                num_workers = 0
            else:
                num_workers = self._num_workers
        if batch_size is None:
            if self._batch_size is None:
                batch_size = 1
            else:
                batch_size = self._batch_size

        preprocess_params, forward_params, postprocess_params = self._sanitize_parameters(**kwargs)

        # Fuse __init__ params and __call__ params without modifying the __init__ ones.
        preprocess_params = {**self._preprocess_params, **preprocess_params}
        forward_params = {**self._forward_params, **forward_params}
        postprocess_params = {**self._postprocess_params, **postprocess_params}

        self.call_count += 1
        if self.call_count > 10 and self.framework == "pt" and self.device.type == "cuda":
            logger.warning_once(
                "You seem to be using the pipelines sequentially on GPU. In order to maximize efficiency please use a"
                " dataset",
            )

        is_dataset = Dataset is not None and isinstance(inputs, Dataset)
        is_generator = isinstance(inputs, types.GeneratorType)
        is_list = isinstance(inputs, list)

        is_iterable = is_dataset or is_generator or is_list

        # TODO make the get_iterator work also for `tf` (and `flax`).
        can_use_iterator = self.framework == "pt" and (is_dataset or is_generator or is_list)

        if is_list:
            if can_use_iterator:
                final_iterator = self.get_iterator(
                    inputs, num_workers, batch_size, preprocess_params, forward_params, postprocess_params
                )
                outputs = list(final_iterator)
                return outputs
            else:
                return self.run_multi(inputs, preprocess_params, forward_params, postprocess_params)
        elif can_use_iterator:
            return self.get_iterator(
                inputs, num_workers, batch_size, preprocess_params, forward_params, postprocess_params
            )
        elif is_iterable:
            return self.iterate(inputs, preprocess_params, forward_params, postprocess_params)
        elif self.framework == "pt" and isinstance(self, ChunkPipeline):
            return next(
                iter(
                    self.get_iterator(
                        [inputs], num_workers, batch_size, preprocess_params, forward_params, postprocess_params
                    )
                )
            )
        else:
            return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)

    def run_multi(self, inputs, preprocess_params, forward_params, postprocess_params):
        return [self.run_single(item, preprocess_params, forward_params, postprocess_params) for item in inputs]

    def run_single(self, inputs, preprocess_params, forward_params, postprocess_params):
        model_inputs = self.preprocess(inputs, **preprocess_params)
        model_outputs = self.forward(model_inputs, **forward_params)
        outputs = self.postprocess(model_outputs, **postprocess_params)
        return outputs
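    # `run_single` is the unbatched path: one input flows straight through the
    # three stages, e.g. (hypothetical text-classification input):
    #
    #     model_inputs = pipe.preprocess("I love this!")  # tokenized tensors
    #     model_outputs = pipe.forward(model_inputs)      # raw model outputs
    #     outputs = pipe.postprocess(model_outputs)       # labels / scores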

    def iterate(self, inputs, preprocess_params, forward_params, postprocess_params):
        # This function should become `get_iterator` again, this is a temporary
        # easy solution.
        for input_ in inputs:
            yield self.run_single(input_, preprocess_params, forward_params, postprocess_params)


class ChunkPipeline(Pipeline):
    def run_single(self, inputs, preprocess_params, forward_params, postprocess_params):
        all_outputs = []
        for model_inputs in self.preprocess(inputs, **preprocess_params):
            model_outputs = self.forward(model_inputs, **forward_params)
            all_outputs.append(model_outputs)
        outputs = self.postprocess(all_outputs, **postprocess_params)
        return outputs

    def get_iterator(
        self, inputs, num_workers: int, batch_size: int, preprocess_params, forward_params, postprocess_params
    ):
        if "TOKENIZERS_PARALLELISM" not in os.environ:
            logger.info("Disabling tokenizer parallelism, we're using DataLoader multithreading already")
            os.environ["TOKENIZERS_PARALLELISM"] = "false"
        if num_workers > 1:
            logger.warning(
                "For ChunkPipeline using num_workers>0 is likely to result in errors since everything is iterable,"
                " setting `num_workers=1` to guarantee correctness."
            )
            num_workers = 1
        dataset = PipelineChunkIterator(inputs, self.preprocess, preprocess_params)

        # TODO hack by collating feature_extractor and image_processor
        feature_extractor = self.feature_extractor if self.feature_extractor is not None else self.image_processor
        collate_fn = no_collate_fn if batch_size == 1 else pad_collate_fn(self.tokenizer, feature_extractor)
        dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, collate_fn=collate_fn)
        model_iterator = PipelinePackIterator(dataloader, self.forward, forward_params, loader_batch_size=batch_size)
        final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params)
        return final_iterator


class PipelineRegistry:
    def __init__(self, supported_tasks: Dict[str, Any], task_aliases: Dict[str, str]) -> None:
        self.supported_tasks = supported_tasks
        self.task_aliases = task_aliases

    def get_supported_tasks(self) -> List[str]:
        supported_task = list(self.supported_tasks.keys()) + list(self.task_aliases.keys())
        supported_task.sort()
        return supported_task

    def check_task(self, task: str) -> Tuple[str, Dict, Any]:
        if task in self.task_aliases:
            task = self.task_aliases[task]
        if task in self.supported_tasks:
            targeted_task = self.supported_tasks[task]
            return task, targeted_task, None

        if task.startswith("translation"):
            tokens = task.split("_")
            if len(tokens) == 4 and tokens[0] == "translation" and tokens[2] == "to":
                targeted_task = self.supported_tasks["translation"]
                task = "translation"
                return task, targeted_task, (tokens[1], tokens[3])
            raise KeyError(f"Invalid translation task {task}, use 'translation_XX_to_YY' format")

        raise KeyError(
            f"Unknown task {task}, available tasks are {self.get_supported_tasks() + ['translation_XX_to_YY']}"
        )
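        # A minimal sketch of the parametrized-task parsing above:
        #
        #     registry.check_task("translation_en_to_fr")
        #     # -> ("translation", <targeted_task dict>, ("en", "fr"))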

    def register_pipeline(
        self,
        task: str,
        pipeline_class: type,
        pt_model: Optional[Union[type, Tuple[type]]] = None,
        tf_model: Optional[Union[type, Tuple[type]]] = None,
        default: Optional[Dict] = None,
        type: Optional[str] = None,
    ) -> None:
        if task in self.supported_tasks:
            logger.warning(f"{task} is already registered. Overwriting pipeline for task {task}...")

        if pt_model is None:
            pt_model = ()
        elif not isinstance(pt_model, tuple):
            pt_model = (pt_model,)

        if tf_model is None:
            tf_model = ()
        elif not isinstance(tf_model, tuple):
            tf_model = (tf_model,)

        task_impl = {"impl": pipeline_class, "pt": pt_model, "tf": tf_model}

        if default is not None:
            if "model" not in default and ("pt" in default or "tf" in default):
                default = {"model": default}
            task_impl["default"] = default

        if type is not None:
            task_impl["type"] = type

        self.supported_tasks[task] = task_impl
        pipeline_class._registered_impl = {task: task_impl}
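        # A minimal registration sketch (hypothetical task and pipeline class;
        # `PIPELINE_REGISTRY` is the instance exposed by `transformers.pipelines`):
        #
        #     PIPELINE_REGISTRY.register_pipeline(
        #         "pair-classification",
        #         pipeline_class=PairClassificationPipeline,
        #         pt_model=AutoModelForSequenceClassification,
        #     )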

    def to_dict(self):
        return self.supported_tasks