# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import csv
import importlib
import json
import os
import pickle
import sys
import traceback
import types
import warnings
from abc import ABC, abstractmethod
from collections import UserDict
from contextlib import contextmanager
from os.path import abspath, exists
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

from ..dynamic_module_utils import custom_object_save
from ..feature_extraction_utils import PreTrainedFeatureExtractor
from ..image_processing_utils import BaseImageProcessor
from ..modelcard import ModelCard
from ..models.auto.configuration_auto import AutoConfig
from ..tokenization_utils import PreTrainedTokenizer
from ..utils import (
    ModelOutput,
    add_end_docstrings,
    infer_framework,
    is_tf_available,
    is_torch_available,
    is_torch_cuda_available,
    is_torch_xpu_available,
    logging,
)


GenericTensor = Union[List["GenericTensor"], "torch.Tensor", "tf.Tensor"]

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TFAutoModel

if is_torch_available():
    import torch
    from torch.utils.data import DataLoader, Dataset

    from ..models.auto.modeling_auto import AutoModel

    # Re-export for backward compatibility
    from .pt_utils import KeyDataset
else:
    Dataset = None
    KeyDataset = None

if TYPE_CHECKING:
    from ..modeling_tf_utils import TFPreTrainedModel
    from ..modeling_utils import PreTrainedModel


logger = logging.get_logger(__name__)


def no_collate_fn(items):
    if len(items) != 1:
        raise ValueError("This collate_fn is meant to be used with batch_size=1")
    return items[0]


def _pad(items, key, padding_value, padding_side):
    batch_size = len(items)
    if isinstance(items[0][key], torch.Tensor):
        # Others include `attention_mask` etc...
        shape = items[0][key].shape
        dim = len(shape)
        if key in ["pixel_values", "image"]:
            # This is probably an image, so padding shouldn't be necessary
            # B, C, H, W
            return torch.cat([item[key] for item in items], dim=0)
        elif dim == 4 and key == "input_features":
            # This is probably a batched mel spectrogram
            return torch.cat([item[key] for item in items], dim=0)
        max_length = max(item[key].shape[1] for item in items)
        min_length = min(item[key].shape[1] for item in items)
        dtype = items[0][key].dtype

        if dim == 2:
            if max_length == min_length:
                # Bypass for `ImageGPT` which doesn't provide a padding value, yet
                # we can consistently pad since the size should be matching
                return torch.cat([item[key] for item in items], dim=0)
            tensor = torch.zeros((batch_size, max_length), dtype=dtype) + padding_value
        elif dim == 3:
            tensor = torch.zeros((batch_size, max_length, shape[-1]), dtype=dtype) + padding_value
        elif dim == 4:
            tensor = torch.zeros((batch_size, max_length, shape[-2], shape[-1]), dtype=dtype) + padding_value

        for i, item in enumerate(items):
            if dim == 2:
                if padding_side == "left":
                    tensor[i, -len(item[key][0]) :] = item[key][0].clone()
                else:
                    tensor[i, : len(item[key][0])] = item[key][0].clone()
            elif dim == 3:
                if padding_side == "left":
                    tensor[i, -len(item[key][0]) :, :] = item[key][0].clone()
                else:
                    tensor[i, : len(item[key][0]), :] = item[key][0].clone()
            elif dim == 4:
                if padding_side == "left":
                    tensor[i, -len(item[key][0]) :, :, :] = item[key][0].clone()
                else:
                    tensor[i, : len(item[key][0]), :, :] = item[key][0].clone()

        return tensor
    else:
        return [item[key] for item in items]
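
# A minimal illustration of `_pad` on dim == 2 tensors (the values are made up):
#
#     items = [{"input_ids": torch.tensor([[1, 2, 3]])},
#              {"input_ids": torch.tensor([[4, 5]])}]
#     _pad(items, "input_ids", padding_value=0, padding_side="right")
#     # -> tensor([[1, 2, 3],
#     #            [4, 5, 0]])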


def pad_collate_fn(tokenizer, feature_extractor):
    # Tokenizer
    t_padding_side = None
    # Feature extractor
    f_padding_side = None
    if tokenizer is None and feature_extractor is None:
        raise ValueError("Pipeline without tokenizer or feature_extractor cannot do batching")
    if tokenizer is not None:
        if tokenizer.pad_token_id is None:
            raise ValueError(
                "Pipeline with tokenizer without pad_token cannot do batching. You can try to set it with "
                "`pipe.tokenizer.pad_token_id = model.config.eos_token_id`."
            )
        else:
            t_padding_value = tokenizer.pad_token_id
            t_padding_side = tokenizer.padding_side
    if feature_extractor is not None:
        # Feature extractor can be images, where no padding is expected
        f_padding_value = getattr(feature_extractor, "padding_value", None)
        f_padding_side = getattr(feature_extractor, "padding_side", None)

    if t_padding_side is not None and f_padding_side is not None and t_padding_side != f_padding_side:
        raise ValueError(
            f"The feature extractor, and tokenizer don't agree on padding side {t_padding_side} != {f_padding_side}"
        )
    padding_side = "right"
    if t_padding_side is not None:
        padding_side = t_padding_side
    if f_padding_side is not None:
        padding_side = f_padding_side

    def inner(items):
        keys = set(items[0].keys())
        for item in items:
            if set(item.keys()) != keys:
                raise ValueError(
                    f"The elements of the batch contain different keys. Cannot batch them ({set(item.keys())} !="
                    f" {keys})"
                )
        # input_values, input_pixels, input_ids, ...
        padded = {}
        for key in keys:
            if key in {"input_ids"}:
                # ImageGPT uses a feature extractor
                if tokenizer is None and feature_extractor is not None:
                    _padding_value = f_padding_value
                else:
                    _padding_value = t_padding_value
            elif key in {"input_values", "pixel_values", "input_features"}:
                _padding_value = f_padding_value
            elif key in {"p_mask", "special_tokens_mask"}:
                _padding_value = 1
            elif key in {"attention_mask", "token_type_ids"}:
                _padding_value = 0
            else:
                # This is likely another random key maybe even user provided
                _padding_value = 0
            padded[key] = _pad(items, key, _padding_value, padding_side)
        return padded

    return inner
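
# A minimal usage sketch (checkpoint name illustrative, not part of this module):
# the returned closure is what `DataLoader` receives as its `collate_fn`.
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     collate = pad_collate_fn(tokenizer, None)
#     batch = collate([tokenizer("short", return_tensors="pt"),
#                      tokenizer("a much longer sentence", return_tensors="pt")])
#     # batch["input_ids"] is a single tensor, right-padded with tokenizer.pad_token_id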


def infer_framework_load_model(
    model,
    config: AutoConfig,
    model_classes: Optional[Dict[str, Tuple[type]]] = None,
    task: Optional[str] = None,
    framework: Optional[str] = None,
    **model_kwargs,
):
    """
    Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model).

    If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is
    actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to
    instantiate the model twice, this model is returned for use by the pipeline.

    If both frameworks are installed and available for `model`, PyTorch is selected.

    Args:
        model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]):
            The model to infer the framework from. If `str`, a checkpoint name.
        config ([`AutoConfig`]):
            The config associated with the model, used to help pick the correct class.
        model_classes (dictionary `str` to `type`, *optional*):
            A mapping from framework name to model class.
        task (`str`):
            The task defining which pipeline will be returned.
        model_kwargs:
            Additional dictionary of keyword arguments passed along to the model's `from_pretrained(...,
            **model_kwargs)` function.

    Returns:
        `Tuple`: A tuple (framework, model).
    """
    if not is_tf_available() and not is_torch_available():
        raise RuntimeError(
            "At least one of TensorFlow 2.0 or PyTorch should be installed. "
            "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
            "To install PyTorch, read the instructions at https://pytorch.org/."
        )
    if isinstance(model, str):
        model_kwargs["_from_pipeline"] = task
        class_tuple = ()
        look_pt = is_torch_available() and framework in {"pt", None}
        look_tf = is_tf_available() and framework in {"tf", None}
        if model_classes:
            if look_pt:
                class_tuple = class_tuple + model_classes.get("pt", (AutoModel,))
            if look_tf:
                class_tuple = class_tuple + model_classes.get("tf", (TFAutoModel,))
        if config.architectures:
            classes = []
            for architecture in config.architectures:
                transformers_module = importlib.import_module("transformers")
                if look_pt:
                    _class = getattr(transformers_module, architecture, None)
                    if _class is not None:
                        classes.append(_class)
                if look_tf:
                    _class = getattr(transformers_module, f"TF{architecture}", None)
                    if _class is not None:
                        classes.append(_class)
            class_tuple = class_tuple + tuple(classes)

        if len(class_tuple) == 0:
            raise ValueError(f"Pipeline cannot infer suitable model classes from {model}")

        all_traceback = {}
        for model_class in class_tuple:
            kwargs = model_kwargs.copy()
            if framework == "pt" and model.endswith(".h5"):
                kwargs["from_tf"] = True
                logger.warning(
                    "Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. "
                    "Trying to load the model with PyTorch."
                )
            elif framework == "tf" and model.endswith(".bin"):
                kwargs["from_pt"] = True
                logger.warning(
                    "Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. "
                    "Trying to load the model with Tensorflow."
                )

            try:
                model = model_class.from_pretrained(model, **kwargs)
                if hasattr(model, "eval"):
                    model = model.eval()
                # Stop loading on the first successful load.
                break
            except (OSError, ValueError):
                all_traceback[model_class.__name__] = traceback.format_exc()
                continue

        if isinstance(model, str):
            error = ""
            for class_name, trace in all_traceback.items():
                error += f"while loading with {class_name}, an error is thrown:\n{trace}\n"
            raise ValueError(
                f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
            )

    if framework is None:
        framework = infer_framework(model.__class__)
    return framework, model
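
# A minimal usage sketch (checkpoint name illustrative): with `model` passed as a
# string, the config drives which architecture class is tried first.
#
#     config = AutoConfig.from_pretrained("distilbert-base-uncased")
#     framework, model = infer_framework_load_model("distilbert-base-uncased", config)
#     # -> ("pt", <loaded model instance>) when PyTorch is installed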


def infer_framework_from_model(
    model,
    model_classes: Optional[Dict[str, Tuple[type]]] = None,
    task: Optional[str] = None,
    framework: Optional[str] = None,
    **model_kwargs,
):
    """
    Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model).

    If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is
    actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to
    instantiate the model twice, this model is returned for use by the pipeline.

    If both frameworks are installed and available for `model`, PyTorch is selected.

    Args:
        model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]):
            The model to infer the framework from. If `str`, a checkpoint name.
        model_classes (dictionary `str` to `type`, *optional*):
            A mapping from framework name to model class.
        task (`str`):
            The task defining which pipeline will be returned.
        model_kwargs:
            Additional dictionary of keyword arguments passed along to the model's `from_pretrained(...,
            **model_kwargs)` function.

    Returns:
        `Tuple`: A tuple (framework, model).
    """
    if isinstance(model, str):
        config = AutoConfig.from_pretrained(model, _from_pipeline=task, **model_kwargs)
    else:
        config = model.config
    return infer_framework_load_model(
        model, config, model_classes=model_classes, _from_pipeline=task, task=task, framework=framework, **model_kwargs
    )
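
# A minimal usage sketch (checkpoint name illustrative): unlike
# `infer_framework_load_model`, this helper resolves the `AutoConfig` itself.
#
#     framework, model = infer_framework_from_model("distilbert-base-uncased")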


def get_framework(model, revision: Optional[str] = None):
    """
    Select framework (TensorFlow or PyTorch) to use.

    Args:
        model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]):
            If both frameworks are installed, picks the one corresponding to the model passed (either a model class or
            the model name). If no specific model is provided, defaults to using PyTorch.
    """
    warnings.warn(
        "`get_framework` is deprecated and will be removed in v5, use `infer_framework_from_model` instead.",
        FutureWarning,
    )
    if not is_tf_available() and not is_torch_available():
        raise RuntimeError(
            "At least one of TensorFlow 2.0 or PyTorch should be installed. "
            "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
            "To install PyTorch, read the instructions at https://pytorch.org/."
        )
    if isinstance(model, str):
        if is_torch_available() and not is_tf_available():
            model = AutoModel.from_pretrained(model, revision=revision)
        elif is_tf_available() and not is_torch_available():
            model = TFAutoModel.from_pretrained(model, revision=revision)
        else:
            try:
                model = AutoModel.from_pretrained(model, revision=revision)
            except OSError:
                model = TFAutoModel.from_pretrained(model, revision=revision)

    framework = infer_framework(model.__class__)
    return framework


def get_default_model_and_revision(
    targeted_task: Dict, framework: Optional[str], task_options: Optional[Any]
) -> Union[str, Tuple[str, str]]:
    """
    Select a default model to use for a given task. Defaults to pytorch if ambiguous.

    Args:
        targeted_task (`Dict`):
           Dictionary representing the given task, which should contain the default models.

        framework (`str`, None)
           "pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet.

        task_options (`Any`, None)
           Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for
           the translation task.

    Returns:

        `str`: The model string representing the default model for this pipeline.
    """
    if is_torch_available() and not is_tf_available():
        framework = "pt"
    elif is_tf_available() and not is_torch_available():
        framework = "tf"

    defaults = targeted_task["default"]
    if task_options:
        if task_options not in defaults:
            raise ValueError(f"The task does not provide any default models for options {task_options}")
        default_models = defaults[task_options]["model"]
    elif "model" in defaults:
        default_models = targeted_task["default"]["model"]
    else:
        # XXX This error message needs to be updated to be more generic if more tasks are going to become
        # parametrized
        raise ValueError('The task defaults can\'t be correctly selected. You probably meant "translation_XX_to_YY"')

    if framework is None:
        framework = "pt"

    return default_models[framework]
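
# A minimal sketch with a hypothetical task dictionary shaped like the registry
# entries this function expects:
#
#     targeted_task = {"default": {"model": {"pt": ("distilbert-base-cased", "main")}}}
#     get_default_model_and_revision(targeted_task, framework=None, task_options=None)
#     # -> ("distilbert-base-cased", "main") when PyTorch is available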


class PipelineException(Exception):
    """
    Raised by a [`Pipeline`] when handling `__call__`.

    Args:
        task (`str`): The task of the pipeline.
        model (`str`): The model used by the pipeline.
        reason (`str`): The error message to display.
    """

    def __init__(self, task: str, model: str, reason: str):
        super().__init__(reason)

        self.task = task
        self.model = model


class ArgumentHandler(ABC):
    """
    Base interface for handling arguments for each [`~pipelines.Pipeline`].
    """

    @abstractmethod
    def __call__(self, *args, **kwargs):
        raise NotImplementedError()


class PipelineDataFormat:
    """
    Base class for all the pipeline supported data format both for reading and writing. Supported data formats
    currently includes:

    - JSON
    - CSV
    - stdin/stdout (pipe)

    `PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets columns to
    pipelines keyword arguments through the `dataset_kwarg_1=dataset_column_1` format.

    Args:
        output_path (`str`): Where to save the outgoing data.
        input_path (`str`): Where to look for the input data.
        column (`str`): The column to read.
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite the `output_path`.
    """

    SUPPORTED_FORMATS = ["json", "csv", "pipe"]

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite: bool = False,
    ):
        self.output_path = output_path
        self.input_path = input_path
        self.column = column.split(",") if column is not None else [""]
        self.is_multi_columns = len(self.column) > 1

        if self.is_multi_columns:
            self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column]

        if output_path is not None and not overwrite:
            if exists(abspath(self.output_path)):
                raise OSError(f"{self.output_path} already exists on disk")

        if input_path is not None:
            if not exists(abspath(self.input_path)):
                raise OSError(f"{self.input_path} doesn't exist on disk")

    @abstractmethod
    def __iter__(self):
        raise NotImplementedError()

    @abstractmethod
    def save(self, data: Union[dict, List[dict]]):
        """
        Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`].

        Args:
            data (`dict` or list of `dict`): The data to store.
        """
        raise NotImplementedError()

    def save_binary(self, data: Union[dict, List[dict]]) -> str:
        """
        Save the provided data object as a pickle-formatted binary data on the disk.

        Args:
            data (`dict` or list of `dict`): The data to store.

        Returns:
            `str`: Path where the data has been saved.
        """
        path, _ = os.path.splitext(self.output_path)
        binary_path = os.path.extsep.join((path, "pickle"))

        with open(binary_path, "wb+") as f_output:
            pickle.dump(data, f_output)

        return binary_path

    @staticmethod
    def from_str(
        format: str,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ) -> "PipelineDataFormat":
        """
        Creates an instance of the right subclass of [`~pipelines.PipelineDataFormat`] depending on `format`.

        Args:
            format (`str`):
                The format of the desired pipeline. Acceptable values are `"json"`, `"csv"` or `"pipe"`.
            output_path (`str`, *optional*):
                Where to save the outgoing data.
            input_path (`str`, *optional*):
                Where to look for the input data.
            column (`str`, *optional*):
                The column to read.
            overwrite (`bool`, *optional*, defaults to `False`):
                Whether or not to overwrite the `output_path`.

        Returns:
            [`~pipelines.PipelineDataFormat`]: The proper data format.
        """
        if format == "json":
            return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        elif format == "csv":
            return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        elif format == "pipe":
            return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        else:
            raise KeyError(f"Unknown reader {format} (Available readers are json/csv/pipe)")


class CsvPipelineDataFormat(PipelineDataFormat):
    """
    Support for pipelines using CSV data format.

    Args:
        output_path (`str`): Where to save the outgoing data.
        input_path (`str`): Where to look for the input data.
        column (`str`): The column to read.
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite the `output_path`.
    """

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ):
        super().__init__(output_path, input_path, column, overwrite=overwrite)

    def __iter__(self):
        with open(self.input_path, "r") as f:
            reader = csv.DictReader(f)
            for row in reader:
                if self.is_multi_columns:
                    yield {k: row[c] for k, c in self.column}
                else:
                    yield row[self.column[0]]

    def save(self, data: List[dict]):
        """
        Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`].

        Args:
            data (`List[dict]`): The data to store.
        """
        with open(self.output_path, "w") as f:
            if len(data) > 0:
                writer = csv.DictWriter(f, list(data[0].keys()))
                writer.writeheader()
                writer.writerows(data)


class JsonPipelineDataFormat(PipelineDataFormat):
    """
    Support for pipelines using JSON file format.

    Args:
        output_path (`str`): Where to save the outgoing data.
        input_path (`str`): Where to look for the input data.
        column (`str`): The column to read.
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite the `output_path`.
    """

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ):
        super().__init__(output_path, input_path, column, overwrite=overwrite)

        with open(input_path, "r") as f:
            self._entries = json.load(f)

    def __iter__(self):
        for entry in self._entries:
            if self.is_multi_columns:
                yield {k: entry[c] for k, c in self.column}
            else:
                yield entry[self.column[0]]

    def save(self, data: dict):
        """
        Save the provided data object in a json file.

        Args:
            data (`dict`): The data to store.
        """
        with open(self.output_path, "w") as f:
            json.dump(data, f)


class PipedPipelineDataFormat(PipelineDataFormat):
    """
    Read data from piped input to the python process. For multi-column data, columns should be separated by \t

    If columns are provided, then the output will be a dictionary with {column_x: value_x}

    Args:
        output_path (`str`): Where to save the outgoing data.
        input_path (`str`): Where to look for the input data.
        column (`str`): The column to read.
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite the `output_path`.
    """

    def __iter__(self):
        for line in sys.stdin:
            # Split for multi-columns
            if "\t" in line:
                line = line.split("\t")
                if self.column:
                    # Dictionary to map arguments
                    yield {kwargs: l for (kwargs, _), l in zip(self.column, line)}
                else:
                    yield tuple(line)

            # No dictionary to map arguments
            else:
                yield line

    def save(self, data: dict):
        """
        Print the data.

        Args:
            data (`dict`): The data to store.
        """
        print(data)

    def save_binary(self, data: Union[dict, List[dict]]) -> str:
        if self.output_path is None:
            raise KeyError(
                "When using piped input on pipeline outputting large object requires an output file path. "
                "Please provide such output path through --output argument."
            )

        return super().save_binary(data)


class _ScikitCompat(ABC):
    """
    Interface layer for the Scikit and Keras compatibility.
    """

    @abstractmethod
    def transform(self, X):
        raise NotImplementedError()

    @abstractmethod
    def predict(self, X):
        raise NotImplementedError()


PIPELINE_INIT_ARGS = r"""
    Arguments:
        model ([`PreTrainedModel`] or [`TFPreTrainedModel`]):
            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
            [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow.
        tokenizer ([`PreTrainedTokenizer`]):
            The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
            [`PreTrainedTokenizer`].
        modelcard (`str` or [`ModelCard`], *optional*):
            Model card attributed to the model for this pipeline.
        framework (`str`, *optional*):
            The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
            installed.

            If no framework is specified, will default to the one currently installed. If no framework is specified and
            both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is
            provided.
        task (`str`, defaults to `""`):
            A task-identifier for the pipeline.
        num_workers (`int`, *optional*, defaults to 8):
            When the pipeline will use *DataLoader* (when passing a dataset, on GPU for a PyTorch model), the number of
            workers to be used.
        batch_size (`int`, *optional*, defaults to 1):
            When the pipeline will use *DataLoader* (when passing a dataset, on GPU for a PyTorch model), the size of
            the batch to use. For inference this is not always beneficial, please read [Batching with
            pipelines](https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching).
        args_parser ([`~pipelines.ArgumentHandler`], *optional*):
            Reference to the object in charge of parsing supplied pipeline parameters.
        device (`int`, *optional*, defaults to -1):
            Device ordinal for CPU/GPU support. Setting this to -1 will use the CPU, while a positive integer will run
            the model on the associated CUDA device id. You can pass a native `torch.device` or a `str` too.
        binary_output (`bool`, *optional*, defaults to `False`):
            Flag indicating whether the output of the pipeline should be in a binary format (i.e., pickle) or as raw
            text.
"""

if is_torch_available():
    from transformers.pipelines.pt_utils import (
        PipelineChunkIterator,
        PipelineDataset,
        PipelineIterator,
        PipelinePackIterator,
    )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Pipeline(_ScikitCompat):
    """
    The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across
    different pipelines.

    Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following
    operations:

        Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output

    Pipeline supports running on CPU or GPU through the device argument (see below).

    Some pipelines, like [`FeatureExtractionPipeline`] (`'feature-extraction'`), output large tensor objects as
    nested lists. In order to avoid dumping such large structures as textual data we provide the `binary_output`
    constructor argument. If set to `True`, the output will be stored in the pickle format.
    """

    default_input_names = None

    def __init__(
        self,
        model: Union["PreTrainedModel", "TFPreTrainedModel"],
        tokenizer: Optional[PreTrainedTokenizer] = None,
        feature_extractor: Optional[PreTrainedFeatureExtractor] = None,
        image_processor: Optional[BaseImageProcessor] = None,
        modelcard: Optional[ModelCard] = None,
        framework: Optional[str] = None,
        task: str = "",
        args_parser: ArgumentHandler = None,
        device: Union[int, "torch.device"] = None,
        torch_dtype: Optional[Union[str, "torch.dtype"]] = None,
        binary_output: bool = False,
        **kwargs,
    ):
        if framework is None:
            framework, model = infer_framework_load_model(model, config=model.config)

        self.task = task
        self.model = model
        self.tokenizer = tokenizer
        self.feature_extractor = feature_extractor
        self.image_processor = image_processor
        self.modelcard = modelcard
        self.framework = framework

        # `accelerate` device map
        hf_device_map = getattr(self.model, "hf_device_map", None)

        if hf_device_map is not None and device is not None:
            raise ValueError(
                "The model has been loaded with `accelerate` and therefore cannot be moved to a specific device. Please "
                "discard the `device` argument when creating your pipeline object."
            )

        if device is None:
            if hf_device_map is not None:
                # Take the first device used by `accelerate`.
                device = next(iter(hf_device_map.values()))
            else:
                device = -1

        if is_torch_available() and self.framework == "pt":
            if isinstance(device, torch.device):
                if device.type == "xpu" and not is_torch_xpu_available(check_device=True):
                    raise ValueError(f'{device} is not available, you should use device="cpu" instead')
                self.device = device
            elif isinstance(device, str):
                if "xpu" in device and not is_torch_xpu_available(check_device=True):
                    raise ValueError(f'{device} is not available, you should use device="cpu" instead')
                self.device = torch.device(device)
            elif device < 0:
                self.device = torch.device("cpu")
            elif is_torch_cuda_available():
                self.device = torch.device(f"cuda:{device}")
            elif is_torch_xpu_available(check_device=True):
                self.device = torch.device(f"xpu:{device}")
            else:
                raise ValueError(f"{device} unrecognized or not available.")
        else:
            self.device = device if device is not None else -1
        self.torch_dtype = torch_dtype
        self.binary_output = binary_output

        # We shouldn't call `model.to()` for models loaded with accelerate
        if (
            self.framework == "pt"
            and self.device is not None
            and not (isinstance(self.device, int) and self.device < 0)
            and hf_device_map is None
        ):
            self.model.to(self.device)

        # Update config and generation_config with task specific parameters
        task_specific_params = self.model.config.task_specific_params
        if task_specific_params is not None and task in task_specific_params:
            self.model.config.update(task_specific_params.get(task))
            if self.model.can_generate():
                self.model.generation_config.update(**task_specific_params.get(task))

        self.call_count = 0
        self._batch_size = kwargs.pop("batch_size", None)
        self._num_workers = kwargs.pop("num_workers", None)
        self._preprocess_params, self._forward_params, self._postprocess_params = self._sanitize_parameters(**kwargs)

        if self.image_processor is None and self.feature_extractor is not None:
            if isinstance(self.feature_extractor, BaseImageProcessor):
                # Backward compatible change, if users called
                # ImageSegmentationPipeline(.., feature_extractor=MyFeatureExtractor())
                # then we should keep working
                self.image_processor = self.feature_extractor

    def save_pretrained(self, save_directory: str, safe_serialization: bool = True):
        """
        Save the pipeline's model and tokenizer.

        Args:
            save_directory (`str`):
                A path to the directory where the pipeline will be saved. It will be created if it doesn't exist.
            safe_serialization (`bool`):
                Whether to save the model using `safetensors` or the traditional way for PyTorch or TensorFlow.
        """
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)

        if hasattr(self, "_registered_impl"):
            # Add info to the config
            pipeline_info = self._registered_impl.copy()
            custom_pipelines = {}
            for task, info in pipeline_info.items():
                if info["impl"] != self.__class__:
                    continue

                info = info.copy()
                module_name = info["impl"].__module__
                last_module = module_name.split(".")[-1]
                # Change classes into their names/full names
                info["impl"] = f"{last_module}.{info['impl'].__name__}"
                info["pt"] = tuple(c.__name__ for c in info["pt"])
                info["tf"] = tuple(c.__name__ for c in info["tf"])

                custom_pipelines[task] = info
            self.model.config.custom_pipelines = custom_pipelines
            # Save the pipeline custom code
            custom_object_save(self, save_directory)

        self.model.save_pretrained(save_directory, safe_serialization=safe_serialization)

        if self.tokenizer is not None:
            self.tokenizer.save_pretrained(save_directory)

        if self.feature_extractor is not None:
            self.feature_extractor.save_pretrained(save_directory)

        if self.image_processor is not None:
            self.image_processor.save_pretrained(save_directory)

        if self.modelcard is not None:
            self.modelcard.save_pretrained(save_directory)

    def transform(self, X):
        """
        Scikit / Keras interface to transformers' pipelines. This method will forward to `__call__()`.
        """
        return self(X)

    def predict(self, X):
        """
        Scikit / Keras interface to transformers' pipelines. This method will forward to `__call__()`.
        """
        return self(X)

    @contextmanager
    def device_placement(self):
        """
        Context manager allowing tensor allocation on the user-specified device in a framework-agnostic way.

        Returns:
            Context manager

        Examples:

        ```python
        # Explicitly ask for tensor allocation on CUDA device :0
        pipe = pipeline(..., device=0)
        with pipe.device_placement():
            # Every framework-specific tensor allocation will be done on the requested device
            output = pipe(...)
        ```"""
        if self.framework == "tf":
            with tf.device("/CPU:0" if self.device == -1 else f"/device:GPU:{self.device}"):
                yield
        else:
            if self.device.type == "cuda":
                with torch.cuda.device(self.device):
                    yield
            else:
                yield

    def ensure_tensor_on_device(self, **inputs):
        """
        Ensure PyTorch tensors are on the specified device.

        Args:
            inputs (keyword arguments that should be `torch.Tensor`, the rest is ignored):
                The tensors to place on `self.device`.
            Recursive on lists **only**.

        Return:
            `Dict[str, torch.Tensor]`: The same as `inputs` but on the proper device.
        """
        return self._ensure_tensor_on_device(inputs, self.device)

    def _ensure_tensor_on_device(self, inputs, device):
        if isinstance(inputs, ModelOutput):
            return ModelOutput(
                {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()}
            )
        elif isinstance(inputs, dict):
            return {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()}
        elif isinstance(inputs, UserDict):
            return UserDict({name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()})
        elif isinstance(inputs, list):
            return [self._ensure_tensor_on_device(item, device) for item in inputs]
        elif isinstance(inputs, tuple):
            return tuple([self._ensure_tensor_on_device(item, device) for item in inputs])
        elif isinstance(inputs, torch.Tensor):
            if device == torch.device("cpu") and inputs.dtype in {torch.float16, torch.bfloat16}:
                inputs = inputs.float()
            return inputs.to(device)
        else:
            return inputs
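
    # A minimal illustration (values made up): nested containers are traversed
    # recursively, so a dict of lists of tensors moves as one unit.
    #
    #     pipe._ensure_tensor_on_device(
    #         {"logits": [torch.zeros(2), torch.ones(2)]}, torch.device("cpu")
    #     )
    #     # -> {"logits": [tensor(...), tensor(...)]}, every tensor now on the cpu device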

    def check_model_type(self, supported_models: Union[List[str], dict]):
        """
        Check if the model class is supported by the pipeline.

        Args:
            supported_models (`List[str]` or `dict`):
                The list of models supported by the pipeline, or a dictionary with model class values.
        """
        if not isinstance(supported_models, list):  # Create from a model mapping
            supported_models_names = []
            for _, model_name in supported_models.items():
                # Mapping can now contain tuples of models for the same configuration.
                if isinstance(model_name, tuple):
                    supported_models_names.extend(list(model_name))
                else:
                    supported_models_names.append(model_name)
            if hasattr(supported_models, "_model_mapping"):
                for _, model in supported_models._model_mapping._extra_content.items():
                    if isinstance(model, tuple):
                        supported_models_names.extend([m.__name__ for m in model])
                    else:
                        supported_models_names.append(model.__name__)
            supported_models = supported_models_names
        if self.model.__class__.__name__ not in supported_models:
            logger.error(
                f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are"
                f" {supported_models}."
            )

    @abstractmethod
    def _sanitize_parameters(self, **pipeline_parameters):
        """
        _sanitize_parameters will be called with any excess named arguments from either the `__init__` or `__call__`
        methods. It should return 3 dictionaries of the resolved parameters used by the respective `preprocess`,
        `forward` and `postprocess` methods. Do not fill the dictionaries if the caller didn't specify the kwargs. This
        lets you keep defaults in function signatures, which is more "natural".

        It is not meant to be called directly; it will be automatically called, and the final parameters resolved, by
        `__init__` and `__call__`.
        """
        raise NotImplementedError("_sanitize_parameters not implemented")

    @abstractmethod
    def preprocess(self, input_: Any, **preprocess_parameters: Dict) -> Dict[str, GenericTensor]:
        """
        Preprocess will take the `input_` of a specific pipeline and return a dictionary of everything necessary for
        `_forward` to run properly. It should contain at least one tensor, but might have arbitrary other items.
        """
        raise NotImplementedError("preprocess not implemented")

    @abstractmethod
    def _forward(self, input_tensors: Dict[str, GenericTensor], **forward_parameters: Dict) -> ModelOutput:
        """
        _forward will receive the prepared dictionary from `preprocess` and run it on the model. This method might
        involve the GPU or the CPU and should be agnostic to it. Isolating this function is the reason for `preprocess`
        and `postprocess` to exist, so that the hot path (this method) can run as fast as possible.

        It is not meant to be called directly, `forward` is preferred. It is basically the same but contains additional
        code surrounding `_forward` making sure tensors and models are on the same device, disabling the training part
        of the code (leading to faster inference).
        """
        raise NotImplementedError("_forward not implemented")

    @abstractmethod
    def postprocess(self, model_outputs: ModelOutput, **postprocess_parameters: Dict) -> Any:
        """
        Postprocess will receive the raw outputs of the `_forward` method, generally tensors, and reformat them into
        something more friendly. Generally it will output a list or a dict of results (containing just strings and
        numbers).
        """
        raise NotImplementedError("postprocess not implemented")

    def get_inference_context(self):
        return torch.no_grad

    def forward(self, model_inputs, **forward_params):
        with self.device_placement():
            if self.framework == "tf":
                model_inputs["training"] = False
                model_outputs = self._forward(model_inputs, **forward_params)
            elif self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    model_outputs = self._forward(model_inputs, **forward_params)
                    model_outputs = self._ensure_tensor_on_device(model_outputs, device=torch.device("cpu"))
            else:
                raise ValueError(f"Framework {self.framework} is not supported")
        return model_outputs

    def get_iterator(
        self, inputs, num_workers: int, batch_size: int, preprocess_params, forward_params, postprocess_params
    ):
        if isinstance(inputs, collections.abc.Sized):
            dataset = PipelineDataset(inputs, self.preprocess, preprocess_params)
        else:
            if num_workers > 1:
                logger.warning(
                    "For iterable dataset using num_workers>1 is likely to result"
                    " in errors since everything is iterable, setting `num_workers=1`"
                    " to guarantee correctness."
                )
                num_workers = 1
            dataset = PipelineIterator(inputs, self.preprocess, preprocess_params)
        if "TOKENIZERS_PARALLELISM" not in os.environ:
            logger.info("Disabling tokenizer parallelism, we're using DataLoader multithreading already")
            os.environ["TOKENIZERS_PARALLELISM"] = "false"
        # TODO hack by collating feature_extractor and image_processor
        feature_extractor = self.feature_extractor if self.feature_extractor is not None else self.image_processor
        collate_fn = no_collate_fn if batch_size == 1 else pad_collate_fn(self.tokenizer, feature_extractor)
        dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, collate_fn=collate_fn)
        model_iterator = PipelineIterator(dataloader, self.forward, forward_params, loader_batch_size=batch_size)
        final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params)
        return final_iterator

    def __call__(self, inputs, *args, num_workers=None, batch_size=None, **kwargs):
        if args:
            logger.warning(f"Ignoring args : {args}")

        if num_workers is None:
            if self._num_workers is None:
                num_workers = 0
            else:
                num_workers = self._num_workers
        if batch_size is None:
            if self._batch_size is None:
                batch_size = 1
            else:
                batch_size = self._batch_size

        preprocess_params, forward_params, postprocess_params = self._sanitize_parameters(**kwargs)

        # Fuse __init__ params and __call__ params without modifying the __init__ ones.
        preprocess_params = {**self._preprocess_params, **preprocess_params}
        forward_params = {**self._forward_params, **forward_params}
        postprocess_params = {**self._postprocess_params, **postprocess_params}

        self.call_count += 1
        if self.call_count > 10 and self.framework == "pt" and self.device.type == "cuda":
            warnings.warn(
                "You seem to be using the pipelines sequentially on GPU. In order to maximize efficiency please use a"
                " dataset",
                UserWarning,
            )

        is_dataset = Dataset is not None and isinstance(inputs, Dataset)
        is_generator = isinstance(inputs, types.GeneratorType)
        is_list = isinstance(inputs, list)

        is_iterable = is_dataset or is_generator or is_list

        # TODO make the get_iterator work also for `tf` (and `flax`).
        can_use_iterator = self.framework == "pt" and (is_dataset or is_generator or is_list)

        if is_list:
            if can_use_iterator:
                final_iterator = self.get_iterator(
                    inputs, num_workers, batch_size, preprocess_params, forward_params, postprocess_params
                )
                outputs = list(final_iterator)
                return outputs
            else:
                return self.run_multi(inputs, preprocess_params, forward_params, postprocess_params)
        elif can_use_iterator:
            return self.get_iterator(
                inputs, num_workers, batch_size, preprocess_params, forward_params, postprocess_params
            )
        elif is_iterable:
            return self.iterate(inputs, preprocess_params, forward_params, postprocess_params)
        elif self.framework == "pt" and isinstance(self, ChunkPipeline):
            return next(
                iter(
                    self.get_iterator(
                        [inputs], num_workers, batch_size, preprocess_params, forward_params, postprocess_params
                    )
                )
            )
        else:
            return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)

    def run_multi(self, inputs, preprocess_params, forward_params, postprocess_params):
        return [self.run_single(item, preprocess_params, forward_params, postprocess_params) for item in inputs]

    def run_single(self, inputs, preprocess_params, forward_params, postprocess_params):
        model_inputs = self.preprocess(inputs, **preprocess_params)
        model_outputs = self.forward(model_inputs, **forward_params)
        outputs = self.postprocess(model_outputs, **postprocess_params)
        return outputs

    def iterate(self, inputs, preprocess_params, forward_params, postprocess_params):
        # This function should become `get_iterator` again, this is a temporary
        # easy solution.
        for input_ in inputs:
            yield self.run_single(input_, preprocess_params, forward_params, postprocess_params)
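
# A minimal batched-usage sketch (task name and inputs illustrative): lists,
# `Dataset`s and generators all stream through `get_iterator` / `iterate` above,
# assuming the `pipeline` factory from `transformers` and a hypothetical generator.
#
#     pipe = pipeline("text-classification", device=0)
#     for out in pipe(sentence_generator(), batch_size=8, num_workers=2):
#         ...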


class ChunkPipeline(Pipeline):
    def run_single(self, inputs, preprocess_params, forward_params, postprocess_params):
        all_outputs = []
        for model_inputs in self.preprocess(inputs, **preprocess_params):
            model_outputs = self.forward(model_inputs, **forward_params)
            all_outputs.append(model_outputs)
        outputs = self.postprocess(all_outputs, **postprocess_params)
        return outputs

    def get_iterator(
        self, inputs, num_workers: int, batch_size: int, preprocess_params, forward_params, postprocess_params
    ):
        if "TOKENIZERS_PARALLELISM" not in os.environ:
            logger.info("Disabling tokenizer parallelism, we're using DataLoader multithreading already")
            os.environ["TOKENIZERS_PARALLELISM"] = "false"
        if num_workers > 1:
            logger.warning(
                "For ChunkPipeline using num_workers>1 is likely to result in errors since everything is iterable,"
                " setting `num_workers=1` to guarantee correctness."
            )
            num_workers = 1
        dataset = PipelineChunkIterator(inputs, self.preprocess, preprocess_params)

        # TODO hack by collating feature_extractor and image_processor
        feature_extractor = self.feature_extractor if self.feature_extractor is not None else self.image_processor
        collate_fn = no_collate_fn if batch_size == 1 else pad_collate_fn(self.tokenizer, feature_extractor)
        dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, collate_fn=collate_fn)
        model_iterator = PipelinePackIterator(dataloader, self.forward, forward_params, loader_batch_size=batch_size)
        final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params)
        return final_iterator
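
# A minimal sketch of the ChunkPipeline contract (helpers are hypothetical):
# `preprocess` yields several chunks per input, and `postprocess` receives the
# list of all per-chunk model outputs.
#
#     class MyChunkPipeline(ChunkPipeline):
#         def preprocess(self, inputs):
#             for chunk in split(inputs):  # hypothetical helper
#                 yield {"input_ids": encode(chunk)}  # hypothetical helper
#
#         def postprocess(self, all_outputs):
#             return merge(all_outputs)  # hypothetical helper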


class PipelineRegistry:
    def __init__(self, supported_tasks: Dict[str, Any], task_aliases: Dict[str, str]) -> None:
        self.supported_tasks = supported_tasks
        self.task_aliases = task_aliases

    def get_supported_tasks(self) -> List[str]:
        supported_task = list(self.supported_tasks.keys()) + list(self.task_aliases.keys())
        supported_task.sort()
        return supported_task

    def check_task(self, task: str) -> Tuple[str, Dict, Any]:
        if task in self.task_aliases:
            task = self.task_aliases[task]
        if task in self.supported_tasks:
            targeted_task = self.supported_tasks[task]
            return task, targeted_task, None

        if task.startswith("translation"):
            tokens = task.split("_")
            if len(tokens) == 4 and tokens[0] == "translation" and tokens[2] == "to":
                targeted_task = self.supported_tasks["translation"]
                task = "translation"
                return task, targeted_task, (tokens[1], tokens[3])
            raise KeyError(f"Invalid translation task {task}, use 'translation_XX_to_YY' format")

        raise KeyError(
            f"Unknown task {task}, available tasks are {self.get_supported_tasks() + ['translation_XX_to_YY']}"
        )
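
    # A short illustration of the parametrized-task parsing (registry contents
    # hypothetical):
    #
    #     registry.check_task("translation_en_to_fr")
    #     # -> ("translation", <targeted_task dict>, ("en", "fr"))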

    def register_pipeline(
        self,
        task: str,
        pipeline_class: type,
        pt_model: Optional[Union[type, Tuple[type]]] = None,
        tf_model: Optional[Union[type, Tuple[type]]] = None,
        default: Optional[Dict] = None,
        type: Optional[str] = None,
    ) -> None:
        if task in self.supported_tasks:
            logger.warning(f"{task} is already registered. Overwriting pipeline for task {task}...")

        if pt_model is None:
            pt_model = ()
        elif not isinstance(pt_model, tuple):
            pt_model = (pt_model,)

        if tf_model is None:
            tf_model = ()
        elif not isinstance(tf_model, tuple):
            tf_model = (tf_model,)

        task_impl = {"impl": pipeline_class, "pt": pt_model, "tf": tf_model}

        if default is not None:
            if "model" not in default and ("pt" in default or "tf" in default):
                default = {"model": default}
            task_impl["default"] = default

        if type is not None:
            task_impl["type"] = type

        self.supported_tasks[task] = task_impl
        pipeline_class._registered_impl = {task: task_impl}
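
    # A minimal usage sketch (class and checkpoint names illustrative):
    #
    #     PIPELINE_REGISTRY.register_pipeline(
    #         "my-task",
    #         pipeline_class=MyPipeline,
    #         pt_model=AutoModelForSequenceClassification,
    #         default={"pt": ("user/model-name", "main")},
    #         type="text",
    #     )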

    def to_dict(self):
        return self.supported_tasks