Unverified commit 25b8b8a6 authored by AK391, committed by GitHub

Merge branch 'huggingface:master' into master

parents 23801367 b67f345d
@@ -37,6 +37,7 @@ jobs:
          pip install --upgrade pip
          pip install .[integrations,sklearn,testing,onnxruntime,sentencepiece,torch-speech,vision,timm]
          pip install https://github.com/kpu/kenlm/archive/master.zip
+         python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
      - name: Are GPUs recognized by our DL frameworks
        run: |
@@ -241,6 +242,7 @@ jobs:
          pip install --upgrade pip
          pip install .[integrations,sklearn,testing,onnxruntime,sentencepiece,torch-speech,vision,timm]
          pip install https://github.com/kpu/kenlm/archive/master.zip
+         python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
      - name: Are GPUs recognized by our DL frameworks
        run: |
...
# Image Captioning (vision-encoder-text-decoder model) training example
The following example showcases how to fine-tune a vision-encoder-text-decoder model for image captioning
using the JAX/Flax backend, leveraging the 🤗 Transformers library's [FlaxVisionEncoderDecoderModel](https://huggingface.co/docs/transformers/model_doc/visionencoderdecoder#transformers.FlaxVisionEncoderDecoderModel).
JAX/Flax allows you to trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU.
Models written in JAX/Flax are **immutable** and updated in a purely functional
way, which enables simple and efficient model parallelism.
`run_image_captioning_flax.py` is a lightweight example of how to download and preprocess a dataset from the 🤗 Datasets
library or use your own files (jsonlines or csv), then fine-tune a vision-encoder-text-decoder model on it.
For custom datasets in `jsonlines` format, please see https://huggingface.co/docs/datasets/loading_datasets.html#json-files; you will also find an example below.
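For instance, a minimal `jsonlines` training file could be produced with the sketch below (the column names `image_path` and `caption` and the file paths are only an illustration; point `--image_column` and `--caption_column` at whatever your files actually contain):
```python
import json

# Hypothetical records: each line of the jsonlines file is one JSON object.
records = [
    {"image_path": "data/train2017/000000000009.jpg", "caption": "A plate of food on a table."},
    {"image_path": "data/train2017/000000000025.jpg", "caption": "A giraffe standing in a field."},
]
with open("train.json", "w") as f:
    for record in records:
        f.write(json.dumps(record) + "\n")
```
You would then pass `--train_file train.json` (and similarly `--validation_file`) instead of `--dataset_name`.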
### Download COCO dataset (2017)
This example uses the COCO dataset (2017) through a custom dataset script, which requires users to manually download the
COCO images and annotations before training.
```bash
mkdir data
cd data
wget http://images.cocodataset.org/zips/train2017.zip
wget http://images.cocodataset.org/zips/val2017.zip
wget http://images.cocodataset.org/zips/test2017.zip
wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
wget http://images.cocodataset.org/annotations/image_info_test2017.zip
cd ..
```
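Once the archives are downloaded, you can sanity-check the setup by loading the dataset directly (a sketch under the assumption that the `ydshieh/coco_dataset_script` loading script, used by the training command below, handles the downloaded archives in `data_dir`; note the training command passes an absolute path, `$PWD/data`):
```python
from datasets import load_dataset

# Assumes the COCO zip files were downloaded into ./data as shown above.
ds = load_dataset("ydshieh/coco_dataset_script", "2017", data_dir="data")
print(ds)
```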
### Create a model from a vision encoder model and a text decoder model
Next, we create a [FlaxVisionEncoderDecoderModel](https://huggingface.co/docs/transformers/model_doc/visionencoderdecoder#transformers.FlaxVisionEncoderDecoderModel) instance from a pre-trained vision encoder ([ViT](https://huggingface.co/docs/transformers/model_doc/vit#transformers.FlaxViTModel)) and a pre-trained text decoder ([GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2#transformers.FlaxGPT2Model)):
```bash
python3 create_model_from_encoder_decoder_models.py \
--output_dir model \
--encoder_model_name_or_path google/vit-base-patch16-224-in21k \
--decoder_model_name_or_path gpt2
```
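The `model` directory now contains the model weights together with the feature extractor and tokenizer files. As a quick sanity check (a minimal sketch, not part of the example scripts), everything can be reloaded with:
```python
from transformers import AutoFeatureExtractor, AutoTokenizer, FlaxVisionEncoderDecoderModel

model = FlaxVisionEncoderDecoderModel.from_pretrained("model")
feature_extractor = AutoFeatureExtractor.from_pretrained("model")
tokenizer = AutoTokenizer.from_pretrained("model")
# These ids were set by create_model_from_encoder_decoder_models.py above.
print(model.config.decoder_start_token_id, model.config.pad_token_id)
```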
### Train the model
Finally, we can run the example script to train the model:
```bash
python3 run_image_captioning_flax.py \
--output_dir ./image-captioning-training-results \
--model_name_or_path model \
--dataset_name ydshieh/coco_dataset_script \
--dataset_config_name=2017 \
--data_dir $PWD/data \
--image_column image_path \
--caption_column caption \
--do_train --do_eval --predict_with_generate \
--num_train_epochs 1 \
--eval_steps 500 \
--learning_rate 3e-5 --warmup_steps 0 \
--per_device_train_batch_size 32 \
--per_device_eval_batch_size 32 \
--overwrite_output_dir \
--max_target_length 32 \
--num_beams 8 \
--preprocessing_num_workers 16 \
--logging_steps 10 \
--block_size 16384 \
--push_to_hub
```
This should finish in about 1.5 hours on Cloud TPU, with a validation loss of 2.0153 and a ROUGE-2 score of 14.64
after 1 epoch. Training statistics can be accessed on [Models](https://huggingface.co/ydshieh/image-captioning-training-results/tensorboard).
#!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Create a VisionEncoderDecoderModel instance from pretrained encoder/decoder models.
The cross-attention will be randomly initialized.
"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoTokenizer,
FlaxVisionEncoderDecoderModel,
HfArgumentParser,
)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
output_dir: str = field(
metadata={"help": "The output directory where the model will be written."},
)
encoder_model_name_or_path: str = field(
metadata={
"help": "The encoder model checkpoint for weights initialization. "
"Don't set if you want to train an encoder model from scratch."
},
)
decoder_model_name_or_path: str = field(
metadata={
"help": "The decoder model checkpoint for weights initialization. "
"Don't set if you want to train a decoder model from scratch."
},
)
encoder_config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
)
decoder_config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
)
def main():
parser = HfArgumentParser((ModelArguments,))
(model_args,) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use the explicitly specified encoder config
if model_args.encoder_config_name:
encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
# Otherwise, use the pretrained encoder model's config
else:
encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)
# Use the explicitly specified decoder config
if model_args.decoder_config_name:
decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
# Otherwise, use the pretrained decoder model's config
else:
decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
encoder_config=encoder_config,
decoder_config=decoder_config,
)
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
decoder_start_token_id = decoder_config.decoder_start_token_id
pad_token_id = decoder_config.pad_token_id
if decoder_start_token_id is None:
decoder_start_token_id = decoder_config.bos_token_id
if pad_token_id is None:
pad_token_id = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
model.config.eos_token_id = decoder_config.eos_token_id
model.config.decoder_start_token_id = decoder_start_token_id
model.config.pad_token_id = pad_token_id
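# Example (illustrative): for gpt2, bos_token_id == eos_token_id == 50256
# (the <|endoftext|> token), so generation starts from <|endoftext|> and the
# same id doubles as the padding token.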
feature_extractor = AutoFeatureExtractor.from_pretrained(model_args.encoder_model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)
model.save_pretrained(model_args.output_dir)
feature_extractor.save_pretrained(model_args.output_dir)
tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
#!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library vision-encoder-decoder models for image captioning.
"""
import json
import logging
import os
import sys
import time
from dataclasses import asdict, dataclass, field
from enum import Enum
from functools import partial
from pathlib import Path
from typing import Callable, Optional
import datasets
import nltk # Here to have a nice missing dependency error message early on
import numpy as np
from datasets import Dataset, load_dataset, load_metric
from PIL import Image
from tqdm import tqdm
import jax
import jax.numpy as jnp
import optax
import transformers
from filelock import FileLock
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from huggingface_hub import Repository
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
FlaxVisionEncoderDecoderModel,
HfArgumentParser,
is_tensorboard_available,
)
from transformers.file_utils import get_full_repo_name, is_offline_mode
logger = logging.getLogger(__name__)
try:
nltk.data.find("tokenizers/punkt")
except (LookupError, OSError):
if is_offline_mode():
raise LookupError(
"Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files"
)
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
# Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
def shift_tokens_right(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
"""
Shift input ids one token to the right.
"""
shifted_input_ids = np.zeros_like(input_ids)
shifted_input_ids[:, 1:] = input_ids[:, :-1]
shifted_input_ids[:, 0] = decoder_start_token_id
shifted_input_ids = np.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
return shifted_input_ids
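# Worked example (illustrative): with pad_token_id=0 and decoder_start_token_id=2,
# input_ids [[5, 6, 7]] becomes [[2, 5, 6]]: the decoder starts from
# `decoder_start_token_id` and sees the labels shifted one position to the
# right, with any -100 (ignored label) replaced by the pad token id.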
@dataclass
class TrainingArguments:
output_dir: str = field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."})
do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."})
per_device_train_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
)
per_device_eval_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
)
_block_size_doc = """
The default value `0` will preprocess (tokenization + feature extraction) the whole dataset before training and
cache the results. This uses more disk space, but avoids (repeated) processing time during training. This is a
good option if your disk space is large enough to store the whole processed dataset.
If a positive value is given, the captions in the dataset are tokenized before training and the results are
cached. During training, the dataset is iterated in chunks of size `block_size`. For each block, images are
transformed by the feature extractor with the results kept in memory (no cache), and batches of size
`batch_size` are yielded before the next block is processed. This avoids heavy disk usage when the
dataset is large.
"""
block_size: int = field(default=0, metadata={"help": _block_size_doc})
learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."})
weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."})
adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"})
adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"})
adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."})
label_smoothing_factor: float = field(
default=0.0, metadata={"help": "The label smoothing epsilon to apply (zero means no label smoothing)."}
)
num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."})
seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."})
push_to_hub: bool = field(
default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."}
)
hub_model_id: str = field(
default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."}
)
hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."})
def __post_init__(self):
if self.output_dir is not None:
self.output_dir = os.path.expanduser(self.output_dir)
def to_dict(self):
"""
Serializes this instance while replacing `Enum` members by their values (for JSON serialization support). It
obfuscates token fields by replacing their values with a placeholder.
"""
d = asdict(self)
for k, v in d.items():
if isinstance(v, Enum):
d[k] = v.value
if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum):
d[k] = [x.value for x in v]
if k.endswith("_token"):
d[k] = f"<{k.upper()}>"
return d
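# Example (illustrative): a `hub_token` field serializes as "<HUB_TOKEN>"
# regardless of its actual value, so secrets never end up in saved logs.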
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: str = field(
metadata={"help": "The model checkpoint for weights initialization."},
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
data_dir: Optional[str] = field(
default=None, metadata={"help": "The data directory of the dataset to use (via the datasets library)."}
)
image_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the full image file paths."},
)
caption_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the image captions."},
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input predict data file to do prediction on (a text file)."},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
val_max_target_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the `max_length` param of `model.generate`, which is used "
"during evaluation."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
predict_with_generate: bool = field(
default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": "Number of beams to use for evaluation. This argument will be passed to `model.generate`, "
"which is used during evaluation."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if self.val_max_target_length is None:
self.val_max_target_length = self.max_target_length
image_captioning_name_mapping = {
"image_caption_dataset.py": ("image_path", "caption"),
}
class TrainState(train_state.TrainState):
dropout_rng: jnp.ndarray
def replicate(self):
return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng))
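# Note: `replicate()` copies the train state onto every local device and gives
# each device its own dropout PRNG key via `shard_prng_key`, so pmap-ed train
# steps draw different dropout masks per device.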
def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False):
"""
Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
Shuffle batches if `shuffle` is `True`.
"""
steps = len(dataset) // batch_size # Skip incomplete batch.
# We use `numpy.ndarray` to interact with `datasets.Dataset`, since using `jax.numpy.array` to index into a
# dataset is significantly slower. The JAX array is used in the first place only for JAX's PRNG
# mechanism, which works differently from NumPy/SciPy.
if shuffle:
batch_idx = jax.random.permutation(rng, len(dataset))
batch_idx = np.asarray(batch_idx)
else:
batch_idx = np.arange(len(dataset))
for idx in range(steps):
start_idx = batch_size * idx
end_idx = batch_size * (idx + 1)
selected_indices = batch_idx[start_idx:end_idx]
batch = dataset[selected_indices]
batch = shard(batch)
yield batch
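# Note: `shard` reshapes every array in the batch from (batch_size, ...) to
# (local_device_count, batch_size // local_device_count, ...), so each
# pmap-ed step receives one slice per local device.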
def write_metric(summary_writer, metrics, train_time, step, metric_key_prefix="train"):
if train_time:
summary_writer.scalar("train_time", train_time, step)
metrics = get_metrics(metrics)
for key, vals in metrics.items():
tag = f"{metric_key_prefix}_{key}"
for i, val in enumerate(vals):
summary_writer.scalar(tag, val, step - len(vals) + i + 1)
else:
for metric_name, value in metrics.items():
summary_writer.scalar(f"{metric_key_prefix}_{metric_name}", value, step)
def create_learning_rate_fn(
train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float
) -> Callable[[int], jnp.array]:
"""Returns a linear warmup, linear_decay learning rate function."""
steps_per_epoch = train_ds_size // train_batch_size
num_train_steps = steps_per_epoch * num_train_epochs
warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
decay_fn = optax.linear_schedule(
init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps
)
schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])
return schedule_fn
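# Worked example (illustrative): with train_ds_size=1000, train_batch_size=100,
# num_train_epochs=3 and num_warmup_steps=10, there are 10 steps per epoch and
# 30 steps in total; the rate rises linearly from 0 to `learning_rate` over the
# first 10 steps, then decays linearly to 0 over the remaining 20 steps.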
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Handle the repository creation
if training_args.push_to_hub:
if training_args.hub_model_id is None:
repo_name = get_full_repo_name(
Path(training_args.output_dir).absolute().name, token=training_args.hub_token
)
else:
repo_name = training_args.hub_model_id
repo = Repository(training_args.output_dir, clone_from=repo_name)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files this script will use the first column for the full image path and the second column for the
# captions (unless you specify column names for this with the `image_column` and `caption_column` arguments).
#
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
dataset = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
keep_in_memory=False,
data_dir=data_args.data_dir,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
model = FlaxVisionEncoderDecoderModel.from_pretrained(
model_args.model_name_or_path,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
)
feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
if training_args.do_train:
column_names = dataset["train"].column_names
elif training_args.do_eval:
column_names = dataset["validation"].column_names
elif training_args.do_predict:
column_names = dataset["test"].column_names
else:
logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
return
# Get the column names for input/target.
dataset_columns = image_captioning_name_mapping.get(data_args.dataset_name, None)
if data_args.image_column is None:
assert dataset_columns is not None
image_column = dataset_columns[0]
else:
image_column = data_args.image_column
if image_column not in column_names:
raise ValueError(
f"`--image_column` value '{data_args.image_column}' needs to be one of: {', '.join(column_names)}"
)
if data_args.caption_column is None:
assert dataset_columns is not None
caption_column = dataset_columns[1]
else:
caption_column = data_args.caption_column
if caption_column not in column_names:
raise ValueError(
f"`--caption_column` value '{data_args.caption_column}' needs to be one of: {', '.join(column_names)}"
)
# In Flax, seq2seq models expect `decoder_input_ids` and do not accept `labels`,
# so we have to prepare the `decoder_input_ids` here ourselves. For that, we
# dynamically import the model-specific `shift_tokens_right` function from the
# model file, falling back to the generic implementation defined above.
model_module = __import__(model.__module__, fromlist=["shift_tokens_right"])
shift_tokens_right_fn = getattr(model_module, "shift_tokens_right", shift_tokens_right)
def filter_fn(examples):
"""remove problematic images"""
bools = []
for image_file in examples[image_column]:
try:
image = Image.open(image_file)
feature_extractor(images=image, return_tensors="np")
bools.append(True)
except Exception:
bools.append(False)
return bools
# Setting padding="max_length" as we need fixed length inputs for jitted functions
def tokenization_fn(examples, max_target_length):
"""Run tokenization on captions."""
captions = []
for caption in examples[caption_column]:
captions.append(caption.lower() + " " + tokenizer.eos_token)
targets = captions
model_inputs = {}
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(
targets, max_length=max_target_length, padding="max_length", truncation=True, return_tensors="np"
)
model_inputs["labels"] = labels["input_ids"]
decoder_input_ids = shift_tokens_right_fn(
labels["input_ids"], model.config.pad_token_id, model.config.decoder_start_token_id
)
model_inputs["decoder_input_ids"] = np.asarray(decoder_input_ids)
# We need decoder_attention_mask so we can ignore pad tokens from loss
model_inputs["decoder_attention_mask"] = labels["attention_mask"]
model_inputs[image_column] = examples[image_column]
return model_inputs
def feature_extraction_fn(examples, check_image=True):
"""
Run feature extraction on images.
If `check_image` is `True`, examples that fail during `Image.open()` are caught and discarded.
Otherwise, exceptions propagate.
"""
model_inputs = {}
if check_image:
images = []
to_keep = []
for image_file in examples[image_column]:
try:
img = Image.open(image_file)
images.append(img)
to_keep.append(True)
except Exception:
to_keep.append(False)
for k, v in examples.items():
if k != image_column:
# `v` is a NumPy array on this path (reached via `preprocess_fn`, whose
# tokenized inputs are NumPy), so boolean-mask indexing works here.
model_inputs[k] = v[to_keep]
else:
images = [Image.open(image_file) for image_file in examples[image_column]]
encoder_inputs = feature_extractor(images=images, return_tensors="np")
model_inputs["pixel_values"] = encoder_inputs.pixel_values
return model_inputs
def preprocess_fn(examples, max_target_length, check_image=True):
"""Run tokenization + image feature extraction"""
model_inputs = {}
# This contains image path column
model_inputs.update(tokenization_fn(examples, max_target_length))
model_inputs.update(feature_extraction_fn(model_inputs, check_image=check_image))
# Remove image path column
model_inputs.pop(image_column)
return model_inputs
features = datasets.Features(
{
"pixel_values": datasets.Array3D(
shape=(
getattr(model.config.encoder, "num_channels", 3),
model.config.encoder.image_size,
model.config.encoder.image_size,
),
dtype="float32",
),
"labels": datasets.Sequence(feature=datasets.Value(dtype="int32", id=None), length=-1, id=None),
"decoder_input_ids": datasets.Sequence(feature=datasets.Value(dtype="int32", id=None), length=-1, id=None),
"decoder_attention_mask": datasets.Sequence(
feature=datasets.Value(dtype="int32", id=None), length=-1, id=None
),
}
)
# If `block_size` is `0`, tokenization & image feature extraction is done at the beginning
run_feat_ext_at_beginning = training_args.block_size == 0
# Used in .map() below
function_kwarg = preprocess_fn if run_feat_ext_at_beginning else tokenization_fn
# `features` is used only for the final preprocessed dataset (for performance).
features_kwarg = features if run_feat_ext_at_beginning else None
# Keep `image_column` if the feature extraction is done during training
remove_columns_kwarg = [x for x in column_names if x != image_column or run_feat_ext_at_beginning]
processor_names = "tokenizer and feature extractor" if run_feat_ext_at_beginning else "tokenizer"
# Store some constants
train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
if training_args.block_size % train_batch_size > 0 or training_args.block_size % eval_batch_size > 0:
raise ValueError(
"`training_args.block_size` needs to be a multiple of the global train/eval batch size. "
f"Got {training_args.block_size}, {train_batch_size} and {eval_batch_size} respectively instead."
)
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset")
train_dataset = dataset["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
# remove problematic examples
# (if feature extraction is performed at the beginning, the filtering is done during preprocessing below
# instead of here.)
if not run_feat_ext_at_beginning:
train_dataset = train_dataset.filter(filter_fn, batched=True, num_proc=data_args.preprocessing_num_workers)
train_dataset = train_dataset.map(
function=function_kwarg,
batched=True,
num_proc=data_args.preprocessing_num_workers,
# the image path column is kept when feature extraction runs on the fly
remove_columns=remove_columns_kwarg,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Running {processor_names} on train dataset",
fn_kwargs={"max_target_length": data_args.max_target_length},
features=features_kwarg,
)
if run_feat_ext_at_beginning:
# set format (for performance) since the dataset is ready to be used
train_dataset = train_dataset.with_format("numpy")
steps_per_epoch = len(train_dataset) // train_batch_size
num_train_examples_per_epoch = steps_per_epoch * train_batch_size
num_epochs = int(training_args.num_train_epochs)
total_train_steps = steps_per_epoch * num_epochs
else:
num_train_examples_per_epoch = 0
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = dataset["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
# remove problematic examples
# (if feature extraction is performed at the beginning, the filtering is done during preprocessing below
# instead of here.)
if not run_feat_ext_at_beginning:
eval_dataset = eval_dataset.filter(filter_fn, batched=True, num_proc=data_args.preprocessing_num_workers)
eval_dataset = eval_dataset.map(
function=function_kwarg,
batched=True,
num_proc=data_args.preprocessing_num_workers,
# the image path column is kept when feature extraction runs on the fly
remove_columns=remove_columns_kwarg,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Running {processor_names} on validation dataset",
fn_kwargs={"max_target_length": data_args.val_max_target_length},
features=features_kwarg,
)
if run_feat_ext_at_beginning:
# set format (for performance) since the dataset is ready to be used
eval_dataset = eval_dataset.with_format("numpy")
num_eval_examples = len(eval_dataset)
eval_steps = num_eval_examples // eval_batch_size
if training_args.do_predict:
if "test" not in dataset:
raise ValueError("--do_predict requires a test dataset")
predict_dataset = dataset["test"]
if data_args.max_predict_samples is not None:
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# remove problematic examples
# (if feature extraction is performed at the beginning, the filtering is done during preprocessing below
# instead of here.)
if not run_feat_ext_at_beginning:
predict_dataset = predict_dataset.filter(
filter_fn, batched=True, num_proc=data_args.preprocessing_num_workers
)
predict_dataset = predict_dataset.map(
function=function_kwarg,
batched=True,
num_proc=data_args.preprocessing_num_workers,
# the image path column is kept when feature extraction runs on the fly
remove_columns=remove_columns_kwarg,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Running {processor_names} on prediction dataset",
fn_kwargs={"max_target_length": data_args.val_max_target_length},
features=features_kwarg,
)
if run_feat_ext_at_beginning:
# set format (for performance) since the dataset is ready to be used
predict_dataset = predict_dataset.with_format("numpy")
num_test_examples = len(predict_dataset)
test_steps = num_test_examples // eval_batch_size
def blockwise_data_loader(
rng: jax.random.PRNGKey,
ds: Dataset,
block_size: int,
batch_size: int,
shuffle: bool = False,
keep_in_memory: bool = False,
split: str = "",
):
"""
Wrap the simple `data_loader` in a block-wise way if `block_size` > 0, else it's the same as `data_loader`.
If `block_size` > 0, it requires `ds` to have a column that gives image paths in order to perform image feature
extraction (with the column name being specified by `image_column`). The tokenization should be done before
training in this case.
"""
# We use `numpy.ndarray` to interact with `datasets.Dataset`, since using `jax.numpy.array` to index into a
# dataset is significantly slower. The JAX array is used in the first place only for JAX's PRNG
# mechanism, which works differently from NumPy/SciPy.
if shuffle:
indices = jax.random.permutation(rng, len(ds))
indices = np.asarray(indices)
else:
indices = np.arange(len(ds))
_block_size = len(ds) if not block_size else block_size
steps_per_block = _block_size // batch_size
num_examples = len(ds)
steps = num_examples // batch_size
num_splits = steps // steps_per_block + int(steps % steps_per_block > 0)
for idx in range(num_splits):
if not block_size:
_ds = ds
else:
start_idx = block_size * idx
end_idx = block_size * (idx + 1)
selected_indices = indices[start_idx:end_idx]
_ds = ds.select(selected_indices)
_ds = _ds.map(
feature_extraction_fn,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=[image_column],
load_from_cache_file=not data_args.overwrite_cache,
features=features,
keep_in_memory=keep_in_memory,
# The images are already checked either in `.filter()` or in `preprocess_fn()`
fn_kwargs={"check_image": False},
desc=f"Running feature extraction on {split} dataset".replace(" ", " "),
)
_ds = _ds.with_format("numpy")
# No need to shuffle here
loader = data_loader(rng, _ds, batch_size=batch_size, shuffle=False)
for batch in loader:
yield batch
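# Worked example (illustrative): with len(ds)=1024, block_size=256 and
# batch_size=32, each epoch selects 4 blocks of 256 examples, runs feature
# extraction on one block at a time, and yields 8 batches per block
# (32 batches in total), keeping at most one processed block in memory.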
# Metric
metric = load_metric("rouge")
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [label.strip() for label in labels]
# rougeLSum expects newline after each sentence
preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds]
labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels]
return preds, labels
def compute_metrics(preds, labels):
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Some simple post-processing
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
# Extract a few results from ROUGE
result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
result["gen_len"] = np.mean(prediction_lens)
result = {k: round(v, 6) for k, v in result.items()}
return result, decoded_preds, decoded_labels
# Enable tensorboard only on the master node
has_tensorboard = is_tensorboard_available()
if has_tensorboard and jax.process_index() == 0:
try:
from flax.metrics.tensorboard import SummaryWriter
summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir))
except ImportError as ie:
has_tensorboard = False
logger.warning(
f"Unable to display metrics through TensorBoard because some packages are not installed: {ie}"
)
else:
logger.warning(
"Unable to display metrics through TensorBoard because the package is not installed. "
"Please run `pip install tensorboard` to enable it."
)
# Initialize our training
rng = jax.random.PRNGKey(training_args.seed)
rng, dropout_rng = jax.random.split(rng)
# Create learning rate schedule
linear_decay_lr_schedule_fn = create_learning_rate_fn(
num_train_examples_per_epoch,
train_batch_size,
training_args.num_train_epochs,
training_args.warmup_steps,
training_args.learning_rate,
)
# We use Optax's "masking" functionality to not apply weight decay
# to bias and LayerNorm scale parameters. decay_mask_fn returns a
# mask boolean with the same structure as the parameters.
# The mask is True for parameters that should be decayed.
# Note that this mask is specifically adapted for FlaxBart.
# For FlaxT5, one should correct the layer norm parameter naming
# accordingly - see `run_t5_mlm_flax.py` e.g.
def decay_mask_fn(params):
flat_params = traverse_util.flatten_dict(params)
layer_norm_params = [
(name, "scale") for name in ["self_attn_layer_norm", "layernorm_embedding", "final_layer_norm"]
]
flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_params) for path in flat_params}
return traverse_util.unflatten_dict(flat_mask)
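# Example (illustrative): a path ending in ("final_layer_norm", "scale") or in
# "bias" gets mask False (no weight decay), while e.g. a dense "kernel"
# parameter gets True (decayed).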
# create adam optimizer
adamw = optax.adamw(
learning_rate=linear_decay_lr_schedule_fn,
b1=training_args.adam_beta1,
b2=training_args.adam_beta2,
eps=training_args.adam_epsilon,
weight_decay=training_args.weight_decay,
mask=decay_mask_fn,
)
# Setup train state
state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw, dropout_rng=dropout_rng)
# label smoothed cross entropy
def loss_fn(logits, labels, padding_mask, label_smoothing_factor=0.0):
"""
The label smoothing implementation is adapted from Flax's official example:
https://github.com/google/flax/blob/87a211135c6a377c8f29048a1cac3840e38b9da4/examples/wmt/train.py#L104
"""
vocab_size = logits.shape[-1]
confidence = 1.0 - label_smoothing_factor
low_confidence = (1.0 - confidence) / (vocab_size - 1)
normalizing_constant = -(
confidence * jnp.log(confidence) + (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20)
)
soft_labels = onehot(labels, vocab_size, on_value=confidence, off_value=low_confidence)
loss = optax.softmax_cross_entropy(logits, soft_labels)
loss = loss - normalizing_constant
# ignore padded tokens from loss
loss = loss * padding_mask
loss = loss.sum() / padding_mask.sum()
return loss
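# Worked example (illustrative): with vocab_size=4 and label_smoothing_factor=0.1,
# confidence=0.9 and low_confidence=0.1/3; the soft target for label 2 is
# [1/30, 1/30, 0.9, 1/30]. `normalizing_constant` equals the entropy of that
# soft distribution, so a prediction matching it exactly yields a loss of 0.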
# Define gradient update step fn
def train_step(state, batch, label_smoothing_factor=0.0):
dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng)
def compute_loss(params):
labels = batch.pop("labels")
logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
loss = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor)
return loss
grad_fn = jax.value_and_grad(compute_loss)
loss, grad = grad_fn(state.params)
grad = jax.lax.pmean(grad, "batch")
new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng)
metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}
metrics = jax.lax.pmean(metrics, axis_name="batch")
return new_state, metrics
# Define eval fn
def eval_step(params, batch, label_smoothing_factor=0.0):
labels = batch.pop("labels")
logits = model(**batch, params=params, train=False)[0]
loss = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor)
# summarize metrics
metrics = {"loss": loss}
metrics = jax.lax.pmean(metrics, axis_name="batch")
return metrics
# Define generation function
max_length = (
data_args.val_max_target_length if data_args.val_max_target_length is not None else model.config.max_length
)
num_beams = data_args.num_beams if data_args.num_beams is not None else model.config.num_beams
gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
def generate_step(params, batch):
model.params = params
output_ids = model.generate(batch["pixel_values"], **gen_kwargs)
return output_ids.sequences
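# Note: Flax's `generate` falls back to `model.params` when no explicit `params`
# are passed, hence the assignment inside `generate_step` above; under `pmap`
# this runs once per device on that device's shard of the batch.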
# Create parallel version of the train and eval step
p_train_step = jax.pmap(
partial(train_step, label_smoothing_factor=training_args.label_smoothing_factor), "batch", donate_argnums=(0,)
)
p_eval_step = jax.pmap(partial(eval_step, label_smoothing_factor=training_args.label_smoothing_factor), "batch")
p_generate_step = jax.pmap(generate_step, "batch")
# Replicate the train state on each device
state = state.replicate()
if training_args.do_train:
logger.info("***** Running training *****")
logger.info(f" Num train examples = {num_train_examples_per_epoch}")
logger.info(f" Num Epochs = {num_epochs}")
logger.info(f" Instantaneous train batch size per device = {training_args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel & distributed) = {train_batch_size}")
logger.info(f" Optimization steps per epoch = {steps_per_epoch}")
logger.info(f" Total optimization steps = {total_train_steps}")
if training_args.do_eval:
logger.info(f" Num evaluation examples = {num_eval_examples}")
logger.info(f" Instantaneous evaluation batch size per device = {training_args.per_device_eval_batch_size}")
logger.info(f" Total evaluation batch size (w. parallel & distributed) = {eval_batch_size}")
logger.info(f" Evaluation steps = {eval_steps}")
if training_args.do_predict:
logger.info(f" Num test examples = {num_test_examples}")
logger.info(f" Instantaneous test batch size per device = {training_args.per_device_eval_batch_size}")
logger.info(f" Total test batch size (w. parallel & distributed) = {eval_batch_size}")
logger.info(f" Test steps = {test_steps}")
# create the output directory if needed
if not os.path.isdir(training_args.output_dir):
os.makedirs(training_args.output_dir, exist_ok=True)
def save_ckpt(ckpt_dir: str, commit_msg: str = ""):
"""save checkpoints and push to Hugging Face Hub if specified"""
# save checkpoint after each epoch and push checkpoint to the hub
if jax.process_index() == 0:
params = jax.device_get(jax.tree_map(lambda x: x[0], state.params))
model.save_pretrained(os.path.join(training_args.output_dir, ckpt_dir), params=params)
tokenizer.save_pretrained(os.path.join(training_args.output_dir, ckpt_dir))
if training_args.push_to_hub:
repo.push_to_hub(commit_message=commit_msg, blocking=False)
def evaluation_loop(
rng: jax.random.PRNGKey,
dataset: Dataset,
metric_key_prefix: str = "eval",
ckpt_dir: str = "",
is_prediction=False,
):
logger.info(f"*** {'Predict' if is_prediction else 'Evaluate'} ***")
metrics = []
preds = []
labels = []
batches = blockwise_data_loader(
rng,
dataset,
block_size=training_args.block_size,
batch_size=eval_batch_size,
keep_in_memory=False,
shuffle=False,
split="prediction" if is_prediction else "validation",
)
steps = len(dataset) // eval_batch_size
for _ in tqdm(
range(steps), desc=f"{'Predicting' if is_prediction else 'Evaluating'}...", position=2, leave=False
):
# Model forward
batch = next(batches)
_labels = batch.get("labels", None)
if not is_prediction and _labels is None:
raise ValueError("Evaluation requires the validation dataset to have `labels`")
if _labels is not None:
_metrics = p_eval_step(state.params, batch)
metrics.append(_metrics)
# generation
if data_args.predict_with_generate:
generated_ids = p_generate_step(state.params, batch)
preds.extend(jax.device_get(generated_ids.reshape(-1, gen_kwargs["max_length"])))
if _labels is not None:
labels.extend(jax.device_get(_labels.reshape(-1, _labels.shape[-1])))
if metrics:
# normalize metrics
metrics = get_metrics(metrics)
metrics = jax.tree_map(jnp.mean, metrics)
# compute ROUGE metrics
generations = []
rouge_desc = ""
if data_args.predict_with_generate:
if labels:
rouge_metrics, decoded_preds, decoded_labels = compute_metrics(preds, labels)
metrics.update(rouge_metrics)
rouge_desc = " ".join(
[
f"{'Predict' if is_prediction else 'Eval'} {key}: {value} |"
for key, value in rouge_metrics.items()
]
)
for pred, label in zip(decoded_preds, decoded_labels):
pred = pred.replace("\n", " ")
label = label.replace("\n", " ")
generations.append({"label": label, "pred": pred})
else:
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
# Some simple post-processing
decoded_preds = [pred.strip() for pred in decoded_preds]
# rougeLSum expects newline after each sentence
decoded_preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in decoded_preds]
for pred in decoded_preds:
pred = pred.replace("\n", " ")
generations.append({"pred": pred})
if metrics:
# Print metrics and update progress bar
desc = f"{'Predict' if is_prediction else 'Eval'} Loss: {metrics['loss']} | {rouge_desc})"
if training_args.do_train and not is_prediction:
desc = f"Epoch... ({epoch + 1}/{num_epochs} | Step: {cur_step} | " + desc
epochs.write(desc)
epochs.desc = desc
logger.info(desc)
if jax.process_index() == 0:
if not os.path.isdir(os.path.join(training_args.output_dir, ckpt_dir)):
os.makedirs(os.path.join(training_args.output_dir, ckpt_dir), exist_ok=True)
if metrics:
# Save metrics (only for the evaluation/prediction being done along with training)
if has_tensorboard and training_args.do_train:
write_metric(
summary_writer, metrics, train_time=None, step=cur_step, metric_key_prefix=metric_key_prefix
)
# save final metrics in json
metrics = {
f"{metric_key_prefix}_{metric_name}": round(value.item(), 6)
for metric_name, value in metrics.items()
}
_path = os.path.join(training_args.output_dir, ckpt_dir, f"{metric_key_prefix}_results.json")
with open(_path, "w") as f:
json.dump(metrics, f, indent=4, sort_keys=True)
# Update report
with open(os.path.join(training_args.output_dir, "log"), "a", encoding="UTF-8") as fp:
fp.write(desc + "\n")
# Save generations
if generations:
output_file = os.path.join(training_args.output_dir, ckpt_dir, f"{metric_key_prefix}_generation.json")
with open(output_file, "w", encoding="UTF-8") as fp:
json.dump(generations, fp, ensure_ascii=False, indent=4)
def evaluate(rng: jax.random.PRNGKey, dataset: Dataset, ckpt_dir: str = ""):
evaluation_loop(rng, dataset, metric_key_prefix="eval", ckpt_dir=ckpt_dir)
def predict(rng: jax.random.PRNGKey, dataset: Dataset):
evaluation_loop(rng, dataset, metric_key_prefix="test", is_prediction=True)
input_rng = None
if training_args.do_train:
cur_step = 0
train_time = 0
epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0)
for epoch in epochs:
# ======================== Training ================================
# Create sampling rng
rng, input_rng = jax.random.split(rng)
train_metrics = []
train_batches = blockwise_data_loader(
input_rng,
train_dataset,
block_size=training_args.block_size,
batch_size=train_batch_size,
keep_in_memory=True,
shuffle=True,
split="train",
)
# train
for (batch_idx, _) in enumerate(tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False)):
cur_step += 1
batch = next(train_batches)
batch_start = time.time()
state, train_metric = p_train_step(state, batch)
train_metrics.append(train_metric)
train_time += time.time() - batch_start
time_per_step = train_time / cur_step
# log and save info
if training_args.logging_steps > 0 and cur_step % training_args.logging_steps == 0:
_train_metric = unreplicate(train_metric)
desc = f"Epoch... ({epoch + 1}/{num_epochs} | Step: {cur_step} | Loss: {_train_metric['loss']} | Learning Rate: {_train_metric['learning_rate']} | Time per step: {time_per_step})"
epochs.desc = desc
epochs.write(desc)
logger.info(desc)
with open(os.path.join(training_args.output_dir, "log"), "a", encoding="UTF-8") as fp:
fp.write(desc + "\n")
# Save metrics
if has_tensorboard and jax.process_index() == 0:
write_metric(
summary_writer,
train_metrics,
train_time=train_time,
step=cur_step,
metric_key_prefix="train",
)
# ======================== Evaluating (inside an epoch) ==============================
if (
training_args.do_eval
and (training_args.eval_steps is not None and training_args.eval_steps > 0)
and cur_step % training_args.eval_steps == 0
):
ckpt_dir = f"ckpt_epoch_{epoch + 1}_step_{cur_step}"
commit_msg = f"Saving weights and logs of epoch {epoch + 1} - step {cur_step}"
evaluate(input_rng, eval_dataset, ckpt_dir)
save_ckpt(ckpt_dir=ckpt_dir, commit_msg=commit_msg)
# ======================== Epoch End ==============================
# log and save info
if training_args.logging_steps <= 0:
logger.info(desc)
with open(os.path.join(training_args.output_dir, "log"), "a", encoding="UTF-8") as fp:
fp.write(desc + "\n")
# Save metrics
if has_tensorboard and jax.process_index() == 0:
write_metric(
summary_writer, train_metrics, train_time=train_time, step=cur_step, metric_key_prefix="train"
)
# ======================== Evaluating (after each epoch) ==============================
if training_args.do_eval and (training_args.eval_steps is None or training_args.eval_steps <= 0):
ckpt_dir = f"ckpt_epoch_{epoch + 1}_step_{cur_step}"
commit_msg = f"Saving weights and logs of epoch {epoch + 1} - step {cur_step}"
evaluate(input_rng, eval_dataset, ckpt_dir)
save_ckpt(ckpt_dir=ckpt_dir, commit_msg=commit_msg)
# ======================== Evaluating | Predicting ==============================
# Create sampling rng
if input_rng is None:
rng, input_rng = jax.random.split(rng)
# run evaluation without training
if training_args.do_eval and not training_args.do_train:
evaluate(input_rng, eval_dataset)
# run prediction after (or without) training
if training_args.do_predict:
predict(input_rng, predict_dataset)
if __name__ == "__main__":
main()
@@ -359,7 +359,7 @@ def main():
        raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
    if data_args.max_eval_samples is not None:
-       raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_train_samples))
+       raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
    def prepare_dataset(batch):
        # process audio
...
@@ -16,6 +16,22 @@
 ####################################################################################################
+#
+# Note: if, when running this conversion script, you get an exception like
+#     ModuleNotFoundError: No module named 'megatron.model.enums'
+# you need to tell Python where to find your clone of Megatron-LM, e.g.:
+#
+#     cd /tmp
+#     git clone https://github.com/NVIDIA/Megatron-LM
+#     PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py ...
+#
+# If you already have it cloned elsewhere, simply adjust the path accordingly.
+#
+# If the training was done using a Megatron-LM fork, e.g.
+# https://github.com/microsoft/Megatron-DeepSpeed/, then chances are that you need to have that fork
+# in your path instead, i.e., /path/to/Megatron-DeepSpeed/.
+#
 import argparse
 import json
 import os
...
@@ -16,6 +16,22 @@
 ####################################################################################################
+#
+# Note: if, when running this conversion script, you get an exception like
+#     ModuleNotFoundError: No module named 'megatron.model.enums'
+# you need to tell Python where to find your clone of Megatron-LM, e.g.:
+#
+#     cd /tmp
+#     git clone https://github.com/NVIDIA/Megatron-LM
+#     PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
+#
+# If you already have it cloned elsewhere, simply adjust the path accordingly.
+#
+# If the training was done using a Megatron-LM fork, e.g.
+# https://github.com/microsoft/Megatron-DeepSpeed/, then chances are that you need to have that fork
+# in your path instead, i.e., /path/to/Megatron-DeepSpeed/.
+#
 import argparse
 import os
 import re
...
...@@ -380,7 +380,7 @@ class SpeechEncoderDecoderModel(PreTrainedModel):
)
if "config" not in kwargs_encoder:
encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path, **kwargs_encoder)
encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path)
if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
logger.info(
f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model "
...@@ -391,7 +391,7 @@ class SpeechEncoderDecoderModel(PreTrainedModel):
kwargs_encoder["config"] = encoder_config
encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args)
encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
decoder = kwargs_decoder.pop("model", None)
if decoder is None:
...@@ -402,7 +402,7 @@ class SpeechEncoderDecoderModel(PreTrainedModel):
)
if "config" not in kwargs_decoder:
decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path)
if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
logger.info(
f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. "
...@@ -424,7 +424,7 @@ class SpeechEncoderDecoderModel(PreTrainedModel):
"`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
)
decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path)
decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
# instantiate config with corresponding kwargs
config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
...
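The hunks above move the encoder/decoder kwargs from the `AutoConfig` calls to the actual model loads. A minimal usage sketch of what that enables; the checkpoint names and the forwarded kwarg are illustrative assumptions, not taken from this diff.

```python
# Sketch: `encoder_`/`decoder_`-prefixed kwargs passed to
# from_encoder_decoder_pretrained are split into kwargs_encoder/kwargs_decoder
# and, after this change, reach AutoModel.from_pretrained rather than AutoConfig.
from transformers import SpeechEncoderDecoderModel

model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/wav2vec2-base-960h",  # encoder checkpoint (assumption)
    "bert-base-uncased",            # decoder checkpoint (assumption)
    encoder_feat_proj_dropout=0.0,  # forwarded to the encoder load via kwargs_encoder
)
```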
...@@ -1290,6 +1290,7 @@ class UniSpeechForPreTraining(UniSpeechPreTrainedModel):
>>> batch_size, raw_sequence_length = input_values.shape
>>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length)
>>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2)
>>> mask_time_indices = torch.tensor(mask_time_indices, device=input_values.device, dtype=torch.long)
>>> with torch.no_grad():
... outputs = model(input_values, mask_time_indices=mask_time_indices)
...
...@@ -1322,6 +1322,7 @@ class UniSpeechSatForPreTraining(UniSpeechSatPreTrainedModel):
>>> batch_size, raw_sequence_length = input_values.shape
>>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length)
>>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2)
>>> mask_time_indices = torch.tensor(mask_time_indices, device=input_values.device, dtype=torch.long)
>>> with torch.no_grad():
... outputs = model(input_values, mask_time_indices=mask_time_indices)
...
...@@ -1460,6 +1460,7 @@ class Wav2Vec2ForPreTraining(Wav2Vec2PreTrainedModel):
>>> batch_size, raw_sequence_length = input_values.shape
>>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length)
>>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2)
>>> mask_time_indices = torch.tensor(mask_time_indices, device=input_values.device, dtype=torch.long)
>>> with torch.no_grad():
... outputs = model(input_values, mask_time_indices=mask_time_indices)
...
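The three docstring hunks above add the same line. A short sketch of why, under the assumption (consistent with the surrounding code) that `_compute_mask_indices` returns a NumPy array while the pre-training forward expects a torch tensor:

```python
# Sketch: _compute_mask_indices returns a NumPy array; the pre-training models
# expect a torch tensor on the same device as input_values, hence the wrap.
import torch
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices

batch_size, sequence_length = 2, 100  # illustrative shapes (assumption)
mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2)
mask_time_indices = torch.tensor(mask_time_indices, dtype=torch.long)  # np.ndarray -> LongTensor
```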
...@@ -742,6 +742,8 @@ class Pipeline(_ScikitCompat):
self.model.config.update(task_specific_params.get(task))
self.call_count = 0
self._batch_size = kwargs.pop("batch_size", None)
self._num_workers = kwargs.pop("num_workers", None)
self._preprocess_params, self._forward_params, self._postprocess_params = self._sanitize_parameters(**kwargs)
def save_pretrained(self, save_directory: str):
...@@ -947,9 +949,21 @@ class Pipeline(_ScikitCompat):
final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params)
return final_iterator
def __call__(self, inputs, *args, num_workers=0, batch_size=1, **kwargs):
def __call__(self, inputs, *args, num_workers=None, batch_size=None, **kwargs):
if args:
logger.warning(f"Ignoring args : {args}")
if num_workers is None:
if self._num_workers is None:
num_workers = 0
else:
num_workers = self._num_workers
if batch_size is None:
if self._batch_size is None:
batch_size = 1
else:
batch_size = self._batch_size
preprocess_params, forward_params, postprocess_params = self._sanitize_parameters(**kwargs)
# Fuse __init__ params and __call__ params without modifying the __init__ ones.
...
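The new `_batch_size`/`_num_workers` attributes turn constructor-time values into defaults for every call, with per-call arguments still taking precedence. A usage sketch; the tiny checkpoint is the one the tests later in this commit use:

```python
# Sketch: batch_size/num_workers set at construction become the __call__
# defaults; an explicit per-call value overrides them.
from transformers import pipeline

pipe = pipeline(
    "text-classification",
    model="hf-internal-testing/tiny-random-distilbert",
    batch_size=2,
    num_workers=1,
)
pipe(["first text", "second text"])                # runs with batch_size=2
pipe(["first text", "second text"], batch_size=4)  # per-call override wins
```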
from typing import List, Union
from ..file_utils import add_end_docstrings, is_torch_available, is_vision_available, requires_backends
from ..file_utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
requires_backends,
)
from ..utils import logging
from .base import PIPELINE_INIT_ARGS, Pipeline
...@@ -10,6 +16,11 @@ if is_vision_available():
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
...@@ -31,12 +42,12 @@ class ImageClassificationPipeline(Pipeline):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch.")
requires_backends(self, "vision")
self.check_model_type(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
)
def _sanitize_parameters(self, top_k=None):
postprocess_params = {}
...@@ -77,7 +88,7 @@ class ImageClassificationPipeline(Pipeline):
def preprocess(self, image):
image = load_image(image)
model_inputs = self.feature_extractor(images=image, return_tensors="pt")
model_inputs = self.feature_extractor(images=image, return_tensors=self.framework)
return model_inputs
def _forward(self, model_inputs):
...@@ -87,8 +98,16 @@ class ImageClassificationPipeline(Pipeline):
def postprocess(self, model_outputs, top_k=5):
if top_k > self.model.config.num_labels:
top_k = self.model.config.num_labels
probs = model_outputs.logits.softmax(-1)[0]
scores, ids = probs.topk(top_k)
if self.framework == "pt":
probs = model_outputs.logits.softmax(-1)[0]
scores, ids = probs.topk(top_k)
elif self.framework == "tf":
probs = tf.nn.softmax(model_outputs.logits, axis=-1)[0]
topk = tf.math.top_k(probs, k=top_k)
scores, ids = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"Unsupported framework: {self.framework}")
scores = scores.tolist()
ids = ids.tolist()
...
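The two postprocess branches added above compute the same top-k result per framework. A side-by-side equivalence sketch with illustrative logits shapes (assumptions, not taken from the pipeline):

```python
# Sketch: both branches softmax the logits of the single image in the batch and
# take the top-k (score, label-id) pairs.
import tensorflow as tf
import torch

top_k = 5
logits_pt = torch.randn(1, 10)
scores_pt, ids_pt = logits_pt.softmax(-1)[0].topk(top_k)             # PyTorch branch

logits_tf = tf.random.normal((1, 10))
topk = tf.math.top_k(tf.nn.softmax(logits_tf, axis=-1)[0], k=top_k)  # TF branch
scores_tf, ids_tf = topk.values.numpy(), topk.indices.numpy()
```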
...@@ -307,7 +307,8 @@ class BertGenerationEncoderIntegrationTest(unittest.TestCase):
def test_inference_no_head_absolute_embedding(self):
model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
output = model(input_ids)[0]
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size([1, 8, 1024])
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
...@@ -322,7 +323,8 @@ class BertGenerationDecoderIntegrationTest(unittest.TestCase):
def test_inference_no_head_absolute_embedding(self):
model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
output = model(input_ids)[0]
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size([1, 8, 50358])
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
...
...@@ -858,6 +858,7 @@ def prepare_img():
@require_vision
@require_torch
class CLIPModelIntegrationTest(unittest.TestCase):
@slow
def test_inference(self):
...
...@@ -478,11 +478,11 @@ class LayoutLMv2ModelTest(ModelTesterMixin, unittest.TestCase):
def prepare_layoutlmv2_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLMv2 forward pass on:
# fmt: off
input_ids = torch.tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]],device=torch_device) # noqa: E231
input_ids = torch.tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]]) # noqa: E231
bbox = torch.tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]],device=torch_device) # noqa: E231
bbox = torch.tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]]) # noqa: E231
image = ImageList(torch.randn((2,3,224,224)), image_sizes=[(224,224), (224,224)]) # noqa: E231
attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],],device=torch_device) # noqa: E231
attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],]) # noqa: E231
token_type_ids = torch.tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]],device=torch_device) # noqa: E231
token_type_ids = torch.tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]) # noqa: E231
# fmt: on
return input_ids, bbox, image, attention_mask, token_type_ids
...@@ -505,11 +505,11 @@ class LayoutLMv2ModelIntegrationTest(unittest.TestCase):
# forward pass
outputs = model(
input_ids=input_ids,
input_ids=input_ids.to(torch_device),
bbox=bbox,
bbox=bbox.to(torch_device),
image=image,
image=image.to(torch_device),
attention_mask=attention_mask,
attention_mask=attention_mask.to(torch_device),
token_type_ids=token_type_ids,
token_type_ids=token_type_ids.to(torch_device),
)
# verify the sequence output
...
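The pattern the test adopts above: the input-preparation helper builds CPU tensors, and device placement happens only at the forward call. A minimal sketch of the same idiom, with illustrative inputs:

```python
# Sketch: keep test fixtures device-agnostic by creating tensors on CPU and
# moving them with .to(device) only at the call site.
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"  # stand-in for torch_device
input_ids = torch.tensor([[101, 102]])   # built on CPU by the helper
input_ids_on_device = input_ids.to(device)  # moved only where the model runs
```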
...@@ -485,7 +485,8 @@ class RobertaModelIntegrationTest(TestCasePlus):
model = RobertaForMaskedLM.from_pretrained("roberta-base")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size((1, 11, 50265))
self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
...@@ -504,7 +505,8 @@ class RobertaModelIntegrationTest(TestCasePlus):
model = RobertaModel.from_pretrained("roberta-base")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
with torch.no_grad():
output = model(input_ids)[0]
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[-0.0231, 0.0782, 0.0074], [-0.1854, 0.0540, -0.0175], [0.0548, 0.0799, 0.1687]]]
...@@ -521,7 +523,8 @@ class RobertaModelIntegrationTest(TestCasePlus):
model = RobertaForSequenceClassification.from_pretrained("roberta-large-mnli")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size((1, 3))
self.assertEqual(output.shape, expected_shape)
expected_tensor = torch.tensor([[-0.9469, 0.3913, 0.5118]])
...
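Both the BertGeneration and RoBERTa integration tests above now wrap their forward passes in `torch.no_grad()`. A sketch of the idiom and its rationale; the input ids are illustrative:

```python
# Sketch: disabling autograd for a pure inference pass avoids retaining
# activations for backward, cutting the tests' memory footprint.
import torch
from transformers import RobertaModel

model = RobertaModel.from_pretrained("roberta-base")
input_ids = torch.tensor([[0, 31414, 232, 2]])  # illustrative token ids (assumption)
with torch.no_grad():
    output = model(input_ids)[0]
```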
...@@ -254,7 +254,7 @@ class TFCLIPVisionModelTest(TFModelTesterMixin, unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFCLIPVisionModel.from_pretrained(model_name, from_pt=True)
model = TFCLIPVisionModel.from_pretrained(model_name)
self.assertIsNotNone(model)
...@@ -359,7 +359,7 @@ class TFCLIPTextModelTest(TFModelTesterMixin, unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFCLIPTextModel.from_pretrained(model_name, from_pt=True)
model = TFCLIPTextModel.from_pretrained(model_name)
self.assertIsNotNone(model)
...@@ -618,7 +618,7 @@ class TFCLIPModelTest(TFModelTesterMixin, unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFCLIPModel.from_pretrained(model_name, from_pt=True)
model = TFCLIPModel.from_pretrained(model_name)
self.assertIsNotNone(model)
...@@ -630,11 +630,12 @@ def prepare_img():
@require_vision
@require_tf
class TFCLIPModelIntegrationTest(unittest.TestCase):
@slow
def test_inference(self):
model_name = "openai/clip-vit-base-patch32"
model = TFCLIPModel.from_pretrained(model_name, from_pt=True)
model = TFCLIPModel.from_pretrained(model_name)
processor = CLIPProcessor.from_pretrained(model_name)
image = prepare_img()
...
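Dropping `from_pt=True` implies the checkpoints referenced by these tests now ship native TF weights, so no PyTorch-to-TF conversion is needed at load time. A minimal load sketch using the checkpoint named in the integration test above:

```python
# Sketch: native TF weights load directly; the from_pt conversion flag is gone.
from transformers import TFCLIPModel

model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
```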
...@@ -299,6 +299,16 @@ class CommonPipelineTest(unittest.TestCase):
self.assertIsInstance(pipe, TextClassificationPipeline)
@require_torch
def test_pipeline_batch_size_global(self):
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
self.assertEqual(pipe._batch_size, None)
self.assertEqual(pipe._num_workers, None)
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", batch_size=2, num_workers=1)
self.assertEqual(pipe._batch_size, 2)
self.assertEqual(pipe._num_workers, 1)
@require_torch
def test_pipeline_override(self):
class MyPipeline(TextClassificationPipeline):
...
...@@ -14,7 +14,12 @@
import unittest
from transformers import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, PreTrainedTokenizer, is_vision_available
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
PreTrainedTokenizer,
is_vision_available,
)
from transformers.pipelines import ImageClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
...@@ -40,9 +45,9 @@ else:
@is_pipeline_test
@require_vision
@require_torch
class ImageClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
def get_test_pipeline(self, model, tokenizer, feature_extractor):
image_classifier = ImageClassificationPipeline(model=model, feature_extractor=feature_extractor, top_k=2)
...@@ -145,9 +150,42 @@ class ImageClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTest
)
@require_tf
@unittest.skip("Image classification is not implemented for TF")
def test_small_model_tf(self):
pass
small_model = "lysandre/tiny-vit-random"
image_classifier = pipeline("image-classification", model=small_model)
outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.0015, "label": "chambered nautilus, pearly nautilus, nautilus"},
{"score": 0.0015, "label": "pajama, pyjama, pj's, jammies"},
{"score": 0.0014, "label": "trench coat"},
{"score": 0.0014, "label": "handkerchief, hankie, hanky, hankey"},
{"score": 0.0014, "label": "baboon"},
],
)
outputs = image_classifier(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
],
top_k=2,
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.0015, "label": "chambered nautilus, pearly nautilus, nautilus"},
{"score": 0.0015, "label": "pajama, pyjama, pj's, jammies"},
],
[
{"score": 0.0015, "label": "chambered nautilus, pearly nautilus, nautilus"},
{"score": 0.0015, "label": "pajama, pyjama, pj's, jammies"},
],
],
)
def test_custom_tokenizer(self):
tokenizer = PreTrainedTokenizer()
...
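With the TF mapping registered, the previously skipped TF test above becomes a real end-to-end check. A condensed sketch of what it exercises, reusing the tiny checkpoint and COCO image URL from the test body:

```python
# Sketch: run the image-classification pipeline on the TF-capable tiny
# checkpoint; outputs are [{"score": ..., "label": ...}] sorted by score.
from transformers import pipeline

classifier = pipeline("image-classification", model="lysandre/tiny-vit-random")
preds = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=2)
print(preds)
```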