"examples/legacy/seq2seq/__init__.py" did not exist on "5b396457e5035a8b16ddee14b205c098598fe6bb"
Unverified commit ace74d16 authored by NielsRogge, committed by GitHub

Add Nougat (#25942)



* Add conversion script

* Add NougatImageProcessor

* Add crop margin

* More improvements

* Add docs, READMEs

* Remove print statements

* Include model_max_length

* Add NougatTokenizerFast

* Fix imports

* Improve postprocessing

* Improve image processor

* Fix image processor

* Improve normalize method

* More improvements

* More improvements

* Add processor, improve docs

* Simplify fast tokenizer

* Remove test file

* Fix docstrings

* Use NougatProcessor in conversion script

* Add is_levenshtein_available

* Add tokenizer tests

* More improvements

* Use numpy instead of opencv

* Add is_cv2_available

* Fix cv2_available

* Add is_nltk_available

* Add image processor tests, improve crop_margin

* Add integration tests

* Improve integration test

* Use do_rescale instead of hacks, thanks Amy

* Remove random_padding

* Address comments

* Address more comments

* Add import

* Address more comments

* Address more comments

* Address comment

* Address comment

* Set max_model_input_sizes

* Add tests

* Add requires_backends

* Add Nougat to exotic tests

* Use to_pil_image

* Address comment regarding nltk

* Add NLTK

* Improve variable names, integration test

* Add test

* refactor, document, and test regexes

* remove named capture groups, add comments

* format

* add non-markdown fixed tokenization

* format

* correct flakiness of args parse

* add regex comments

* test functionalities for crop_image, align long axis and expected output

* add regex tests

* remove cv2 dependency

* test crop_margin equality between cv2 and python

* refactor table regexes to markdown

* add newline

* change print to log, improve doc

* fix high count tables correction

* address PR comments: naming, linting, asserts

* Address comments

* Add copied from

* Update conversion script

* Update conversion script to convert both small and base versions

* Add inference example

* Add more info

* Fix style

* Add require annotators to test

* Define all keyword arguments explicitly

* Move cv2 annotator

* Add tokenizer init method

* Transfer checkpoints

* Add reference to Donut

* Address comments

* Skip test

* Remove cv2 method

* Add copied from statements

* Use cached_property

* Fix docstring

* Add file to not doctested

---------
Co-authored-by: Pablo Montalvo <pablo.montalvo.leroux@gmail.com>
parent 5e09af2a
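
For reference, a minimal inference sketch with the converted checkpoint; it mirrors the integration test added below (the fixture image and generation arguments are taken directly from that test):

```python
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import NougatProcessor, VisionEncoderDecoderModel

processor = NougatProcessor.from_pretrained("facebook/nougat-base")
model = VisionEncoderDecoderModel.from_pretrained("facebook/nougat-base")
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# sample PDF page from the test fixtures used by the integration test
filepath = hf_hub_download(
    repo_id="hf-internal-testing/fixtures_docvqa", filename="nougat_pdf.png", repo_type="dataset"
)
image = Image.open(filepath).convert("RGB")

pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)
outputs = model.generate(
    pixel_values,
    min_length=1,
    max_length=3584,
    bad_words_ids=[[processor.tokenizer.unk_token_id]],
)
sequence = processor.batch_decode(outputs, skip_special_tokens=True)[0]
print(processor.post_process_generation(sequence, fix_markdown=False))
```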
@@ -55,6 +55,7 @@ from .utils import (
is_auto_gptq_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
@@ -69,8 +70,10 @@ from .utils import (
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
@@ -311,6 +314,36 @@ def require_bs4(test_case):
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
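
Like the existing require_* helpers, the new decorators stack; here is a hypothetical test method gated on both optional dependencies (the Nougat tokenizer tests further down use exactly this pattern):

```python
import unittest

from transformers.testing_utils import require_levenshtein, require_nltk


class ExampleTest(unittest.TestCase):  # hypothetical test case
    @require_levenshtein
    @require_nltk
    def test_needs_optional_deps(self):
        # runs only when python-Levenshtein and NLTK are both installed;
        # otherwise unittest records a skip instead of a failure
        pass
```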
def require_accelerate(test_case):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
......
@@ -108,6 +108,7 @@ from .import_utils import (
is_bitsandbytes_available,
is_bs4_available,
is_coloredlogs_available,
is_cv2_available,
is_cython_available,
is_datasets_available,
is_decord_available,
@@ -125,9 +126,11 @@ from .import_utils import (
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_nltk_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
......
@@ -310,6 +310,13 @@ class NllbTokenizerFast(metaclass=DummyObject):
requires_backends(self, ["tokenizers"])
class NougatTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class OpenAIGPTTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
......
@@ -359,6 +359,13 @@ class MobileViTImageProcessor(metaclass=DummyObject):
requires_backends(self, ["vision"])
class NougatImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class OneFormerImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
......
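
These dummies keep `from transformers import NougatTokenizerFast` working even without the optional backend installed. A simplified sketch of the `DummyObject` metaclass they rely on (the real one in `transformers.utils` has a couple of extra special cases):

```python
from transformers.utils import requires_backends


class DummyObject(type):
    """Metaclass sketch: any public attribute access on the dummy class
    triggers requires_backends, which raises a descriptive ImportError."""

    def __getattribute__(cls, key):
        if key.startswith("_"):  # keep dunder/introspection access working
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)
```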
@@ -75,6 +75,8 @@ _flash_attn_available = _is_package_available("flash_attn")
# `importlib.metadata.version` doesn't work with `bs4` but `beautifulsoup4`. For `importlib.util.find_spec`, reversed.
_bs4_available = importlib.util.find_spec("bs4") is not None
_coloredlogs_available = _is_package_available("coloredlogs")
# `importlib.metadata.version` doesn't work with `cv2`; the distribution is named `opencv-python(-headless)`.
_cv2_available = importlib.util.find_spec("cv2") is not None
_datasets_available = _is_package_available("datasets")
_decord_available = importlib.util.find_spec("decord") is not None
_detectron2_available = _is_package_available("detectron2")
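
The comment above is the crux: `_is_package_available` queries `importlib.metadata` by name, and `cv2` ships under the distribution name `opencv-python` (or `opencv-python-headless`), so only `find_spec` sees it. A quick illustration, assuming `opencv-python` is what is installed:

```python
import importlib.metadata
import importlib.util

# the import name is discoverable...
print(importlib.util.find_spec("cv2") is not None)  # True

# ...but there is no distribution called "cv2", so metadata lookup fails
try:
    importlib.metadata.version("cv2")
except importlib.metadata.PackageNotFoundError:
    print(importlib.metadata.version("opencv-python"))  # e.g. 4.8.0.76
```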
@@ -95,8 +97,10 @@ _jieba_available = _is_package_available("jieba")
_jinja_available = _is_package_available("jinja2")
_kenlm_available = _is_package_available("kenlm")
_keras_nlp_available = _is_package_available("keras_nlp")
_levenshtein_available = _is_package_available("Levenshtein")
_librosa_available = _is_package_available("librosa")
_natten_available = _is_package_available("natten")
_nltk_available = _is_package_available("nltk")
_onnx_available = _is_package_available("onnx")
_openai_available = _is_package_available("openai")
_optimum_available = _is_package_available("optimum")
@@ -240,6 +244,10 @@ def is_kenlm_available():
return _kenlm_available
def is_cv2_available():
return _cv2_available
def is_torch_available():
return _torch_available
@@ -629,6 +637,10 @@ def is_auto_gptq_available():
return _auto_gptq_available
def is_levenshtein_available():
return _levenshtein_available
def is_optimum_neuron_available():
return _optimum_available and _is_package_available("optimum.neuron")
@@ -759,6 +771,10 @@ def is_natten_available():
return _natten_available
def is_nltk_available():
return _nltk_available
def is_torchaudio_available():
return _torchaudio_available
@@ -813,6 +829,16 @@ def is_jinja_available():
return _jinja_available
# docstyle-ignore
CV2_IMPORT_ERROR = """
{0} requires the OpenCV library but it was not found in your environment. You can install it with:
```
pip install opencv-python
```
Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
DATASETS_IMPORT_ERROR = """
{0} requires the 🤗 Datasets library but it was not found in your environment. You can install it with:
@@ -959,6 +985,11 @@ installation section: https://github.com/rspeer/python-ftfy/tree/master#installi
that match your environment. Please note that you may need to restart your runtime after installation.
"""
LEVENSHTEIN_IMPORT_ERROR = """
{0} requires the python-Levenshtein library but it was not found in your environment. You can install it with pip: `pip
install python-Levenshtein`. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
PYTORCH_QUANTIZATION_IMPORT_ERROR = """
{0} requires the pytorch-quantization library but it was not found in your environment. You can install it with pip:
@@ -1028,6 +1059,14 @@ shi-labs.com/natten . You can also install it with pip (may take longer to build
`pip install natten`. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
NLTK_IMPORT_ERROR = """
{0} requires the NLTK library but it was not found in your environment. You can install it by referring to:
https://www.nltk.org/install.html. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
VISION_IMPORT_ERROR = """
{0} requires the PIL library but it was not found in your environment. You can install it with pip:
@@ -1109,6 +1148,7 @@ jinja2`. Please note that you may need to restart your runtime after installatio
BACKENDS_MAPPING = OrderedDict(
[
("bs4", (is_bs4_available, BS4_IMPORT_ERROR)),
("cv2", (is_cv2_available, CV2_IMPORT_ERROR)),
("datasets", (is_datasets_available, DATASETS_IMPORT_ERROR)),
("detectron2", (is_detectron2_available, DETECTRON2_IMPORT_ERROR)),
("essentia", (is_essentia_available, ESSENTIA_IMPORT_ERROR)),
@@ -1118,6 +1158,7 @@ BACKENDS_MAPPING = OrderedDict(
("pandas", (is_pandas_available, PANDAS_IMPORT_ERROR)),
("phonemizer", (is_phonemizer_available, PHONEMIZER_IMPORT_ERROR)),
("pretty_midi", (is_pretty_midi_available, PRETTY_MIDI_IMPORT_ERROR)),
("levenshtein", (is_levenshtein_available, LEVENSHTEIN_IMPORT_ERROR)),
("librosa", (is_librosa_available, LIBROSA_IMPORT_ERROR)),
("protobuf", (is_protobuf_available, PROTOBUF_IMPORT_ERROR)),
("pyctcdecode", (is_pyctcdecode_available, PYCTCDECODE_IMPORT_ERROR)),
@@ -1132,6 +1173,7 @@ BACKENDS_MAPPING = OrderedDict(
("tensorflow_text", (is_tensorflow_text_available, TENSORFLOW_TEXT_IMPORT_ERROR)),
("timm", (is_timm_available, TIMM_IMPORT_ERROR)),
("natten", (is_natten_available, NATTEN_IMPORT_ERROR)),
("nltk", (is_nltk_available, NLTK_IMPORT_ERROR)),
("tokenizers", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)),
("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)),
("torchvision", (is_torchvision_available, TORCHVISION_IMPORT_ERROR)),
......
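
For context, a simplified sketch of how `requires_backends` consumes this mapping (the real helper in `transformers.utils` adds a few framework-specific checks):

```python
def requires_backends(obj, backends):
    """Raise a combined ImportError for every backend in `backends`
    whose availability check in BACKENDS_MAPPING returns False."""
    if not isinstance(backends, (list, tuple)):
        backends = [backends]
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    failed = [
        error.format(name)
        for available, error in (BACKENDS_MAPPING[backend] for backend in backends)
        if not available()
    ]
    if failed:
        raise ImportError("".join(failed))
```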
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import NougatImageProcessor
class NougatImageProcessingTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_crop_margin=True,
do_resize=True,
size=None,
do_thumbnail=True,
do_align_long_axis: bool = False,
do_pad=True,
do_normalize: bool = True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
size = size if size is not None else {"height": 20, "width": 20}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_crop_margin = do_crop_margin
self.do_resize = do_resize
self.size = size
self.do_thumbnail = do_thumbnail
self.do_align_long_axis = do_align_long_axis
self.do_pad = do_pad
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"do_crop_margin": self.do_crop_margin,
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_long_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_dummy_image(self):
filepath = hf_hub_download(
repo_id="hf-internal-testing/fixtures_docvqa", filename="nougat_pdf.png", repo_type="dataset"
)
image = Image.open(filepath).convert("RGB")
return image
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class NougatImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = NougatImageProcessor if is_vision_available() else None
def setUp(self):
self.image_processor_tester = NougatImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
@cached_property
def image_processor(self):
return self.image_processing_class(**self.image_processor_dict)
def test_image_processor_properties(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
def test_image_processor_from_dict_with_kwargs(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 20, "width": 20})
image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def test_expected_output(self):
dummy_image = self.image_processor_tester.prepare_dummy_image()
image_processor = self.image_processor
inputs = image_processor(dummy_image, return_tensors="pt")
self.assertTrue(torch.allclose(inputs["pixel_values"].mean(), torch.tensor(0.4906), atol=1e-3, rtol=1e-3))
def test_crop_margin_all_white(self):
image = np.uint8(np.ones((100, 100, 3)) * 255)
image_processor = self.image_processor
cropped_image = image_processor.crop_margin(image)
self.assertTrue(np.array_equal(image, cropped_image))
def test_crop_margin_centered_black_square(self):
image = np.ones((100, 100, 3), dtype=np.uint8) * 255
image[45:55, 45:55, :] = 0
image_processor = self.image_processor
cropped_image = image_processor.crop_margin(image)
expected_cropped = image[45:55, 45:55, :]
self.assertTrue(np.array_equal(expected_cropped, cropped_image))
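The two tests above pin down the crop_margin contract: blank pages pass through unchanged, otherwise the image is cropped to the bounding box of the "ink". A numpy-only sketch consistent with both tests, since this commit dropped the cv2 implementation; it is an illustration, not the exact NougatImageProcessor code:

```python
import numpy as np


def crop_margin_sketch(image: np.ndarray, gray_threshold: int = 200) -> np.ndarray:
    """Crop uniform margins around document content (HxWxC uint8 input)."""
    gray = image.mean(axis=-1)  # rough grayscale
    lo, hi = gray.min(), gray.max()
    if lo == hi:  # uniform page: nothing to crop (all-white test above)
        return image
    # normalize to 0..255, then mark "ink" pixels below the threshold
    mask = (gray - lo) / (hi - lo) * 255 < gray_threshold
    rows = np.where(mask.any(axis=1))[0]
    cols = np.where(mask.any(axis=0))[0]
    return image[rows[0] : rows[-1] + 1, cols[0] : cols[-1] + 1]
```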
def test_align_long_axis_no_rotation(self):
image = np.uint8(np.ones((100, 200, 3)) * 255)
image_processor = self.image_processor
size = {"height": 200, "width": 300}
aligned_image = image_processor.align_long_axis(image, size)
self.assertEqual(image.shape, aligned_image.shape)
def test_align_long_axis_with_rotation(self):
image = np.uint8(np.ones((200, 100, 3)) * 255)
image_processor = self.image_processor
size = {"height": 300, "width": 200}
aligned_image = image_processor.align_long_axis(image, size)
self.assertEqual((200, 100, 3), aligned_image.shape)
def test_align_long_axis_data_format(self):
image = np.uint8(np.ones((100, 200, 3)) * 255)
data_format = "channels_first"
size = {"height": 200, "width": 300}
image_processor = self.image_processor
aligned_image = image_processor.align_long_axis(image, size, data_format=data_format)
self.assertEqual((3, 100, 200), aligned_image.shape)
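For reference, the orientation rule the three tests above exercise, as a short sketch: rotate only when the image and the target size disagree on landscape vs. portrait (`data_format` handling omitted):

```python
import numpy as np


def align_long_axis_sketch(image: np.ndarray, size: dict) -> np.ndarray:
    input_height, input_width = image.shape[:2]
    output_height, output_width = size["height"], size["width"]
    # rotate 90 degrees only when the orientations disagree
    if (output_width < output_height and input_width > input_height) or (
        output_width > output_height and input_width < input_height
    ):
        image = np.rot90(image, 3)
    return image
```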
def prepare_dummy_np_image(self):
filepath = hf_hub_download(
repo_id="hf-internal-testing/fixtures_docvqa", filename="nougat_pdf.png", repo_type="dataset"
)
image = Image.open(filepath).convert("RGB")
return np.array(image)
def test_crop_margin_equality_cv2_python(self):
image = self.prepare_dummy_np_image()
image_processor = self.image_processor
image_cropped_python = image_processor.crop_margin(image)
self.assertEqual(image_cropped_python.shape, (850, 685, 3))
self.assertEqual(image_cropped_python.mean(), 237.43881150708458)
# coding=utf-8
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import NougatTokenizerFast
from transformers.models.nougat.tokenization_nougat_fast import markdown_compatible, normalize_list_like_lines
from transformers.testing_utils import require_levenshtein, require_nltk, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class NougatTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
slow_tokenizer_class = None
rust_tokenizer_class = NougatTokenizerFast
tokenizer_class = NougatTokenizerFast
test_rust_tokenizer = True
test_slow_tokenizer = False
from_pretrained_vocab_key = "tokenizer_file"
special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def setUp(self):
super().setUp()
tokenizer = NougatTokenizerFast.from_pretrained("facebook/nougat-base")
tokenizer.save_pretrained(self.tmpdirname)
def get_rust_tokenizer(self, **kwargs):
kwargs.update(self.special_tokens_map)
return NougatTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
def test_padding(self, max_length=6):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
# Simple input
sentence1 = "This is a simple input"
sentence2 = ["This is a simple input 1", "This is a simple input 2"]
pair1 = ("This is a simple input", "This is a pair")
pair2 = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(sentence1, max_length=max_length)
tokenizer_r.encode_plus(sentence1, max_length=max_length)
tokenizer_r.batch_encode_plus(sentence2, max_length=max_length)
tokenizer_r.encode(pair1, max_length=max_length)
tokenizer_r.batch_encode_plus(pair2, max_length=max_length)
except ValueError:
self.fail("Nougat Tokenizer should be able to deal with padding")
tokenizer_r.pad_token = None # Hotfixing padding = None
self.assertRaises(
ValueError, tokenizer_r.encode, sentence1, max_length=max_length, padding="max_length"
)
# Simple input
self.assertRaises(
ValueError, tokenizer_r.encode_plus, sentence1, max_length=max_length, padding="max_length"
)
# Simple input
self.assertRaises(
ValueError,
tokenizer_r.batch_encode_plus,
sentence2,
max_length=max_length,
padding="max_length",
)
# Pair input
self.assertRaises(ValueError, tokenizer_r.encode, pair1, max_length=max_length, padding="max_length")
# Pair input
self.assertRaises(
ValueError, tokenizer_r.encode_plus, pair1, max_length=max_length, padding="max_length"
)
# Pair input
self.assertRaises(
ValueError,
tokenizer_r.batch_encode_plus,
pair2,
max_length=max_length,
padding="max_length",
)
@unittest.skip("NougatTokenizerFast does not have tokenizer_file in its signature")
def test_rust_tokenizer_signature(self):
pass
@unittest.skip("NougatTokenizerFast does not support pretokenized inputs")
def test_pretokenized_inputs(self):
pass
@unittest.skip("NougatTokenizerFast directly inherits from PreTrainedTokenizerFast")
def test_prepare_for_model(self):
pass
@unittest.skip("This needs a slow tokenizer. Nougat does not have one!")
def test_encode_decode_with_spaces(self):
pass
class MarkdownCompatibleTest(unittest.TestCase):
def test_equation_tag(self):
input_text = "(3.2) \\[Equation Text\\]"
excepted_output = "\\[Equation Text \\tag{3.2}\\]"
self.assertEqual(markdown_compatible(input_text), excepted_output)
def test_equation_tag_letters(self):
input_text = "(18a) \\[Equation Text\\]"
excepted_output = "\\[Equation Text \\tag{18a}\\]"
self.assertEqual(markdown_compatible(input_text), excepted_output)
def test_bold_formatting(self):
input_text = r"This is \bm{bold} text."
expected_output = r"This is \mathbf{bold} text."
self.assertEqual(markdown_compatible(input_text), expected_output)
def test_url_conversion(self):
input_text = "Visit my website at https://www.example.com"
expected_output = "Visit my website at [https://www.example.com](https://www.example.com)"
self.assertEqual(markdown_compatible(input_text), expected_output)
def test_algorithm_code_block(self):
input_text = "```python\nprint('Hello, world!')\n```"
expected_output = "```\npython\nprint('Hello, world!')\n```"
self.assertEqual(markdown_compatible(input_text), expected_output)
def test_escape_characters(self):
input_text = r"Escaped characters like \n should not be \\[affected\\]"
expected_output = r"Escaped characters like \n should not be \\[affected\\]"
self.assertEqual(markdown_compatible(input_text), expected_output)
def test_nested_tags(self):
input_text = r"This is a super nested \bm{\bm{\bm{\bm{\bm{bold}}}}} tag."
expected_output = r"This is a super nested \mathbf{\mathbf{\mathbf{\mathbf{\mathbf{bold}}}}} tag."
self.assertEqual(markdown_compatible(input_text), expected_output)
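As an illustration of what markdown_compatible does under the hood, here is one of its rules reimplemented as a standalone regex, consistent with the two equation-tag tests above (the actual library pattern may differ):

```python
import re


def equation_tag_to_markdown(text: str) -> str:
    """Move a leading equation number like "(3.2)" or "(18a)" into a
    LaTeX \\tag inside the display-math block."""
    return re.sub(
        r"^\((\d+(?:\.\d+)*[a-z]?)\) \\\[(.+?)\\\]$",
        r"\\[\2 \\tag{\1}\\]",
        text,
        flags=re.M,
    )


assert equation_tag_to_markdown("(3.2) \\[Equation Text\\]") == "\\[Equation Text \\tag{3.2}\\]"
```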
class TestNormalizeListLikeLines(unittest.TestCase):
def test_two_level_lines(self):
input_str = "* Item 1 * Item 2"
expected_output = "* Item 1\n* Item 2\n"
self.assertEqual(normalize_list_like_lines(input_str), expected_output)
def test_three_level_lines(self):
input_str = "- I. Item 1 - II. Item 2 - III. Item 3"
expected_output = "- I. Item 1\n- II. Item 2\n- III. Item 3\n"
self.assertEqual(normalize_list_like_lines(input_str), expected_output)
def test_nested_lines(self):
input_str = "- I. Item 1 - I.1 Sub-item 1 - I.1.1 Sub-sub-item 1 - II. Item 2"
expected_output = "- I. Item 1\n\t- I.1 Sub-item 1\n\t\t- I.1.1 Sub-sub-item 1\n- II. Item 2\n"
self.assertEqual(normalize_list_like_lines(input_str), expected_output)
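A toy sketch of the splitting-and-indenting behavior these tests describe; the real normalize_list_like_lines handles more bullet shapes and edge cases, but this passes the three cases above:

```python
import re


def normalize_list_like_lines_sketch(text: str) -> str:
    # split run-on bullets: break before every " - " / " * " marker
    items = re.split(r" (?=[-*] )", text)
    out = []
    for item in items:
        # depth from the dotted label after the marker:
        # "I." -> 0 tabs, "I.1" -> 1 tab, "I.1.1" -> 2 tabs
        label = item.split(" ")[1] if " " in item else ""
        depth = label.rstrip(".").count(".")
        out.append("\t" * depth + item)
    return "\n".join(out) + "\n"
```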
@require_tokenizers
class NougatPostProcessingTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.tokenizer = NougatTokenizerFast.from_pretrained("facebook/nougat-base")
def test_correct_tables_basic(self):
input_str = "\\begin{table} \\begin{tabular}{l l} & \\ \\end{tabular} \\end{table}"
expected_output = "\\begin{table}\n\\begin{tabular}{l l} & \\ \\end{tabular}\n\\end{table}"
self.assertEqual(self.tokenizer.correct_tables(input_str), expected_output)
def test_correct_tables_high_count(self):
input_str = "\\begin{tabular}" * 20
expected_output = ""
self.assertEqual(self.tokenizer.correct_tables(input_str), expected_output)
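Roughly what these two tests demand, as a sketch; the repetition threshold below is a hypothetical value, and the real correct_tables uses regexes plus additional cleanup:

```python
def correct_tables_sketch(text: str, max_tabulars: int = 15) -> str:
    # a run of many \begin{tabular} tokens is a generation loop: drop it
    if text.count("\\begin{tabular}") > max_tabulars:
        return ""
    # put the tabular block on its own lines inside the table environment
    text = text.replace("\\begin{table} \\begin{tabular}", "\\begin{table}\n\\begin{tabular}")
    text = text.replace("\\end{tabular} \\end{table}", "\\end{tabular}\n\\end{table}")
    return text
```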
@require_levenshtein
@require_nltk
def test_postprocess_as_nougat_no_markdown(self):
input_str = "# Nougat: Neural Optical Understanding for Academic Documents\n\n Lukas Blecher\n\nCorrespondence to: lblecher@meta.com\n\nGuillem Cucurull\n\nThomas Scialom\n\nRobert Stojnic\n\nMeta AI\n\nThe paper reports 8.1M papers but the authors recently updated the numbers on the GitHub page https://github.com/allenai/s2orc\n\n###### Abstract\n\nScientific knowledge is predominantly stored in books and scientific journals, often in the form of PDFs. However, the PDF format leads to a loss of semantic information, particularly for mathematical expressions. We propose Nougat (**N**eural **O**ptical **U**nderstanding for **A**cademic Documents), a Visual Transformer model that performs an _Optical Character Recognition_ (OCR) task for processing scientific documents into a markup language, and demonstrate the effectiveness of our model on a new dataset of scientific documents. The proposed approach offers a promising solution to enhance the accessibility of scientific knowledge in the digital age, by bridging the gap between human-readable documents and machine-readable text. We release the models and code to accelerate future work on scientific text recognition.\n\n## 1 Introduction\n\nThe majority of scientific knowledge is stored in books or published in scientific journals, most commonly in the Portable Document Format (PDF). Next to HTML, PDFs are the second most prominent data format on the internet, making up 2.4% of common crawl [1]. However, the information stored in these files is very difficult to extract into any other formats. This is especially true for highly specialized documents, such as scientific research papers, where the semantic information of mathematical expressions is lost.\n\nExisting Optical Character Recognition (OCR) engines, such as Tesseract OCR [2], excel at detecting and classifying individual characters and words in an image, but fail to understand the relationship between them due to their line-by-line approach. This means that they treat superscripts and subscripts in the same way as the surrounding text, which is a significant drawback for mathematical expressions. In mathematical notations like fractions, exponents, and matrices, relative positions of characters are crucial.\n\nConverting academic research papers into machine-readable text also enables accessibility and searchability of science as a whole. The information of millions of academic papers can not be fully accessed because they are locked behind an unreadable format. Existing corpora, such as the S2ORC dataset [3], capture the text of 12M2 papers using GROBID [4], but are missing meaningful representations of the mathematical equations.\n\nFootnote 2: The paper reports 8.1M papers but the authors recently updated the numbers on the GitHub page https://github.com/allenai/s2orc\n\nTo this end, we introduce Nougat, a transformer based model that can convert images of document pages to formatted markup text.\n\nThe primary contributions in this paper are\n\n* Release of a pre-trained model capable of converting a PDF to a lightweight markup language. We release the code and the model on GitHub3 Footnote 3: https://github.com/facebookresearch/nougat\n* We introduce a pipeline to create dataset for pairing PDFs to source code\n* Our method is only dependent on the image of a page, allowing access to scanned papers and books" # noqa: E231
expected_output = "\n\n# Nougat: Neural Optical Understanding for Academic Documents\n\n Lukas Blecher\n\nCorrespondence to: lblecher@meta.com\n\nGuillem Cucurull\n\nThomas Scialom\n\nRobert Stojnic\n\nMeta AI\n\nThe paper reports 8.1M papers but the authors recently updated the numbers on the GitHub page https://github.com/allenai/s2orc\n\n###### Abstract\n\nScientific knowledge is predominantly stored in books and scientific journals, often in the form of PDFs. However, the PDF format leads to a loss of semantic information, particularly for mathematical expressions. We propose Nougat (**N**eural **O**ptical **U**nderstanding for **A**cademic Documents), a Visual Transformer model that performs an _Optical Character Recognition_ (OCR) task for processing scientific documents into a markup language, and demonstrate the effectiveness of our model on a new dataset of scientific documents. The proposed approach offers a promising solution to enhance the accessibility of scientific knowledge in the digital age, by bridging the gap between human-readable documents and machine-readable text. We release the models and code to accelerate future work on scientific text recognition.\n\n## 1 Introduction\n\nThe majority of scientific knowledge is stored in books or published in scientific journals, most commonly in the Portable Document Format (PDF). Next to HTML, PDFs are the second most prominent data format on the internet, making up 2.4% of common crawl [1]. However, the information stored in these files is very difficult to extract into any other formats. This is especially true for highly specialized documents, such as scientific research papers, where the semantic information of mathematical expressions is lost.\n\nExisting Optical Character Recognition (OCR) engines, such as Tesseract OCR [2], excel at detecting and classifying individual characters and words in an image, but fail to understand the relationship between them due to their line-by-line approach. This means that they treat superscripts and subscripts in the same way as the surrounding text, which is a significant drawback for mathematical expressions. In mathematical notations like fractions, exponents, and matrices, relative positions of characters are crucial.\n\nConverting academic research papers into machine-readable text also enables accessibility and searchability of science as a whole. The information of millions of academic papers can not be fully accessed because they are locked behind an unreadable format. Existing corpora, such as the S2ORC dataset [3], capture the text of 12M2 papers using GROBID [4], but are missing meaningful representations of the mathematical equations.\n\nFootnote 2: The paper reports 8.1M papers but the authors recently updated the numbers on the GitHub page https://github.com/allenai/s2orc\n\nTo this end, we introduce Nougat, a transformer based model that can convert images of document pages to formatted markup text.\n\nThe primary contributions in this paper are\n\n* Release of a pre-trained model capable of converting a PDF to a lightweight markup language. We release the code and the model on GitHub3 Footnote 3: https://github.com/facebookresearch/nougat\n* We introduce a pipeline to create dataset for pairing PDFs to source code\n* Our method is only dependent on the image of a page, allowing access to scanned papers and books" # noqa: E231
self.assertEqual(self.tokenizer.post_process_single(input_str, fix_markdown=False), expected_output)
@@ -18,10 +18,13 @@ import tempfile
import unittest
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from packaging import version
from transformers import DonutProcessor, TrOCRProcessor
from transformers import DonutProcessor, NougatProcessor, TrOCRProcessor
from transformers.testing_utils import (
require_levenshtein,
require_nltk,
require_sentencepiece,
require_torch,
require_vision,
@@ -998,3 +1001,79 @@ class DonutModelIntegrationTest(unittest.TestCase):
outputs.scores[0][0, :3], torch.tensor([-17.6490, -4.8381, -15.7577], device=torch_device), atol=1e-4
)
)
@require_levenshtein
@require_nltk
@require_torch
@require_vision
@slow
class NougatModelIntegrationTest(unittest.TestCase):
@cached_property
def default_processor(self):
return NougatProcessor.from_pretrained("facebook/nougat-base") if is_vision_available() else None
@cached_property
def default_model(self):
return VisionEncoderDecoderModel.from_pretrained("facebook/nougat-base").to(torch_device)
@cached_property
def default_image(self):
filepath = hf_hub_download(
repo_id="hf-internal-testing/fixtures_docvqa", filename="nougat_pdf.png", repo_type="dataset"
)
image = Image.open(filepath).convert("RGB")
return image
def test_forward_pass(self):
processor = self.default_processor
model = self.default_model
image = self.default_image
pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
decoder_input_ids = torch.tensor([[0]]).to(torch_device)
outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 1, model.decoder.config.vocab_size))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor(
[1.6253, -4.2179, 5.8532, -2.7911, -5.0609, -4.7397, -4.2890, -5.1073, -4.8908, -4.9729]
).to(torch_device)
self.assertTrue(torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-4))
def test_generation(self):
processor = self.default_processor
model = self.default_model
image = self.default_image
pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
outputs = model.generate(
pixel_values,
min_length=1,
max_length=3584,
bad_words_ids=[[processor.tokenizer.unk_token_id]],
return_dict_in_generate=True,
output_scores=True,
)
# verify generated sequence
generated = processor.batch_decode(outputs.sequences, skip_special_tokens=True)[0]
expected_raw_generation = "# Nougat: Neural Optical Understanding for Academic Documents\n\n Lukas Blecher\n\nCorrespondence to: lblecher@meta.com\n\nGuillem Cucurull\n\nThomas Scialom\n\nRobert Stojnic\n\nMeta AI\n\nThe paper reports 8.1M papers but the authors recently updated the numbers on the GitHub page https://github.com/allenai/s2orc\n\n###### Abstract\n\nScientific knowledge is predominantly stored in books and scientific journals, often in the form of PDFs. However, the PDF format leads to a loss of semantic information, particularly for mathematical expressions. We propose Nougat (**N**eural **O**ptical **U**nderstanding for **A**cademic Documents), a Visual Transformer model that performs an _Optical Character Recognition_ (OCR) task for processing scientific documents into a markup language, and demonstrate the effectiveness of our model on a new dataset of scientific documents. The proposed approach offers a promising solution to enhance the accessibility of scientific knowledge in the digital age, by bridging the gap between human-readable documents and machine-readable text. We release the models and code to accelerate future work on scientific text recognition.\n\n## 1 Introduction\n\nThe majority of scientific knowledge is stored in books or published in scientific journals, most commonly in the Portable Document Format (PDF). Next to HTML, PDFs are the second most prominent data format on the internet, making up 2.4% of common crawl [1]. However, the information stored in these files is very difficult to extract into any other formats. This is especially true for highly specialized documents, such as scientific research papers, where the semantic information of mathematical expressions is lost.\n\nExisting Optical Character Recognition (OCR) engines, such as Tesseract OCR [2], excel at detecting and classifying individual characters and words in an image, but fail to understand the relationship between them due to their line-by-line approach. This means that they treat superscripts and subscripts in the same way as the surrounding text, which is a significant drawback for mathematical expressions. In mathematical notations like fractions, exponents, and matrices, relative positions of characters are crucial.\n\nConverting academic research papers into machine-readable text also enables accessibility and searchability of science as a whole. The information of millions of academic papers can not be fully accessed because they are locked behind an unreadable format. Existing corpora, such as the S2ORC dataset [3], capture the text of 12M2 papers using GROBID [4], but are missing meaningful representations of the mathematical equations.\n\nFootnote 2: The paper reports 8.1M papers but the authors recently updated the numbers on the GitHub page https://github.com/allenai/s2orc\n\nTo this end, we introduce Nougat, a transformer based model that can convert images of document pages to formatted markup text.\n\nThe primary contributions in this paper are\n\n* Release of a pre-trained model capable of converting a PDF to a lightweight markup language. We release the code and the model on GitHub3 Footnote 3: https://github.com/facebookresearch/nougat\n* We introduce a pipeline to create dataset for pairing PDFs to source code\n* Our method is only dependent on the image of a page, allowing access to scanned papers and books"
self.assertTrue(generated == expected_raw_generation)
# verify postprocessed sequence
generated = processor.post_process_generation(generated, fix_markdown=False)
expected_generation = "\n\n# Nougat: Neural Optical Understanding for Academic Documents\n\n Lukas Blecher\n\nCorrespondence to: lblecher@meta.com\n\nGuillem Cucurull\n\nThomas Scialom\n\nRobert Stojnic\n\nMeta AI\n\nThe paper reports 8.1M papers but the authors recently updated the numbers on the GitHub page https://github.com/allenai/s2orc\n\n###### Abstract\n\nScientific knowledge is predominantly stored in books and scientific journals, often in the form of PDFs. However, the PDF format leads to a loss of semantic information, particularly for mathematical expressions. We propose Nougat (**N**eural **O**ptical **U**nderstanding for **A**cademic Documents), a Visual Transformer model that performs an _Optical Character Recognition_ (OCR) task for processing scientific documents into a markup language, and demonstrate the effectiveness of our model on a new dataset of scientific documents. The proposed approach offers a promising solution to enhance the accessibility of scientific knowledge in the digital age, by bridging the gap between human-readable documents and machine-readable text. We release the models and code to accelerate future work on scientific text recognition.\n\n## 1 Introduction\n\nThe majority of scientific knowledge is stored in books or published in scientific journals, most commonly in the Portable Document Format (PDF). Next to HTML, PDFs are the second most prominent data format on the internet, making up 2.4% of common crawl [1]. However, the information stored in these files is very difficult to extract into any other formats. This is especially true for highly specialized documents, such as scientific research papers, where the semantic information of mathematical expressions is lost.\n\nExisting Optical Character Recognition (OCR) engines, such as Tesseract OCR [2], excel at detecting and classifying individual characters and words in an image, but fail to understand the relationship between them due to their line-by-line approach. This means that they treat superscripts and subscripts in the same way as the surrounding text, which is a significant drawback for mathematical expressions. In mathematical notations like fractions, exponents, and matrices, relative positions of characters are crucial.\n\nConverting academic research papers into machine-readable text also enables accessibility and searchability of science as a whole. The information of millions of academic papers can not be fully accessed because they are locked behind an unreadable format. Existing corpora, such as the S2ORC dataset [3], capture the text of 12M2 papers using GROBID [4], but are missing meaningful representations of the mathematical equations.\n\nFootnote 2: The paper reports 8.1M papers but the authors recently updated the numbers on the GitHub page https://github.com/allenai/s2orc\n\nTo this end, we introduce Nougat, a transformer based model that can convert images of document pages to formatted markup text.\n\nThe primary contributions in this paper are\n\n* Release of a pre-trained model capable of converting a PDF to a lightweight markup language. We release the code and the model on GitHub3 Footnote 3: https://github.com/facebookresearch/nougat\n* We introduce a pipeline to create dataset for pairing PDFs to source code\n* Our method is only dependent on the image of a page, allowing access to scanned papers and books"
self.assertTrue(generated == expected_generation)
# verify scores
self.assertEqual(len(outputs.scores), 741)
self.assertTrue(
torch.allclose(
outputs.scores[0][0, :3], torch.tensor([1.6253, -4.2179, 5.8532], device=torch_device), atol=1e-4
)
)
@@ -692,6 +692,7 @@ src/transformers/models/nezha/modeling_nezha.py
src/transformers/models/nllb_moe/configuration_nllb_moe.py
src/transformers/models/nllb_moe/convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py
src/transformers/models/nllb_moe/modeling_nllb_moe.py
src/transformers/models/nougat/convert_nougat_to_hf.py
src/transformers/models/nystromformer/configuration_nystromformer.py
src/transformers/models/nystromformer/convert_nystromformer_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/nystromformer/modeling_nystromformer.py
......