# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
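        # Write a tiny CLIP BPE vocab/merges and an OwlViT image processor config to a temp dir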
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
        or a list of PyTorch tensors if one specifies torchify=True.
        """

        # Random channels-first uint8 image, converted to a channels-last PIL image
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]

        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        return image_inputs

    def test_save_load_pretrained_default(self):
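        # Round-trip through save_pretrained/from_pretrained with both slow and fast tokenizers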
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
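        # Kwargs passed to from_pretrained should override the saved tokenizer and image processor settings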
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", pad_token="!", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
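        # Image-only inputs should match the output of the underlying image processor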
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
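        # Text-only inputs should match the output of the underlying tokenizer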
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")

        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
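        # Text plus images should yield input_ids, attention_mask and pixel_values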
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
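        # A flat list of text queries is tokenized into a (num_queries, seq_length) batch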
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
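        # Nested queries are padded to the longest inner list, giving (batch_size * num_max_text_queries, seq_length)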
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max(len(texts) for texts in input_texts)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
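        # The token ids produced for two text queries should match the reference ids below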
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
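        # Image-guided queries: query_images should produce query_pixel_values alongside pixel_values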
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
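        # batch_decode on the processor should forward to the tokenizer's batch_decode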
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)