# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor


@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

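        # A tiny BPE vocabulary and merge list, just enough for CLIPTokenizer to tokenize the test strings below.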
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

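        # Write the vocab and merges files to disk so the tokenizers can be loaded via from_pretrained.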
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

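        # Minimal image processor configuration, saved to disk below so it can be reloaded with from_pretrained.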
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
        or a list of PyTorch tensors if one specifies torchify=True."""

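        # A single random channels-first uint8 image of shape (num_channels, height, width).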
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]

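        # PIL expects channels-last, so move the channel axis to the end before converting.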
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

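        # The save/load round trip should preserve the tokenizer vocab and class.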
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

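        # Extra kwargs passed to from_pretrained should be applied on top of the saved configuration.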
        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

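        # An image-only call to the processor should match calling the image processor directly.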
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

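        # A text-only call to the processor should match calling the tokenizer directly.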
        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

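        # Passing both text and images should yield the combined tokenizer and image processor outputs.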
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test that the processor raises a ValueError when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

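        # batch_decode should simply forward to the tokenizer's batch_decode.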
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)