".github/vscode:/vscode.git/clone" did not exist on "d627333a6088bf8dee6d5cd51e9bff40f3170c85"
test_pipelines_visual_question_answering.py 6.45 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    is_torch_available,
    nested_simplify,
    require_tf,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch


if is_vision_available():
    from PIL import Image
else:

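    # Stub stand-in so module-level references to Image.open resolve when Pillow is
    # unavailable; the @require_vision tests are skipped in that case anyway.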
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
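    # Covers both classification-style VQA models (ViLT, scored answers) and
    # generative ones (BLIP-2, free-form answers).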
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
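        # This snapshot hardcodes a tiny ViLT checkpoint rather than using the `model`
        # argument; the two examples cover the accepted input formats: a pre-loaded
        # PIL.Image and a local file path.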
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
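        # With top_k=1, each example should yield exactly one {score, answer} candidate.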
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
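        # Tiny random ViLT: only the output structure (float scores, string answers)
        # is checked, for both the keyword-argument and dict calling conventions.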
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @require_torch
    @require_torch_gpu
    def test_small_model_pt_blip2(self):
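        # BLIP-2 is generative, so outputs carry only an "answer" key and no score.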
        vqa_pipeline = pipeline(
            "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration"
        )
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question)
        self.assertEqual(outputs, [{"answer": ANY(str)}])

        outputs = vqa_pipeline({"image": image, "question": question})
        self.assertEqual(outputs, [{"answer": ANY(str)}])

        outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}])
        self.assertEqual(outputs, [[{"answer": ANY(str)}]] * 2)

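        # Re-create the pipeline in float16 on GPU and verify device/dtype placement.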
        vqa_pipeline = pipeline(
            "visual-question-answering",
            model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration",
            model_kwargs={"torch_dtype": torch.float16},
            device=0,
        )
        self.assertEqual(vqa_pipeline.model.device, torch.device(0))
        self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16)
        self.assertEqual(vqa_pipeline.model.vision_model.dtype, torch.float16)

        outputs = vqa_pipeline(image=image, question=question)
        self.assertEqual(outputs, [{"answer": ANY(str)}])

    @slow
    @require_torch
    def test_large_model_pt(self):
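        # Real ViLT checkpoint: the top-2 answers and scores are compared against
        # known reference values.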
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @slow
    @require_torch
    @require_torch_gpu
    def test_large_model_pt_blip2(self):
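        # Real BLIP-2 checkpoint in float16 on GPU; it expects a
        # "Question: ... Answer:" prompt and generates free-form text.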
        vqa_pipeline = pipeline(
            "visual-question-answering",
            model="Salesforce/blip2-opt-2.7b",
            model_kwargs={"torch_dtype": torch.float16},
            device=0,
        )
        self.assertEqual(vqa_pipeline.model.device, torch.device(0))
        self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16)

        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "Question: how many cats are there? Answer:"

        outputs = vqa_pipeline(image=image, question=question)
        self.assertEqual(outputs, [{"answer": "two"}])

        outputs = vqa_pipeline({"image": image, "question": question})
        self.assertEqual(outputs, [{"answer": "two"}])

        outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}])
        self.assertEqual(outputs, [[{"answer": "two"}]] * 2)

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass