"tests/models/conditional_detr/__init__.py" did not exist on "783d7d2629e97c5f0c5f9ef01b8c66410275c204"
test_pipelines_zero_shot.py 9.37 KB
Newer Older
Sylvain Gugger's avatar
Sylvain Gugger committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from copy import deepcopy

from transformers.pipelines import Pipeline

from .test_pipelines_common import CustomInputPipelineCommonMixin
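
# Illustrative usage of the pipeline exercised by these tests (a sketch, not executed here):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
#     classifier(sequences="Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
#     # -> {"sequence": "...", "labels": [...], "scores": [...]}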


class ZeroShotClassificationPipelineTests(CustomInputPipelineCommonMixin, unittest.TestCase):
    pipeline_task = "zero-shot-classification"
    small_models = ["sgugger/tiny-distilbert-classification"]  # Models tested without the @slow decorator
    large_models = ["roberta-large-mnli"]  # Models tested with the @slow decorator
    valid_inputs = [
        {"sequences": "Who are you voting for in 2020?", "candidate_labels": "politics"},
        {"sequences": "Who are you voting for in 2020?", "candidate_labels": ["politics"]},
        {"sequences": "Who are you voting for in 2020?", "candidate_labels": "politics, public health"},
        {"sequences": "Who are you voting for in 2020?", "candidate_labels": ["politics", "public health"]},
        {"sequences": ["Who are you voting for in 2020?"], "candidate_labels": "politics"},
        {
            "sequences": "Who are you voting for in 2020?",
            "candidate_labels": "politics",
            "hypothesis_template": "This text is about {}",
        },
    ]

    def _test_scores_sum_to_one(self, result):
        # Scores are normalized over the candidate labels, so they should sum to 1.
        total = sum(result["scores"])
        self.assertAlmostEqual(total, 1.0, places=5)

    def _test_entailment_id(self, zero_shot_classifier: Pipeline):
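        # entailment_id is derived from config.label2id: any label whose name contains
        # "entail" (in any casing) is used as the entailment index; otherwise it falls back to -1.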
        config = zero_shot_classifier.model.config
        original_config = deepcopy(config)

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        zero_shot_classifier.model.config = original_config

    def _test_pipeline(self, zero_shot_classifier: Pipeline):
        output_keys = {"sequence", "labels", "scores"}
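        # candidate_labels may be a single label, a comma-separated string, or a list of labels;
        # hypothesis_template is optional but must contain a "{}" placeholder.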
        valid_mono_inputs = [
            {"sequences": "Who are you voting for in 2020?", "candidate_labels": "politics"},
            {"sequences": "Who are you voting for in 2020?", "candidate_labels": ["politics"]},
            {"sequences": "Who are you voting for in 2020?", "candidate_labels": "politics, public health"},
            {"sequences": "Who are you voting for in 2020?", "candidate_labels": ["politics", "public health"]},
            {"sequences": ["Who are you voting for in 2020?"], "candidate_labels": "politics"},
            {
                "sequences": "Who are you voting for in 2020?",
                "candidate_labels": "politics",
                "hypothesis_template": "This text is about {}",
            },
        ]
        valid_multi_input = {
            "sequences": ["Who are you voting for in 2020?", "What is the capital of Spain?"],
            "candidate_labels": "politics",
        }
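        # Each of these should make the pipeline raise: missing or empty sequences/labels,
        # or a hypothesis template without a "{}" placeholder.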
        invalid_inputs = [
            {"sequences": None, "candidate_labels": "politics"},
            {"sequences": "", "candidate_labels": "politics"},
            {"sequences": "Who are you voting for in 2020?", "candidate_labels": None},
            {"sequences": "Who are you voting for in 2020?", "candidate_labels": ""},
            {
                "sequences": "Who are you voting for in 2020?",
                "candidate_labels": "politics",
                "hypothesis_template": None,
            },
            {
                "sequences": "Who are you voting for in 2020?",
                "candidate_labels": "politics",
                "hypothesis_template": "",
            },
            {
                "sequences": "Who are you voting for in 2020?",
                "candidate_labels": "politics",
                "hypothesis_template": "Template without formatting syntax.",
            },
        ]
        self.assertIsNotNone(zero_shot_classifier)

        self._test_entailment_id(zero_shot_classifier)

        for mono_input in valid_mono_inputs:
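            # A single sequence returns a single dict with "sequence", "labels" and "scores".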
            mono_result = zero_shot_classifier(**mono_input)
            self.assertIsInstance(mono_result, dict)
            if len(mono_result["labels"]) > 1:
                self._test_scores_sum_to_one(mono_result)

            for key in output_keys:
                self.assertIn(key, mono_result)

        multi_result = zero_shot_classifier(**valid_multi_input)
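        # A list of input sequences yields a list with one result dict per sequence.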
        self.assertIsInstance(multi_result, list)
        self.assertIsInstance(multi_result[0], dict)
        self.assertEqual(len(multi_result), len(valid_multi_input["sequences"]))

        for result in multi_result:
            for key in output_keys:
                self.assertIn(key, result)

            if len(result["labels"]) > 1:
                self._test_scores_sum_to_one(result)

        for bad_input in invalid_inputs:
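            # Malformed inputs should raise rather than silently return a result.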
            self.assertRaises(Exception, zero_shot_classifier, **bad_input)

        if zero_shot_classifier.model.name_or_path in self.large_models:
            # For the large models, also check the actual outputs against expected reference values
            inputs = [
                {
                    "sequences": "Who are you voting for in 2020?",
                    "candidate_labels": ["politics", "public health", "science"],
                },
                {
                    "sequences": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.",
                    "candidate_labels": ["machine learning", "statistics", "translation", "vision"],
                    "multi_label": True,
                },
            ]

            expected_outputs = [
                {
                    "sequence": "Who are you voting for in 2020?",
                    "labels": ["politics", "public health", "science"],
                    "scores": [0.975, 0.015, 0.008],
                },
                {
                    "sequence": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.",
                    "labels": ["translation", "machine learning", "vision", "statistics"],
                    "scores": [0.817, 0.712, 0.018, 0.017],
                },
            ]

            # Scores are only checked to two decimal places; sequences and label order must match exactly.
            for model_input, expected_output in zip(inputs, expected_outputs):
                output = zero_shot_classifier(**model_input)
                for key in output:
                    if key == "scores":
                        for output_score, expected_score in zip(output[key], expected_output[key]):
                            self.assertAlmostEqual(output_score, expected_score, places=2)
                    else:
                        self.assertEqual(output[key], expected_output[key])