# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union

import tqdm
from filelock import FileLock

from transformers import DataProcessor, PreTrainedTokenizer, is_tf_available, is_torch_available


logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """
    A single training/test example for simple sequence classification.

    Args:
        guid: Unique id for the example.
        text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
        text_b: (Optional) string. The untokenized text of the second sequence.
            Only must be specified for sequence pair tasks.
        label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
        pairID: (Optional) string. Unique identifier for the pair of sentences.
    """

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
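    # A minimal construction sketch (illustrative values, not drawn from the HANS files):
    #
    #     example = InputExample(
    #         guid="train-1",
    #         text_a="The doctor saw the lawyer.",
    #         text_b="The lawyer was seen by the doctor.",
    #         label="entailment",
    #         pairID="42",
    #     )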


@dataclass(frozen=True)
class InputFeatures:
    """
    A single set of features of data.
    Property names are the same names as the corresponding inputs to a model.

    Args:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            Usually  ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens.
        token_type_ids: (Optional) Segment token indices to indicate first and second
            portions of the inputs. Only some models use them.
        label: (Optional) Label corresponding to the input. Int for classification problems,
            float for regression problems.
        pairID: (Optional) Unique identifier for the pair of sentences.
    """

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
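    # Schematic illustration (token ids assume a BERT-style tokenizer and max_length=8;
    # the values are made up for readability):
    #
    #     InputFeatures(
    #         input_ids=[101, 7592, 102, 2088, 102, 0, 0, 0],  # [CLS] A [SEP] B [SEP] + padding
    #         attention_mask=[1, 1, 1, 1, 1, 0, 0, 0],         # 1 = real token, 0 = padding
    #         token_type_ids=[0, 0, 0, 1, 1, 0, 0, 0],         # 0 = first segment, 1 = second
    #         label=1,
    #         pairID=42,
    #     )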


if is_torch_available():
    import torch
    from torch.utils.data.dataset import Dataset

    class HansDataset(Dataset):
        """
        This will be superseded by a framework-agnostic approach
        soon.
        """

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task,
                ),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):

                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    label_list = processor.get_labels()

                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )

                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
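
    # A minimal usage sketch (assumes the HANS txt files live in "./hans"; the
    # checkpoint name is illustrative, any PreTrainedTokenizer works):
    #
    #     from transformers import AutoTokenizer
    #
    #     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    #     eval_dataset = HansDataset("./hans", tokenizer, task="hans", max_seq_length=128, evaluate=True)
    #     print(len(eval_dataset), eval_dataset[0].pairID)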


if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        """
        This will be superseded by a framework-agnostic approach
        soon.
        """

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for (ex_index, ex) in tqdm.tqdm(enumerate(self.features), desc="generating tf.data examples"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d", ex_index, len(examples))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
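
    # A minimal usage sketch (same assumptions as above; get_dataset() yields
    # (feature_dict, label) pairs as a tf.data.Dataset):
    #
    #     from transformers import AutoTokenizer
    #
    #     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    #     tf_ds = TFHansDataset("./hans", tokenizer, task="hans", max_seq_length=128, evaluate=True)
    #     batched = tf_ds.get_dataset().batch(32)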


class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
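            # Column layout of the released heuristics_*_set.txt files (tab-separated,
            # with a header row): 0 = gold_label, 5 = sentence1 (premise),
            # 6 = sentence2 (hypothesis), 7 = pairID (prefixed with "ex").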
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]  # the gold label is in the first column, not the last
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer,
):
    """
    Loads a data file into a list of ``InputFeatures``

    Args:
        examples: List of ``InputExamples`` containing the examples.
        label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method.
        max_length: Maximum example length.
        tokenizer: Instance of a tokenizer that will tokenize the examples.

    Returns:
        A list of task-specific ``InputFeatures`` which can be fed to the model.

    """

    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer.encode_plus(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            pad_to_max_length=True,
            return_overflowing_tokens=True,
        )

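        # HANS gold labels are "entailment" / "non-entailment"; "non-entailment" is not
        # in the MNLI label map, so it falls back to index 0 here.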
        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
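
# A minimal end-to-end sketch (paths and checkpoint are illustrative):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     processor = HansProcessor()
#     examples = processor.get_dev_examples("./hans")
#     features = hans_convert_examples_to_features(examples, processor.get_labels(), 128, tokenizer)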


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}