# coding=utf-8
# Copyright 2018 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

import torch

from transformers.file_utils import is_apex_available
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow, torch_device


SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-generation",
        "text-classification",
        "token-classification",
        "language-modeling",
        "multiple-choice",
        "question-answering",
        "seq2seq",
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
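    # The example scripts live in the directories added to sys.path above; importing them as
    # modules lets each test call the script's main() entry point directly.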
    import run_clm
    import run_generation
    import run_glue
    import run_mlm
    import run_ner
    import run_qa as run_squad
    import run_summarization
    import run_swag
    import run_translation


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
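    """Parse and return the value of the ``-f`` command line argument."""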
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
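    """Load the metrics an example script saved to ``all_results.json`` inside ``output_dir``."""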
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
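    """Return True when the tests run on a CUDA device and NVIDIA apex is installed."""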
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


class ExamplesTests(TestCasePlus):
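    # Each test runs one example script end-to-end on a tiny fixture dataset and checks that
    # the reported metrics (or, for generation, the produced output) clear a loose threshold.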
    def test_run_glue(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        if is_cuda_and_apex_available():
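            # only exercise --fp16 when both a CUDA device and apex are available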
            testargs.append("--fp16")

        with patch.object(sys, "argv", testargs):
            run_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    def test_run_clm(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        if torch_device != "cuda":
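            # force CPU execution when no usable CUDA device is present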
            testargs.append("--no_cuda")

        with patch.object(sys, "argv", testargs):
            run_clm.main()
            result = get_results(tmp_dir)
            self.assertLess(result["perplexity"], 100)

    def test_run_mlm(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --prediction_loss_only
            --num_train_epochs=1
        """.split()

        if torch_device != "cuda":
            testargs.append("--no_cuda")

        with patch.object(sys, "argv", testargs):
            run_mlm.main()
            result = get_results(tmp_dir)
            self.assertLess(result["perplexity"], 42)

    def test_run_ner(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
        """.split()

        if torch_device != "cuda":
            testargs.append("--no_cuda")

        with patch.object(sys, "argv", testargs):
            run_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_precision"], 0.75)
            self.assertLess(result["eval_loss"], 0.5)

    def test_run_squad(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_squad.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=10
            --warmup_steps=2
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
        """.split()

        with patch.object(sys, "argv", testargs):
            run_squad.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["f1"], 30)
            self.assertGreaterEqual(result["exact"], 30)

    def test_run_swag(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_swag.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=20
            --warmup_steps=2
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
        """.split()

        with patch.object(sys, "argv", testargs):
            run_swag.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.8)

    def test_generation(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        testargs = ["run_generation.py", "--prompt=Hello", "--length=10", "--seed=42"]

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        model_type, model_name = (
            "--model_type=gpt2",
            "--model_name_or_path=sshleifer/tiny-gpt2",
        )
        with patch.object(sys, "argv", testargs + [model_type, model_name]):
            result = run_generation.main()
            self.assertGreaterEqual(len(result[0]), 10)

    @slow
    def test_run_summarization(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=50
            --warmup_steps=8
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
        """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_rouge1"], 10)
            self.assertGreaterEqual(result["eval_rouge2"], 2)
            self.assertGreaterEqual(result["eval_rougeL"], 7)
            self.assertGreaterEqual(result["eval_rougeLsum"], 7)

    @slow
    def test_run_translation(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_translation.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=50
            --warmup_steps=8
            --do_train
            --do_eval
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            --source_lang en_XX
            --target_lang ro_RO
        """.split()

        with patch.object(sys, "argv", testargs):
            run_translation.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_bleu"], 30)