test_examples.py 11 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
# coding=utf-8
# Copyright 2018 HuggingFace Inc..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Aymeric Augustin's avatar
Aymeric Augustin committed
15

16
17

import argparse
18
import json
19
import logging
20
import os
Aymeric Augustin's avatar
Aymeric Augustin committed
21
import sys
Aymeric Augustin's avatar
Aymeric Augustin committed
22
from unittest.mock import patch
Aymeric Augustin's avatar
Aymeric Augustin committed
23

Stas Bekman's avatar
Stas Bekman committed
24
25
import torch

26
from transformers.file_utils import is_apex_available
27
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow, torch_device
28

29
30
31

# Directories of the individual example scripts; each is appended to sys.path
# so the scripts below can be imported as plain top-level modules.
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-generation",
        "text-classification",
        "token-classification",
        "language-modeling",
        "multiple-choice",
        "question-answering",
        "seq2seq",
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    # Example entry points, importable only because of the sys.path extension above.
    import run_clm
    import run_generation
    import run_glue
    import run_mlm
    import run_ner
    import run_qa as run_squad
    import run_seq2seq
    import run_swag


# Verbose logging for the whole test module; tests attach stdout handlers to
# this root logger so the example scripts' output shows up in test logs.
logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
def get_setup_file():
    """Return the value passed to this process via the ``-f`` CLI option.

    Returns ``None`` when the option was not supplied.
    """
    option_parser = argparse.ArgumentParser()
    option_parser.add_argument("-f")
    parsed = option_parser.parse_args()
    return parsed.f
def get_results(output_dir):
    """Load the metrics dict from ``all_results.json`` inside *output_dir*.

    Raises:
        ValueError: if the results file does not exist.
    """
    results_file = os.path.join(output_dir, "all_results.json")
    if not os.path.exists(results_file):
        raise ValueError(f"can't find {results_file}")
    with open(results_file, "r") as fp:
        return json.load(fp)
def is_cuda_and_apex_available():
    """Return True only when the tests run on CUDA and NVIDIA apex is installed."""
    cuda_in_use = torch_device == "cuda" and torch.cuda.is_available()
    return cuda_in_use and is_apex_available()
class ExamplesTests(TestCasePlus):
    """End-to-end smoke tests for the example scripts.

    Each test builds a CLI argument list, patches ``sys.argv`` with it, calls
    the corresponding example script's ``main()``, then loads the metrics the
    run wrote to ``all_results.json`` (via ``get_results``) and asserts loose
    quality thresholds on them.

    NOTE(review): every test attaches a new StreamHandler to the root logger
    and never removes it, so handlers (and duplicated log output) accumulate
    across tests — consider a cleanup that detaches the handler.
    """

    def test_run_glue(self):
        """Text classification (run_glue) on a tiny MRPC sample; checks accuracy."""
        # Echo script logging on stdout so failures are easier to debug.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        # Mixed precision only when both CUDA and apex are usable.
        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        with patch.object(sys, "argv", testargs):
            run_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    def test_run_clm(self):
        """Causal language modeling (run_clm) on a tiny text file; checks perplexity."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        if torch_device != "cuda":
            testargs.append("--no_cuda")

        with patch.object(sys, "argv", testargs):
            run_clm.main()
            result = get_results(tmp_dir)
            self.assertLess(result["perplexity"], 100)

    def test_run_mlm(self):
        """Masked language modeling (run_mlm) on a tiny text file; checks perplexity."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --prediction_loss_only
            --num_train_epochs=1
        """.split()

        if torch_device != "cuda":
            testargs.append("--no_cuda")

        with patch.object(sys, "argv", testargs):
            run_mlm.main()
            result = get_results(tmp_dir)
            self.assertLess(result["perplexity"], 42)

    def test_run_ner(self):
        """Token classification (run_ner) on a CoNLL sample; checks accuracy/precision/loss."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
        """.split()

        if torch_device != "cuda":
            testargs.append("--no_cuda")

        with patch.object(sys, "argv", testargs):
            run_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_precision"], 0.75)
            self.assertLess(result["eval_loss"], 0.5)

    def test_run_squad(self):
        """Question answering (run_qa, imported as run_squad) on a SQuAD v2 sample."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_squad.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=10
            --warmup_steps=2
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
        """.split()

        with patch.object(sys, "argv", testargs):
            run_squad.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["f1"], 30)
            self.assertGreaterEqual(result["exact"], 30)

    def test_run_swag(self):
        """Multiple choice (run_swag) on a SWAG sample; checks eval accuracy."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_swag.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=20
            --warmup_steps=2
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
        """.split()

        with patch.object(sys, "argv", testargs):
            run_swag.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.8)

    def test_generation(self):
        """Text generation (run_generation) with a tiny GPT-2; checks output length."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        testargs = ["run_generation.py", "--prompt=Hello", "--length=10", "--seed=42"]

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        model_type, model_name = (
            "--model_type=gpt2",
            "--model_name_or_path=sshleifer/tiny-gpt2",
        )
        with patch.object(sys, "argv", testargs + [model_type, model_name]):
            # run_generation.main() returns the generated sequences directly.
            result = run_generation.main()
            self.assertGreaterEqual(len(result[0]), 10)

    @slow
    def test_run_seq2seq_summarization(self):
        """Summarization (run_seq2seq + t5-small) on an XSum sample; checks ROUGE."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_seq2seq.py
            --model_name_or_path t5-small
            --task summarization
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=50
            --warmup_steps=8
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
        """.split()

        with patch.object(sys, "argv", testargs):
            run_seq2seq.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_rouge1"], 10)
            self.assertGreaterEqual(result["eval_rouge2"], 2)
            self.assertGreaterEqual(result["eval_rougeL"], 7)
            self.assertGreaterEqual(result["eval_rougeLsum"], 7)

    @slow
    def test_run_seq2seq_translation(self):
        """EN->RO translation (run_seq2seq + tiny Marian) on a WMT16 sample; checks BLEU."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_seq2seq.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --task translation_en_to_ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=50
            --warmup_steps=8
            --do_train
            --do_eval
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            --source_lang en_XX
            --target_lang ro_RO
        """.split()

        with patch.object(sys, "argv", testargs):
            run_seq2seq.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_bleu"], 30)