# coding=utf-8
# Copyright 2018 HuggingFace Inc..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Aymeric Augustin's avatar
Aymeric Augustin committed
15

16
17

import argparse
18
import json
19
import logging
20
import os
Aymeric Augustin's avatar
Aymeric Augustin committed
21
import sys
Aymeric Augustin's avatar
Aymeric Augustin committed
22
from unittest.mock import patch
Aymeric Augustin's avatar
Aymeric Augustin committed
23

Stas Bekman's avatar
Stas Bekman committed
24
25
import torch

26
from transformers.file_utils import is_apex_available
27
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow, torch_device
28

29
30
31

# Each example script lives in its own task folder next to this test file.
_EXAMPLE_FOLDERS = (
    "text-generation",
    "text-classification",
    "token-classification",
    "language-modeling",
    "multiple-choice",
    "question-answering",
    "summarization",
    "translation",
)
_HERE = os.path.dirname(__file__)

# Absolute paths to the example folders; appended to sys.path so the scripts
# can be imported as plain top-level modules below.
SRC_DIRS = [os.path.join(_HERE, folder) for folder in _EXAMPLE_FOLDERS]
sys.path.extend(SRC_DIRS)
# The example scripts are standalone files (not a package); they only become
# importable after their folders were added to sys.path above.  Note that
# run_qa is imported under the alias run_squad used by the tests below.
if SRC_DIRS is not None:
    import run_clm
    import run_generation
    import run_glue
    import run_mlm
    import run_ner
    import run_qa as run_squad
    import run_summarization
    import run_swag
    import run_translation
logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
61

62

63
64
def get_setup_file():
    """Return the value passed on the command line via ``-f`` (or None)."""
    cli = argparse.ArgumentParser()
    cli.add_argument("-f")
    parsed = cli.parse_args()
    return parsed.f
def get_results(output_dir):
    """Load the metrics an example script wrote to ``all_results.json``.

    Raises ValueError when the file is missing, i.e. the script never got
    far enough to write its final metrics.
    """
    path = os.path.join(output_dir, "all_results.json")
    if not os.path.exists(path):
        raise ValueError(f"can't find {path}")
    with open(path, "r") as f:
        return json.load(f)
def is_cuda_and_apex_available():
    """Whether fp16 (apex) runs are possible: CUDA device active and apex installed."""
    cuda_ok = torch.cuda.is_available() and torch_device == "cuda"
    if not cuda_ok:
        return False
    return is_apex_available()
class ExamplesTests(TestCasePlus):
    """End-to-end smoke tests for the PyTorch example scripts.

    Each test invokes an example script's ``main()`` with a patched
    ``sys.argv`` pointing at tiny fixture datasets, then asserts on the
    metrics the script wrote to ``all_results.json`` in a temporary output
    directory (auto-removed by ``TestCasePlus``).
    """

    def _mirror_logs_to_stdout(self):
        """Attach a stdout handler to the root logger for this test only.

        The example scripts log through the root logger; mirroring it to
        stdout lets the test runner capture their progress.  The handler is
        detached via ``addCleanup`` — previously every test added a fresh
        handler and never removed it, so handlers (and duplicated log lines)
        accumulated over the whole test session.
        """
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        self.addCleanup(logger.removeHandler, stream_handler)

    def test_run_glue(self):
        self._mirror_logs_to_stdout()

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        # fp16 is only exercised when both CUDA and apex are present.
        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        with patch.object(sys, "argv", testargs):
            run_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    def test_run_clm(self):
        self._mirror_logs_to_stdout()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        if torch_device != "cuda":
            testargs.append("--no_cuda")

        with patch.object(sys, "argv", testargs):
            run_clm.main()
            result = get_results(tmp_dir)
            self.assertLess(result["perplexity"], 100)

    def test_run_mlm(self):
        self._mirror_logs_to_stdout()

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --prediction_loss_only
            --num_train_epochs=1
        """.split()

        if torch_device != "cuda":
            testargs.append("--no_cuda")

        with patch.object(sys, "argv", testargs):
            run_mlm.main()
            result = get_results(tmp_dir)
            self.assertLess(result["perplexity"], 42)

    def test_run_ner(self):
        self._mirror_logs_to_stdout()

        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
        """.split()

        if torch_device != "cuda":
            testargs.append("--no_cuda")

        with patch.object(sys, "argv", testargs):
            run_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_precision"], 0.75)
            self.assertLess(result["eval_loss"], 0.5)

    def test_run_squad(self):
        self._mirror_logs_to_stdout()

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_squad.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=10
            --warmup_steps=2
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
        """.split()

        with patch.object(sys, "argv", testargs):
            run_squad.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["f1"], 30)
            self.assertGreaterEqual(result["exact"], 30)

    def test_run_swag(self):
        self._mirror_logs_to_stdout()

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_swag.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=20
            --warmup_steps=2
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
        """.split()

        with patch.object(sys, "argv", testargs):
            run_swag.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.8)

    def test_generation(self):
        self._mirror_logs_to_stdout()

        testargs = ["run_generation.py", "--prompt=Hello", "--length=10", "--seed=42"]

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        model_type, model_name = (
            "--model_type=gpt2",
            "--model_name_or_path=sshleifer/tiny-gpt2",
        )
        with patch.object(sys, "argv", testargs + [model_type, model_name]):
            result = run_generation.main()
            self.assertGreaterEqual(len(result[0]), 10)

    @slow
    def test_run_summarization(self):
        self._mirror_logs_to_stdout()

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=50
            --warmup_steps=8
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
        """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_rouge1"], 10)
            self.assertGreaterEqual(result["eval_rouge2"], 2)
            self.assertGreaterEqual(result["eval_rougeL"], 7)
            self.assertGreaterEqual(result["eval_rougeLsum"], 7)

    @slow
    def test_run_translation(self):
        self._mirror_logs_to_stdout()

        tmp_dir = self.get_auto_remove_tmp_dir()
        # NOTE(review): --source_lang/--target_lang appear twice below; with
        # argparse the later values (en_XX/ro_RO) win — confirm intended.
        testargs = f"""
            run_translation.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=50
            --warmup_steps=8
            --do_train
            --do_eval
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            --source_lang en_XX
            --target_lang ro_RO
        """.split()

        with patch.object(sys, "argv", testargs):
            run_translation.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_bleu"], 30)