# test_examples.py
# coding=utf-8
# Copyright 2018 HuggingFace Inc..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import logging
import os
import sys
from unittest.mock import patch

import torch

from transformers.file_utils import is_apex_available
from transformers.testing_utils import TestCasePlus, require_torch_non_multi_gpu_but_fix_me, slow, torch_device


# Each example script lives in its own sub-directory of the examples folder;
# put all of them on sys.path so the scripts can be imported as plain modules.
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-generation",
        "text-classification",
        "token-classification",
        "language-modeling",
        "multiple-choice",
        "question-answering",
        "seq2seq",
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    # These imports only resolve thanks to the sys.path extension above.
    import run_clm
    import run_generation
    import run_glue
    import run_mlm
    import run_ner
    import run_qa as run_squad
    import run_seq2seq
    import run_swag


# DEBUG level so the example scripts' own logging is visible in test output.
logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
def get_setup_file():
    """Return the value passed on the command line via the ``-f`` option."""
    cli = argparse.ArgumentParser()
    cli.add_argument("-f")
    return cli.parse_args().f


def is_cuda_and_apex_available():
    """Return True only when the tests run on a CUDA device AND NVIDIA apex is installed."""
    cuda_selected = torch_device == "cuda" and torch.cuda.is_available()
    return cuda_selected and is_apex_available()


class ExamplesTests(TestCasePlus):
    """End-to-end smoke tests for the PyTorch example scripts.

    Each test builds a command line, patches ``sys.argv`` with it, calls the
    example script's ``main()`` directly, and asserts loose quality thresholds
    on the returned metrics.  Training runs use tiny fixture datasets and few
    steps, so thresholds are sanity bounds, not benchmarks.
    """

    @require_torch_non_multi_gpu_but_fix_me
    def test_run_glue(self):
        # Forward the script's log output to stdout so pytest captures it.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        # Mixed precision only when both CUDA and apex are usable.
        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        with patch.object(sys, "argv", testargs):
            result = run_glue.main()
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @require_torch_non_multi_gpu_but_fix_me
    def test_run_clm(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        if torch_device != "cuda":
            testargs.append("--no_cuda")

        with patch.object(sys, "argv", testargs):
            result = run_clm.main()
            self.assertLess(result["perplexity"], 100)

    @require_torch_non_multi_gpu_but_fix_me
    def test_run_mlm(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --prediction_loss_only
            --num_train_epochs=1
        """.split()

        if torch_device != "cuda":
            testargs.append("--no_cuda")

        with patch.object(sys, "argv", testargs):
            result = run_mlm.main()
            self.assertLess(result["perplexity"], 42)

    @require_torch_non_multi_gpu_but_fix_me
    def test_run_ner(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs=2
        """.split()

        if torch_device != "cuda":
            testargs.append("--no_cuda")

        with patch.object(sys, "argv", testargs):
            result = run_ner.main()
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_precision"], 0.75)
            self.assertLess(result["eval_loss"], 0.5)

    @require_torch_non_multi_gpu_but_fix_me
    def test_run_squad(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_squad.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=10
            --warmup_steps=2
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
        """.split()

        with patch.object(sys, "argv", testargs):
            result = run_squad.main()
            # f1/exact are on a 0-100 scale; 30 is a loose sanity floor.
            self.assertGreaterEqual(result["f1"], 30)
            self.assertGreaterEqual(result["exact"], 30)

    @require_torch_non_multi_gpu_but_fix_me
    def test_run_swag(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_swag.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=20
            --warmup_steps=2
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
        """.split()

        with patch.object(sys, "argv", testargs):
            result = run_swag.main()
            self.assertGreaterEqual(result["eval_accuracy"], 0.8)

    @require_torch_non_multi_gpu_but_fix_me
    def test_generation(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        testargs = ["run_generation.py", "--prompt=Hello", "--length=10", "--seed=42"]

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        model_type, model_name = (
            "--model_type=gpt2",
            "--model_name_or_path=sshleifer/tiny-gpt2",
        )
        with patch.object(sys, "argv", testargs + [model_type, model_name]):
            result = run_generation.main()
            # result[0] is the generated text for the (single) prompt.
            self.assertGreaterEqual(len(result[0]), 10)

    @slow
    @require_torch_non_multi_gpu_but_fix_me
    def test_run_seq2seq_summarization(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_seq2seq.py
            --model_name_or_path t5-small
            --task summarization
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=50
            --warmup_steps=8
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
        """.split()

        with patch.object(sys, "argv", testargs):
            result = run_seq2seq.main()

            self.assertGreaterEqual(result["eval_rouge1"], 10)
            self.assertGreaterEqual(result["eval_rouge2"], 2)
            self.assertGreaterEqual(result["eval_rougeL"], 7)
            self.assertGreaterEqual(result["eval_rougeLsum"], 7)

    @slow
    @require_torch_non_multi_gpu_but_fix_me
    def test_run_seq2seq_translation(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_seq2seq.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --task translation_en_to_ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=50
            --warmup_steps=8
            --do_train
            --do_eval
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            --source_lang en_XX
            --target_lang ro_RO
        """.split()

        with patch.object(sys, "argv", testargs):
            result = run_seq2seq.main()
            self.assertGreaterEqual(result["eval_bleu"], 30)