# coding=utf-8
# Copyright 2018 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import dataclasses
import os
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, EvaluationStrategy, PretrainedConfig, TrainingArguments, is_torch_available
from transformers.file_utils import WEIGHTS_NAME
from transformers.testing_utils import (
    get_tests_dir,
    require_datasets,
    require_optuna,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)
from transformers.utils.hp_naming import TrialShortNamer


if is_torch_available():
    import torch
    from torch.utils.data import IterableDataset

    from transformers import (
        AutoModelForMaskedLM,
        AutoModelForSequenceClassification,
        DataCollatorForLanguageModeling,
        EarlyStoppingCallback,
        GlueDataset,
        GlueDataTrainingArguments,
        GPT2Config,
        GPT2LMHeadModel,
        LineByLineTextDataset,
        PreTrainedModel,
        TextDataset,
        Trainer,
        TrainerState,
    )


PATH_SAMPLE_TEXT = f"{get_tests_dir()}/fixtures/sample_text.txt"


class RegressionDataset:
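    """Toy regression dataset: labels are a * x + b plus Gaussian noise, one array per entry in label_names."""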
    def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
        np.random.seed(seed)
        self.label_names = ["labels"] if label_names is None else label_names
        self.length = length
        self.x = np.random.normal(size=(length,)).astype(np.float32)
        self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names]
        self.ys = [y.astype(np.float32) for y in self.ys]

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        result = {name: y[i] for name, y in zip(self.label_names, self.ys)}
        result["input_x"] = self.x[i]
        return result


class RepeatDataset:
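    """Dataset that returns the same fixed sequence as both input_ids and labels for every index."""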
    def __init__(self, x, length=64):
        self.x = x
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"input_ids": self.x, "labels": self.x}


class DynamicShapesDataset:
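    """Dataset with variable-length samples, to exercise padding and concatenation during evaluation."""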
    def __init__(self, length=64, seed=42, batch_size=8):
        self.length = length
        np.random.seed(seed)
        sizes = np.random.randint(1, 20, (length // batch_size,))
        # For easy batching, we make every batch_size consecutive samples the same size.
        self.xs = [np.random.normal(size=(s,)) for s in sizes.repeat(batch_size)]
        self.ys = [np.random.normal(size=(s,)) for s in sizes.repeat(batch_size)]

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"input_x": self.xs[i], "labels": self.ys[i]}


class AlmostAccuracy:
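    """Callable metric: the fraction of predictions within thresh of their labels."""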
    def __init__(self, thresh=0.25):
        self.thresh = thresh

    def __call__(self, eval_pred):
        predictions, labels = eval_pred
        true = np.abs(predictions - labels) <= self.thresh
        return {"accuracy": true.astype(np.float32).mean().item()}


class RegressionModelConfig(PretrainedConfig):
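    """Minimal config storing the regression coefficients a and b and a double_output flag."""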
    def __init__(self, a=0, b=0, double_output=False, **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.b = b
        self.double_output = double_output


if is_torch_available():

    class SampleIterableDataset(IterableDataset):
        """
        The criterion is not whether the dataset is an IterableDataset, but whether __len__ is implemented.
        """

        def __init__(self, file_path, tokenizer):
            self.ds = TextDataset(file_path=file_path, tokenizer=tokenizer, block_size=64)

        def __iter__(self):
            for i in range(len(self.ds)):
                yield self.ds[i]

    class RegressionModel(torch.nn.Module):
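        """Linear model y = a * x + b; returns an MSE loss as first output when labels are provided."""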
        def __init__(self, a=0, b=0, double_output=False):
            super().__init__()
            self.a = torch.nn.Parameter(torch.tensor(a).float())
            self.b = torch.nn.Parameter(torch.tensor(b).float())
            self.double_output = double_output
            self.config = None

        def forward(self, input_x=None, labels=None, **kwargs):
            y = input_x * self.a + self.b
            if labels is None:
                return (y, y) if self.double_output else (y,)
            loss = torch.nn.functional.mse_loss(y, labels)
            return (loss, y, y) if self.double_output else (loss, y)

    class RegressionDictModel(torch.nn.Module):
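        """Same linear model as RegressionModel, but returns its outputs in a dict (as Apex O2 does)."""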
        def __init__(self, a=0, b=0):
            super().__init__()
            self.a = torch.nn.Parameter(torch.tensor(a).float())
            self.b = torch.nn.Parameter(torch.tensor(b).float())
            self.config = None

        def forward(self, input_x=None, labels=None, **kwargs):
            y = input_x * self.a + self.b
            result = {"output": y}
            if labels is not None:
                result["loss"] = torch.nn.functional.mse_loss(y, labels)
            return result

    class RegressionPreTrainedModel(PreTrainedModel):
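        """PreTrainedModel version of the linear model, so checkpoint saving and loading can be tested."""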
        config_class = RegressionModelConfig
        base_model_prefix = "regression"

        def __init__(self, config):
            super().__init__(config)
            self.a = torch.nn.Parameter(torch.tensor(config.a).float())
            self.b = torch.nn.Parameter(torch.tensor(config.b).float())
            self.double_output = config.double_output

        def forward(self, input_x=None, labels=None, **kwargs):
            y = input_x * self.a + self.b
            if labels is None:
                return (y, y) if self.double_output else (y,)
            loss = torch.nn.functional.mse_loss(y, labels)
            return (loss, y, y) if self.double_output else (loss, y)

    def get_regression_trainer(a=0, b=0, double_output=False, train_len=64, eval_len=64, pretrained=True, **kwargs):
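        """Builds a Trainer on the regression problem; remaining kwargs are forwarded to TrainingArguments."""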
        label_names = kwargs.get("label_names", None)
        train_dataset = RegressionDataset(length=train_len, label_names=label_names)
        eval_dataset = RegressionDataset(length=eval_len, label_names=label_names)
        if pretrained:
            config = RegressionModelConfig(a=a, b=b, double_output=double_output)
            model = RegressionPreTrainedModel(config)
        else:
            model = RegressionModel(a=a, b=b, double_output=double_output)
        compute_metrics = kwargs.pop("compute_metrics", None)
        data_collator = kwargs.pop("data_collator", None)
        optimizers = kwargs.pop("optimizers", (None, None))
        output_dir = kwargs.pop("output_dir", "./regression")
        model_init = kwargs.pop("model_init", None)
        args = TrainingArguments(output_dir, **kwargs)
        return Trainer(
            model,
            args,
            data_collator=data_collator,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            compute_metrics=compute_metrics,
            optimizers=optimizers,
            model_init=model_init,
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class TrainerIntegrationTest(unittest.TestCase):
    def setUp(self):
        args = TrainingArguments(".")
        self.n_epochs = args.num_train_epochs
        self.batch_size = args.train_batch_size
        trainer = get_regression_trainer(learning_rate=0.1)
        trainer.train()
        self.default_trained_model = (trainer.model.a, trainer.model.b)

        trainer = get_regression_trainer(learning_rate=0.1, seed=314)
        trainer.train()
        self.alternate_trained_model = (trainer.model.a, trainer.model.b)

    def check_trained_model(self, model, alternate_seed=False):
        # Compares the model to a reference training run with learning_rate = 0.1 (default or alternate seed).
        (a, b) = self.alternate_trained_model if alternate_seed else self.default_trained_model
        self.assertTrue(torch.allclose(model.a, a))
        self.assertTrue(torch.allclose(model.b, b))

    def check_saved_checkpoints(self, output_dir, freq, total, is_pretrained=True):
        file_list = [WEIGHTS_NAME, "training_args.bin", "optimizer.pt", "scheduler.pt", "trainer_state.json"]
        if is_pretrained:
            file_list.append("config.json")
        for step in range(freq, total, freq):
            checkpoint = os.path.join(output_dir, f"checkpoint-{step}")
            self.assertTrue(os.path.isdir(checkpoint))
            for filename in file_list:
                self.assertTrue(os.path.isfile(os.path.join(checkpoint, filename)))

    def check_best_model_has_been_loaded(
        self, output_dir, freq, total, trainer, metric, greater_is_better=False, is_pretrained=True
    ):
        checkpoint = os.path.join(output_dir, f"checkpoint-{(total // freq) * freq}")
        log_history = TrainerState.load_from_json(os.path.join(checkpoint, "trainer_state.json")).log_history

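        # The best checkpoint is the one whose logged metric value is the best across all evaluations.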
        values = [d[metric] for d in log_history]
        best_value = max(values) if greater_is_better else min(values)
        best_checkpoint = (values.index(best_value) + 1) * freq
        checkpoint = os.path.join(output_dir, f"checkpoint-{best_checkpoint}")
        if is_pretrained:
            best_model = RegressionPreTrainedModel.from_pretrained(checkpoint)
            best_model.to(trainer.args.device)
        else:
            best_model = RegressionModel()
            state_dict = torch.load(os.path.join(checkpoint, WEIGHTS_NAME))
            best_model.load_state_dict(state_dict)
            best_model.to(trainer.args.device)
        self.assertTrue(torch.allclose(best_model.a, trainer.model.a))
        self.assertTrue(torch.allclose(best_model.b, trainer.model.b))

        metrics = trainer.evaluate()
        self.assertEqual(metrics[metric], best_value)

    def check_trainer_state_are_the_same(self, trainer_state, trainer_state1):
        # We'll pop things so operate on copies.
        state = trainer_state.copy()
        state1 = trainer_state1.copy()
        # The log history may contain different logs for the time metrics (after resuming a training).
        log_history = state.pop("log_history", None)
        log_history1 = state1.pop("log_history", None)
        self.assertEqual(state, state1)
        for log, log1 in zip(log_history, log_history1):
            _ = log.pop("train_runtime", None)
            _ = log1.pop("train_runtime", None)
            _ = log.pop("train_samples_per_second", None)
            _ = log1.pop("train_samples_per_second", None)
            self.assertEqual(log, log1)

    def test_trainer_works_with_dict(self):
        # Edge case because Apex with mode O2 will change our models to return dicts. This test checks it doesn't break
        # anything.
        train_dataset = RegressionDataset()
        eval_dataset = RegressionDataset()
        model = RegressionDictModel()
        args = TrainingArguments("./regression")
        trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
        trainer.train()
        _ = trainer.evaluate()
        _ = trainer.predict(eval_dataset)

    def test_evaluation_with_keys_to_drop(self):
        config = GPT2Config(vocab_size=100, n_positions=128, n_ctx=128, n_embd=32, n_layer=3, n_head=4)
        tiny_gpt2 = GPT2LMHeadModel(config)
        x = torch.randint(0, 100, (128,))
        eval_dataset = RepeatDataset(x)
        args = TrainingArguments("./test")
        trainer = Trainer(tiny_gpt2, args, eval_dataset=eval_dataset)
        # By default the past_key_values are removed
        result = trainer.predict(eval_dataset)
        self.assertTrue(isinstance(result.predictions, np.ndarray))
        # We can still get them by setting ignore_keys to []
        result = trainer.predict(eval_dataset, ignore_keys=[])
        self.assertTrue(isinstance(result.predictions, tuple))
        self.assertEqual(len(result.predictions), 2)

    def test_training_arguments_are_left_untouched(self):
        trainer = get_regression_trainer()
        trainer.train()
        args = TrainingArguments("./regression")
        dict1, dict2 = args.to_dict(), trainer.args.to_dict()
        for key in dict1.keys():
            # Logging dir can be slightly different as they default to something with the time.
            if key != "logging_dir":
                self.assertEqual(dict1[key], dict2[key])

    def test_reproducible_training(self):
        # Checks that training worked, i.e. the model actually trained, and that the seed made it reproducible.
        trainer = get_regression_trainer(learning_rate=0.1)
        trainer.train()
        self.check_trained_model(trainer.model)

        # Checks that a different seed gets different (reproducible) results.
        trainer = get_regression_trainer(learning_rate=0.1, seed=314)
        trainer.train()
        self.check_trained_model(trainer.model, alternate_seed=True)

    def test_number_of_steps_in_training(self):
        # Regular training has n_epochs * len(train_dl) steps
        trainer = get_regression_trainer(learning_rate=0.1)
        train_output = trainer.train()
        self.assertEqual(train_output.global_step, self.n_epochs * 64 / self.batch_size)

        # Check passing num_train_epochs works (and a float version too):
        trainer = get_regression_trainer(learning_rate=0.1, num_train_epochs=1.5)
        train_output = trainer.train()
        self.assertEqual(train_output.global_step, int(1.5 * 64 / self.batch_size))

        # If we pass a max_steps, num_train_epochs is ignored
        trainer = get_regression_trainer(learning_rate=0.1, max_steps=10)
        train_output = trainer.train()
        self.assertEqual(train_output.global_step, 10)

    def test_train_and_eval_dataloaders(self):
        n_gpu = max(1, torch.cuda.device_count())
        trainer = get_regression_trainer(learning_rate=0.1, per_device_train_batch_size=16)
        self.assertEqual(trainer.get_train_dataloader().batch_size, 16 * n_gpu)
        trainer = get_regression_trainer(learning_rate=0.1, per_device_eval_batch_size=16)
        self.assertEqual(trainer.get_eval_dataloader().batch_size, 16 * n_gpu)

        # Check drop_last works
        trainer = get_regression_trainer(
            train_len=66, eval_len=74, learning_rate=0.1, per_device_train_batch_size=16, per_device_eval_batch_size=32
        )
        self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu) + 1)
        self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu) + 1)

        trainer = get_regression_trainer(
            train_len=66,
            eval_len=74,
            learning_rate=0.1,
            per_device_train_batch_size=16,
            per_device_eval_batch_size=32,
            dataloader_drop_last=True,
        )
        self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu))
        self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu))

        # Check passing a new dataset for evaluation works
        new_eval_dataset = RegressionDataset(length=128)
        self.assertEqual(len(trainer.get_eval_dataloader(new_eval_dataset)), 128 // (32 * n_gpu))

    def test_evaluate(self):
        trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy())
        results = trainer.evaluate()

        x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
        pred = 1.5 * x + 2.5
        expected_loss = ((pred - y) ** 2).mean()
        self.assertAlmostEqual(results["eval_loss"], expected_loss)
        expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
        self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

        # With a number of elements not a round multiple of the batch size
        trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy())
        results = trainer.evaluate()

        x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
        pred = 1.5 * x + 2.5
        expected_loss = ((pred - y) ** 2).mean()
        self.assertAlmostEqual(results["eval_loss"], expected_loss)
        expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
        self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

    def test_predict(self):
        trainer = get_regression_trainer(a=1.5, b=2.5)
        preds = trainer.predict(trainer.eval_dataset).predictions
        x = trainer.eval_dataset.x
        self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))

        # With a number of elements not a round multiple of the batch size
        trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66)
        preds = trainer.predict(trainer.eval_dataset).predictions
        x = trainer.eval_dataset.x
        self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))

        # With more than one output of the model
        trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True)
        preds = trainer.predict(trainer.eval_dataset).predictions
        x = trainer.eval_dataset.x
        self.assertEqual(len(preds), 2)
        self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
        self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))

        # With more than one output/label of the model
        trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"])
        outputs = trainer.predict(trainer.eval_dataset)
        preds = outputs.predictions
        labels = outputs.label_ids
        x = trainer.eval_dataset.x
        self.assertEqual(len(preds), 2)
        self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
        self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
        self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0]))
        self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1]))

    def test_dynamic_shapes(self):
        eval_dataset = DynamicShapesDataset(batch_size=self.batch_size)
        model = RegressionModel(a=2, b=1)
        args = TrainingArguments("./regression")
        trainer = Trainer(model, args, eval_dataset=eval_dataset)

        # Check evaluation can run to completion
        _ = trainer.evaluate()

        # Check predictions
        preds = trainer.predict(eval_dataset)
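        # Predictions and labels are padded with -100 up to the length of the longest sample.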
        for expected, seen in zip(eval_dataset.ys, preds.label_ids):
            self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]]))
            self.assertTrue(np.all(seen[expected.shape[0] :] == -100))

        for expected, seen in zip(eval_dataset.xs, preds.predictions):
            self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]]))
            self.assertTrue(np.all(seen[expected.shape[0] :] == -100))

        # Same tests with eval accumulation
        args = TrainingArguments("./regression", eval_accumulation_steps=2)
        trainer = Trainer(model, args, eval_dataset=eval_dataset)

        # Check evaluation can run to completion
        _ = trainer.evaluate()

        # Check predictions
        preds = trainer.predict(eval_dataset)
        for expected, seen in zip(eval_dataset.ys, preds.label_ids):
            self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]]))
            self.assertTrue(np.all(seen[expected.shape[0] :] == -100))

        for expected, seen in zip(eval_dataset.xs, preds.predictions):
            self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]]))
            self.assertTrue(np.all(seen[expected.shape[0] :] == -100))

    @require_datasets
    def test_trainer_with_datasets(self):
        import datasets

        np.random.seed(42)
        x = np.random.normal(size=(64,)).astype(np.float32)
        y = 2.0 * x + 3.0 + np.random.normal(scale=0.1, size=(64,))
        train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y})

        # Base training. Should have the same results as test_reproducible_training
        model = RegressionModel()
        args = TrainingArguments("./regression", learning_rate=0.1)
        trainer = Trainer(model, args, train_dataset=train_dataset)
        trainer.train()
        self.check_trained_model(trainer.model)

        # Can return tensors.
        train_dataset.set_format(type="torch")
        model = RegressionModel()
        trainer = Trainer(model, args, train_dataset=train_dataset)
        trainer.train()
        self.check_trained_model(trainer.model)

        # Adding one column not used by the model should have no impact
        z = np.random.normal(size=(64,)).astype(np.float32)
        train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y, "extra": z})
        model = RegressionModel()
        trainer = Trainer(model, args, train_dataset=train_dataset)
        trainer.train()
        self.check_trained_model(trainer.model)

    def test_custom_optimizer(self):
        train_dataset = RegressionDataset()
        args = TrainingArguments("./regression")
        model = RegressionModel()
        optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
        lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 1.0)
        trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler))
        trainer.train()

        (a, b) = self.default_trained_model
        self.assertFalse(torch.allclose(trainer.model.a, a))
        self.assertFalse(torch.allclose(trainer.model.b, b))
        self.assertEqual(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 1.0)

    def test_model_init(self):
        train_dataset = RegressionDataset()
        args = TrainingArguments("./regression", learning_rate=0.1)
        trainer = Trainer(args=args, train_dataset=train_dataset, model_init=lambda: RegressionModel())
        trainer.train()
        self.check_trained_model(trainer.model)

        # Re-training should restart from scratch, thus lead to the same results.
        trainer.train()
        self.check_trained_model(trainer.model)

        # Re-training should restart from scratch, thus lead to the same results, and the new seed should be used.
        trainer.args.seed = 314
        trainer.train()
        self.check_trained_model(trainer.model, alternate_seed=True)

    def test_save_checkpoints(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5)
            trainer.train()
            self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size))

        # With a regular model that is not a PreTrainedModel
        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5, pretrained=False)
            trainer.train()
            self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), False)

    def test_gradient_accumulation(self):
        # Training with half the batch size but gradient accumulation steps of 2 should give the same results.
        trainer = get_regression_trainer(
            gradient_accumulation_steps=2, per_device_train_batch_size=4, learning_rate=0.1
        )
        trainer.train()
        self.check_trained_model(trainer.model)

    def test_can_resume_training(self):
        if torch.cuda.device_count() > 2:
            # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
            # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
            # won't be the same since the training dataloader is shuffled).
            return
        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1)
            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()
            state = dataclasses.asdict(trainer.state)

            checkpoint = os.path.join(tmpdir, "checkpoint-5")

            # Reinitialize trainer and load model
            model = RegressionPreTrainedModel.from_pretrained(checkpoint)
            trainer = Trainer(model, trainer.args, train_dataset=trainer.train_dataset)

            trainer.train(model_path=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

            # Now check with a later checkpoint that it also works when we span over one epoch
            checkpoint = os.path.join(tmpdir, "checkpoint-15")

            # Reinitialize trainer and load model
            model = RegressionPreTrainedModel.from_pretrained(checkpoint)
            trainer = Trainer(model, trainer.args, train_dataset=trainer.train_dataset)

            trainer.train(model_path=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

        # With a regular model that is not a PreTrainedModel
        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(
                output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1, pretrained=False
            )
            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()
            state = dataclasses.asdict(trainer.state)

            checkpoint = os.path.join(tmpdir, "checkpoint-5")

            # Reinitialize trainer and load model
            model = RegressionModel()
            state_dict = torch.load(os.path.join(checkpoint, WEIGHTS_NAME))
            model.load_state_dict(state_dict)
            trainer = Trainer(model, trainer.args, train_dataset=trainer.train_dataset)

            trainer.train(model_path=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

            # Now check with a later checkpoint that it also works when we span over one epoch
            checkpoint = os.path.join(tmpdir, "checkpoint-15")

            # Reinitialize trainer and load model
            model = RegressionModel()
            state_dict = torch.load(os.path.join(checkpoint, WEIGHTS_NAME))
            model.load_state_dict(state_dict)
            trainer = Trainer(model, trainer.args, train_dataset=trainer.train_dataset)

            trainer.train(model_path=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

    def test_resume_training_with_gradient_accumulation(self):
        if torch.cuda.device_count() > 2:
            # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
            # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
            # won't be the same since the training dataloader is shuffled).
            return
        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(
                output_dir=tmpdir,
                train_len=128,
                gradient_accumulation_steps=2,
                per_device_train_batch_size=4,
                save_steps=5,
                learning_rate=0.1,
            )
            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()
            state = dataclasses.asdict(trainer.state)

            checkpoint = os.path.join(tmpdir, "checkpoint-5")

            # Reinitialize trainer and load model
            model = RegressionPreTrainedModel.from_pretrained(checkpoint)
            trainer = Trainer(model, trainer.args, train_dataset=trainer.train_dataset)

            trainer.train(model_path=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

    def test_load_best_model_at_end(self):
        total = int(self.n_epochs * 64 / self.batch_size)
        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(
                a=1.5,
                b=2.5,
                output_dir=tmpdir,
                learning_rate=0.1,
                eval_steps=5,
                evaluation_strategy="steps",
                load_best_model_at_end=True,
            )
            self.assertFalse(trainer.args.greater_is_better)
            trainer.train()
            self.check_saved_checkpoints(tmpdir, 5, total)
            self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss")

        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(
                a=1.5,
                b=2.5,
                output_dir=tmpdir,
                learning_rate=0.1,
                eval_steps=5,
                evaluation_strategy="steps",
                load_best_model_at_end=True,
                metric_for_best_model="accuracy",
                compute_metrics=AlmostAccuracy(),
            )
            self.assertTrue(trainer.args.greater_is_better)
            trainer.train()
            self.check_saved_checkpoints(tmpdir, 5, total)
            self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_accuracy", greater_is_better=True)

        # Save is done every eval regardless of the strategy
        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(
                a=1.5,
                b=2.5,
                output_dir=tmpdir,
                learning_rate=0.1,
                evaluation_strategy="epoch",
                load_best_model_at_end=True,
                metric_for_best_model="accuracy",
                compute_metrics=AlmostAccuracy(),
            )
            self.assertTrue(trainer.args.greater_is_better)
            trainer.train()
            self.check_saved_checkpoints(tmpdir, 64 // self.batch_size, total)
            self.check_best_model_has_been_loaded(
                tmpdir, 64 // self.batch_size, total, trainer, "eval_accuracy", greater_is_better=True
            )

        # Test this works with a non PreTrainedModel
        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(
                output_dir=tmpdir,
                learning_rate=0.1,
                eval_steps=5,
                evaluation_strategy="steps",
                load_best_model_at_end=True,
                pretrained=False,
            )
            self.assertFalse(trainer.args.greater_is_better)
            trainer.train()
            self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=False)
            self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss", is_pretrained=False)

    @slow
    def test_trainer_eval_mrpc(self):
        MODEL_ID = "bert-base-cased-finetuned-mrpc"
        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
        model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
        data_args = GlueDataTrainingArguments(
            task_name="mrpc", data_dir=f"{get_tests_dir()}/fixtures/tests_samples/MRPC", overwrite_cache=True
        )
        eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")

        training_args = TrainingArguments(output_dir="./examples", no_cuda=True)
        trainer = Trainer(model=model, args=training_args, eval_dataset=eval_dataset)
        result = trainer.evaluate()
        self.assertLess(result["eval_loss"], 0.2)

    @slow
    def test_trainer_eval_lm(self):
        MODEL_ID = "distilroberta-base"
        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
        dataset = LineByLineTextDataset(
            tokenizer=tokenizer,
            file_path=PATH_SAMPLE_TEXT,
            block_size=tokenizer.max_len_single_sentence,
        )
        self.assertEqual(len(dataset), 31)

    def test_trainer_iterable_dataset(self):
        # Simulate Language Modeling with an IterableDataset, with no __len__ method
        # Pick a tiny model so that it works on CPU
        # See Issue #5990: https://github.com/huggingface/transformers/issues/5990
        MODEL_ID = "sshleifer/tiny-distilbert-base-cased"
        model = AutoModelForMaskedLM.from_pretrained(MODEL_ID)
        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
        train_dataset = SampleIterableDataset(file_path=PATH_SAMPLE_TEXT, tokenizer=tokenizer)
        training_args = TrainingArguments(output_dir="./examples", no_cuda=True, max_steps=2)
        data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)

        trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset, data_collator=data_collator)
        trainer.train()

        loader = trainer.get_train_dataloader()
        self.assertIsInstance(loader, torch.utils.data.DataLoader)
        self.assertIsInstance(loader.sampler, torch.utils.data.dataloader._InfiniteConstantSampler)

        # Exception if giving iterable dataset and no max_steps
        with self.assertRaises(ValueError):
            training_args = TrainingArguments(output_dir="./examples", no_cuda=True)
            _ = Trainer(model=model, args=training_args, train_dataset=train_dataset, data_collator=data_collator)

        # Exception if eval_dataset is iterable in __init__
        with self.assertRaises(ValueError):
            training_args = TrainingArguments(output_dir="./examples", no_cuda=True, max_steps=2)
            _ = Trainer(
                model=model,
                args=training_args,
                train_dataset=train_dataset,
                eval_dataset=train_dataset,
                data_collator=data_collator,
            )

        # Exception if predicting with iterable dataset
        with self.assertRaises(ValueError):
            training_args = TrainingArguments(output_dir="./examples", no_cuda=True)
            trainer = Trainer(model=model, args=training_args, data_collator=data_collator)
            trainer.predict(train_dataset)

        # Exception if evaluating with iterable dataset
        with self.assertRaises(ValueError):
            training_args = TrainingArguments(output_dir="./examples", no_cuda=True)
            trainer = Trainer(model=model, args=training_args, data_collator=data_collator)
            trainer.evaluate(train_dataset)

    def test_num_train_epochs_in_training(self):
        # len(train_dl) < gradient_accumulation_steps shouldn't give ``ZeroDivisionError`` when ``max_steps`` is given.
        # It should give 1 update step for each epoch.
        trainer = get_regression_trainer(
            max_steps=3, train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5
        )
        train_output = trainer.train()
        self.assertEqual(train_output.global_step, 3)

        # Even if ``max_steps`` is not specified, we still expect 1 update step for each epoch if
        # len(train_dl) < gradient_accumulation_steps.
        trainer = get_regression_trainer(train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5)
        train_output = trainer.train()
        self.assertEqual(train_output.global_step, int(self.n_epochs))

    def test_early_stopping_callback(self):
        # early stopping stops training before num_train_epochs
        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(
                output_dir=tmp_dir,
                num_train_epochs=20,
                gradient_accumulation_steps=1,
                per_device_train_batch_size=16,
                load_best_model_at_end=True,
                evaluation_strategy=EvaluationStrategy.EPOCH,
                compute_metrics=AlmostAccuracy(),
                metric_for_best_model="accuracy",
            )
            trainer.add_callback(EarlyStoppingCallback(1, 0.0001))
            train_output = trainer.train()
            self.assertLess(train_output.global_step, 20 * 64 / 16)

        # Invalid inputs to the trainer with the early stopping callback result in an assertion error
        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(
                output_dir=tmp_dir,
                num_train_epochs=20,
                gradient_accumulation_steps=1,
                per_device_train_batch_size=16,
                evaluation_strategy=EvaluationStrategy.EPOCH,
                compute_metrics=AlmostAccuracy(),
                metric_for_best_model="accuracy",
            )
            trainer.add_callback(EarlyStoppingCallback(1))
            self.assertEqual(trainer.state.global_step, 0)
            try:
                trainer.train()
            except AssertionError:
                self.assertEqual(trainer.state.global_step, 0)

    def test_flos_extraction(self):
        trainer = get_regression_trainer(learning_rate=0.1)

        def assert_flos_extraction(trainer, wrapped_model_to_check):
            self.assertEqual(trainer.model, trainer._actual_model(wrapped_model_to_check))
            self.assertGreaterEqual(getattr(trainer._actual_model(wrapped_model_to_check).config, "total_flos", 0), 0)

        # with plain model
        assert_flos_extraction(trainer, trainer.model)

        # with enforced DataParallel
        assert_flos_extraction(trainer, torch.nn.DataParallel(trainer.model))


@require_torch
@require_optuna
class TrainerHyperParameterIntegrationTest(unittest.TestCase):
    def setUp(self):
        args = TrainingArguments(".")
        self.n_epochs = args.num_train_epochs
        self.batch_size = args.train_batch_size

    def test_hyperparameter_search(self):
        class MyTrialShortNamer(TrialShortNamer):
            DEFAULTS = {"a": 0, "b": 0}

        def hp_space(trial):
            return {}

        def model_init(trial):
            if trial is not None:
                a = trial.suggest_int("a", -4, 4)
                b = trial.suggest_int("b", -4, 4)
            else:
                a = 0
                b = 0
            config = RegressionModelConfig(a=a, b=b, double_output=False)

            return RegressionPreTrainedModel(config)

        def hp_name(trial):
            return MyTrialShortNamer.shortname(trial.params)

        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(
                output_dir=tmp_dir,
                learning_rate=0.1,
                logging_steps=1,
                evaluation_strategy=EvaluationStrategy.EPOCH,
                num_train_epochs=4,
                disable_tqdm=True,
                load_best_model_at_end=True,
                logging_dir="runs",
                run_name="test",
                model_init=model_init,
            )
            trainer.hyperparameter_search(direction="minimize", hp_space=hp_space, hp_name=hp_name, n_trials=4)