# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import contextlib
import doctest
import functools
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections.abc import Mapping
from io import StringIO
from pathlib import Path
from typing import Iterable, Iterator, List, Optional, Union
from unittest import mock

import huggingface_hub
import requests

from transformers import logging as transformers_logging

from .deepspeed import is_deepspeed_available
from .integrations import (
    is_clearml_available,
    is_fairscale_available,
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
)
from .utils import (
    is_accelerate_available,
    is_apex_available,
    is_auto_gptq_available,
    is_bitsandbytes_available,
    is_bs4_available,
    is_cython_available,
    is_decord_available,
    is_detectron2_available,
    is_essentia_available,
    is_faiss_available,
    is_flax_available,
    is_ftfy_available,
    is_ipex_available,
    is_jieba_available,
    is_jumanpp_available,
    is_keras_nlp_available,
    is_librosa_available,
    is_natten_available,
    is_onnx_available,
    is_optimum_available,
    is_pandas_available,
    is_peft_available,
    is_phonemizer_available,
    is_pretty_midi_available,
    is_pyctcdecode_available,
    is_pytesseract_available,
    is_pytest_available,
    is_pytorch_quantization_available,
    is_rjieba_available,
    is_safetensors_available,
    is_scipy_available,
    is_sentencepiece_available,
    is_seqio_available,
    is_soundfile_availble,
    is_spacy_available,
    is_sudachi_available,
    is_tensorflow_probability_available,
    is_tensorflow_text_available,
    is_tf2onnx_available,
    is_tf_available,
    is_timm_available,
    is_tokenizers_available,
    is_torch_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
    is_torch_neuroncore_available,
    is_torch_npu_available,
    is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
    is_torch_tpu_available,
    is_torchaudio_available,
    is_torchdynamo_available,
    is_torchvision_available,
    is_vision_available,
    strtobool,
)


if is_accelerate_available():
    from accelerate.state import AcceleratorState, PartialState


if is_pytest_available():
    from _pytest.doctest import (
        Module,
        _get_checker,
        _get_continue_on_failure,
        _get_runner,
        _is_mocked,
        _patch_unwrap_mock_aware,
        get_optionflags,
        import_path,
    )
    from _pytest.outcomes import skip
    from pytest import DoctestItem
else:
    Module = object
    DoctestItem = object


SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.

# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"

# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
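
# Hedged usage note: `strtobool` accepts the usual truthy/falsy spellings, so for example
# `parse_flag_from_env("RUN_SLOW", default=False)` is True for RUN_SLOW=1/yes/true, False for
# RUN_SLOW=0/no/false, and raises the ValueError above for anything else.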


def parse_int_from_env(key, default=None):
    try:
        value = os.environ[key]
    except KeyError:
        _value = default
    else:
        try:
            _value = int(value)
        except ValueError:
            raise ValueError(f"If set, {key} must be a int.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_tool_tests = parse_flag_from_env("RUN_TOOL_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)


def is_pt_tf_cross_test(test_case):
    """
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.

    PT+TF tests are skipped by default and we can run only them by setting the RUN_PT_TF_CROSS_TESTS environment
    variable to a truthy value and selecting the is_pt_tf_cross_test pytest mark.

    """
    if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
        return unittest.skip("test is PT+TF test")(test_case)
    else:
        try:
            import pytest  # We don't need a hard dependency on pytest in the main library
        except ImportError:
            return test_case
        else:
            return pytest.mark.is_pt_tf_cross_test()(test_case)
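
# Hedged usage sketch (hypothetical test name): a PT+TF equivalence test is decorated with
# `@is_pt_tf_cross_test` and is then typically run with something like
# `RUN_PT_TF_CROSS_TESTS=1 python -m pytest -m is_pt_tf_cross_test tests/`, i.e. the env var enables
# the tests and the pytest mark registered above selects them.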


def is_pt_flax_cross_test(test_case):
    """
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.

    PT+FLAX tests are skipped by default and we can run only them by setting the RUN_PT_FLAX_CROSS_TESTS environment
    variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.

    """
    if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
        return unittest.skip("test is PT+FLAX test")(test_case)
    else:
        try:
            import pytest  # We don't need a hard dependency on pytest in the main library
        except ImportError:
            return test_case
        else:
            return pytest.mark.is_pt_flax_cross_test()(test_case)


def is_staging_test(test_case):
    """
    Decorator marking a test as a staging test.

    Those tests will run using the staging environment of huggingface.co instead of the real model hub.
    """
    if not _run_staging:
        return unittest.skip("test is staging test")(test_case)
    else:
        try:
            import pytest  # We don't need a hard dependency on pytest in the main library
        except ImportError:
            return test_case
        else:
            return pytest.mark.is_staging_test()(test_case)


def is_pipeline_test(test_case):
    """
    Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
    skipped.
    """
    if not _run_pipeline_tests:
        return unittest.skip("test is pipeline test")(test_case)
    else:
        try:
            import pytest  # We don't need a hard dependency on pytest in the main library
        except ImportError:
            return test_case
        else:
            return pytest.mark.is_pipeline_test()(test_case)


def is_tool_test(test_case):
    """
    Decorator marking a test as a tool test. If RUN_TOOL_TESTS is set to a falsy value, those tests will be skipped.
    """
    if not _run_tool_tests:
        return unittest.skip("test is a tool test")(test_case)
    else:
        try:
            import pytest  # We don't need a hard dependency on pytest in the main library
        except ImportError:
            return test_case
        else:
            return pytest.mark.is_tool_test()(test_case)


def slow(test_case):
    """
    Decorator marking a test as slow.

    Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.

    """
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
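
# Hedged usage sketch (hypothetical test): with RUN_SLOW=1 exported, pytest collects and runs tests
# decorated as
#
#     @slow
#     def test_very_large_checkpoint(self):
#         ...
#
# while without the env var they are reported as skipped.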


def tooslow(test_case):
    """
    Decorator marking a test as too slow.

    Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
    these will not be tested by the CI.

    """
    return unittest.skip("test is too slow")(test_case)


def custom_tokenizers(test_case):
    """
    Decorator marking a test for a custom tokenizer.

    Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
    environment variable to a truthy value to run them.
    """
    return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)


def require_bs4(test_case):
    """
    Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
    """
    return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)


def require_accelerate(test_case):
    """
    Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
    """
    return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case)


def require_safetensors(test_case):
    """
    Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
    """
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_rjieba(test_case):
    """
    Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
    """
    return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)


def require_jieba(test_case):
    """
    Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
    """
    return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)


def require_tf2onnx(test_case):
    return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)


def require_onnx(test_case):
    return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)


def require_timm(test_case):
    """
    Decorator marking a test that requires Timm.

    These tests are skipped when Timm isn't installed.

    """
    return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)


def require_natten(test_case):
    """
    Decorator marking a test that requires NATTEN.

    These tests are skipped when NATTEN isn't installed.

    """
    return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)


def require_torch(test_case):
    """
    Decorator marking a test that requires PyTorch.

    These tests are skipped when PyTorch isn't installed.

    """
    return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)


def require_peft(test_case):
    """
    Decorator marking a test that requires PEFT.

    These tests are skipped when PEFT isn't installed.

    """
    return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)


def require_torchvision(test_case):
    """
    Decorator marking a test that requires Torchvision.

    These tests are skipped when Torchvision isn't installed.

    """
    return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)


def require_torch_or_tf(test_case):
    """
    Decorator marking a test that requires PyTorch or TensorFlow.

    These tests are skipped when neither PyTorch nor TensorFlow is installed.

    """
    return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
        test_case
    )


def require_intel_extension_for_pytorch(test_case):
    """
    Decorator marking a test that requires Intel Extension for PyTorch.

    These tests are skipped when Intel Extension for PyTorch isn't installed or its version does not match the
    current PyTorch version.

    """
    return unittest.skipUnless(
        is_ipex_available(),
        "test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
        " https://github.com/intel/intel-extension-for-pytorch",
    )(test_case)


def require_tensorflow_probability(test_case):
    """
    Decorator marking a test that requires TensorFlow probability.

    These tests are skipped when TensorFlow probability isn't installed.

    """
    return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
        test_case
    )


def require_torchaudio(test_case):
    """
    Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
    """
    return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)


def require_tf(test_case):
    """
    Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
    """
    return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)


def require_flax(test_case):
    """
    Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed.
    """
    return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)


def require_sentencepiece(test_case):
    """
    Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
    """
    return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)


def require_seqio(test_case):
    """
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
    """
    return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)


def require_scipy(test_case):
    """
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
    """
    return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)


def require_tokenizers(test_case):
    """
    Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
    """
    return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)


def require_tensorflow_text(test_case):
    """
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
    installed.
    """
    return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)


def require_keras_nlp(test_case):
    """
    Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
    """
    return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)


def require_pandas(test_case):
    """
    Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
    """
    return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)


def require_pytesseract(test_case):
    """
    Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
    """
    return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)


def require_pytorch_quantization(test_case):
    """
    Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
    Quantization Toolkit isn't installed.
    """
    return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
        test_case
    )


def require_vision(test_case):
    """
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
    """
    return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)


def require_ftfy(test_case):
    """
    Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
    """
    return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)


def require_spacy(test_case):
    """
    Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
    """
    return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)


def require_decord(test_case):
    """
    Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
    """
    return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)


def require_torch_multi_gpu(test_case):
    """
    Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
    multiple GPUs.

    To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
    """
    if not is_torch_available():
        return unittest.skip("test requires PyTorch")(test_case)

    import torch

    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_torch_non_multi_gpu(test_case):
    """
    Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
    """
    if not is_torch_available():
        return unittest.skip("test requires PyTorch")(test_case)

    import torch

    return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)


def require_torch_up_to_2_gpus(test_case):
    """
    Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
    """
    if not is_torch_available():
        return unittest.skip("test requires PyTorch")(test_case)

    import torch

    return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)


def require_torch_tpu(test_case):
    """
    Decorator marking a test that requires a TPU (in PyTorch).
    """
    return unittest.skipUnless(is_torch_tpu_available(check_device=False), "test requires PyTorch TPU")(test_case)


def require_torch_neuroncore(test_case):
    """
    Decorator marking a test that requires NeuronCore (in PyTorch).
    """
    return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
        test_case
    )


def require_torch_npu(test_case):
    """
    Decorator marking a test that requires NPU (in PyTorch).
    """
    return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)


def require_torch_multi_npu(test_case):
    """
    Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
    multiple NPUs.

    To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
    """
    if not is_torch_npu_available():
        return unittest.skip("test requires PyTorch NPU")(test_case)

    return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)


if is_torch_available():
    # Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
    import torch

    if "TRANSFORMERS_TEST_DEVICE" in os.environ:
        torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
        try:
            # try creating device to see if provided device is valid
            _ = torch.device(torch_device)
        except RuntimeError as e:
            raise RuntimeError(
                f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
            ) from e
    elif torch.cuda.is_available():
        torch_device = "cuda"
    elif _run_third_party_device_tests and is_torch_npu_available():
        torch_device = "npu"
    else:
        torch_device = "cpu"
else:
    torch_device = None
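
# Hedged usage note: test code built on this module typically places models and tensors on
# `torch_device` (e.g. `model.to(torch_device)`), so the same test runs on CUDA, NPU or CPU
# depending on what the block above resolved.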

if is_tf_available():
    import tensorflow as tf

if is_flax_available():
    import jax

    jax_device = jax.default_backend()
else:
    jax_device = None


def require_torchdynamo(test_case):
    """Decorator marking a test that requires TorchDynamo"""
    return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)


def require_torch_tensorrt_fx(test_case):
    """Decorator marking a test that requires Torch-TensorRT FX"""
    return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)


def require_torch_gpu(test_case):
    """Decorator marking a test that requires CUDA and PyTorch."""
    return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
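
# Hedged usage sketch: these requirement markers are plain unittest skip decorators, so they stack,
# e.g.
#
#     @require_torch_gpu
#     @slow
#     def test_fp16_generation(self):  # hypothetical test
#         ...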


def require_torch_bf16_gpu(test_case):
    """Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
    return unittest.skipUnless(
        is_torch_bf16_gpu_available(),
        "test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
    )(test_case)


def require_torch_bf16_cpu(test_case):
    """Decorator marking a test that requires torch>=1.10, using CPU."""
    return unittest.skipUnless(
        is_torch_bf16_cpu_available(),
        "test requires torch>=1.10, using CPU",
    )(test_case)


def require_torch_tf32(test_case):
    """Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
    return unittest.skipUnless(
        is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
    )(test_case)


def require_detectron2(test_case):
    """Decorator marking a test that requires detectron2."""
    return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)


def require_faiss(test_case):
    """Decorator marking a test that requires faiss."""
    return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)


def require_optuna(test_case):
    """
    Decorator marking a test that requires optuna.

    These tests are skipped when optuna isn't installed.

    """
    return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)


def require_ray(test_case):
    """
    Decorator marking a test that requires Ray/tune.

    These tests are skipped when Ray/tune isn't installed.

    """
    return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)


def require_sigopt(test_case):
    """
    Decorator marking a test that requires SigOpt.

    These tests are skipped when SigOpt isn't installed.

    """
    return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)


def require_wandb(test_case):
    """
    Decorator marking a test that requires wandb.

    These tests are skipped when wandb isn't installed.

    """
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_clearml(test_case):
    """
    Decorator marking a test that requires clearml.

    These tests are skipped when clearml isn't installed.

    """
    return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)


def require_soundfile(test_case):
    """
    Decorator marking a test that requires soundfile

    These tests are skipped when soundfile isn't installed.

    """
    return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)


def require_deepspeed(test_case):
    """
    Decorator marking a test that requires deepspeed
    """
    return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)


def require_fairscale(test_case):
    """
    Decorator marking a test that requires fairscale
    """
    return unittest.skipUnless(is_fairscale_available(), "test requires fairscale")(test_case)


def require_apex(test_case):
    """
    Decorator marking a test that requires apex
    """
    return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)


def require_bitsandbytes(test_case):
    """
    Decorator for bits and bytes (bnb) dependency
    """
    return unittest.skipUnless(is_bitsandbytes_available(), "test requires bnb")(test_case)


def require_optimum(test_case):
    """
    Decorator for optimum dependency
    """
    return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)


def require_auto_gptq(test_case):
    """
    Decorator for auto_gptq dependency
    """
    return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)


def require_phonemizer(test_case):
    """
    Decorator marking a test that requires phonemizer
    """
    return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)


def require_pyctcdecode(test_case):
    """
    Decorator marking a test that requires pyctcdecode
    """
    return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)


def require_librosa(test_case):
    """
    Decorator marking a test that requires librosa
    """
    return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)


def require_essentia(test_case):
    """
    Decorator marking a test that requires essentia
    """
    return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)


def require_pretty_midi(test_case):
    """
    Decorator marking a test that requires pretty_midi
    """
    return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)


def cmd_exists(cmd):
    return shutil.which(cmd) is not None


def require_usr_bin_time(test_case):
    """
    Decorator marking a test that requires `/usr/bin/time`
    """
    return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)


def require_sudachi(test_case):
    """
    Decorator marking a test that requires sudachi
    """
    return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)


def require_jumanpp(test_case):
    """
    Decorator marking a test that requires jumanpp
    """
    return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)


def require_cython(test_case):
    """
    Decorator marking a test that requires Cython
    """
    return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)


def get_gpu_count():
    """
    Return the number of available gpus (regardless of whether torch, tf or jax is used)
    """
    if is_torch_available():
        import torch

        return torch.cuda.device_count()
    elif is_tf_available():
        import tensorflow as tf

        return len(tf.config.list_physical_devices("GPU"))
    elif is_flax_available():
        import jax

        return jax.device_count()
    else:
        return 0
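
# Hedged example: with torch installed on a 2-GPU machine `get_gpu_count()` returns 2; if none of
# torch, tf or flax is importable it falls through to 0.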


def get_tests_dir(append_path=None):
    """
    Args:
        append_path: optional path to append to the tests dir path

    Return:
        The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.

    """
    # this function caller's __file__
    caller__file__ = inspect.stack()[1][1]
    tests_dir = os.path.abspath(os.path.dirname(caller__file__))

    while not tests_dir.endswith("tests"):
        tests_dir = os.path.dirname(tests_dir)

    if append_path:
        return os.path.join(tests_dir, append_path)
    else:
        return tests_dir
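
# Hedged usage sketch (path is illustrative): `get_tests_dir("fixtures")` resolves to the absolute
# `<repo>/tests/fixtures` directory no matter which working directory pytest was started from.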


#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py


# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
    return re.sub(r"^.*\r", "", buf, 0, re.M)
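
# Hedged illustration: `apply_print_resets("step 1\rstep 2\rdone")` drops everything up to the last
# carriage return and keeps only "done", which is what the assertion helper below compares against.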


def assert_screenout(out, what):
    out_pr = apply_print_resets(out).lower()
    match_str = out_pr.find(what.lower())
    assert match_str != -1, f"expecting to find {what} in output: {out_pr}"


class CaptureStd:
    """
    Context manager to capture:

        - stdout: replay it, clean it up and make it available via `obj.out`
        - stderr: replay it and make it available via `obj.err`

    Args:
        out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
        err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
        replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
            By default each captured stream gets replayed back on context's exit, so that one can see what the test was
            doing. If this behavior is not desired and the captured data shouldn't be replayed, pass `replay=False`
            to disable this feature.

    Examples:

    ```python
    # to capture stdout only with auto-replay
    with CaptureStdout() as cs:
        print("Secret message")
    assert "message" in cs.out

    # to capture stderr only with auto-replay
    import sys

    with CaptureStderr() as cs:
        print("Warning: ", file=sys.stderr)
    assert "Warning" in cs.err

    # to capture both streams with auto-replay
    with CaptureStd() as cs:
        print("Secret message")
        print("Warning: ", file=sys.stderr)
    assert "message" in cs.out
    assert "Warning" in cs.err

    # to capture just one of the streams, and not the other, with auto-replay
    with CaptureStd(err=False) as cs:
        print("Secret message")
    assert "message" in cs.out
    # but best use the stream-specific subclasses

    # to capture without auto-replay
    with CaptureStd(replay=False) as cs:
        print("Secret message")
    assert "message" in cs.out
    ```"""

    def __init__(self, out=True, err=True, replay=True):
        self.replay = replay

        if out:
            self.out_buf = StringIO()
            self.out = "error: CaptureStd context is unfinished yet, called too early"
        else:
            self.out_buf = None
            self.out = "not capturing stdout"

        if err:
            self.err_buf = StringIO()
            self.err = "error: CaptureStd context is unfinished yet, called too early"
        else:
            self.err_buf = None
            self.err = "not capturing stderr"

    def __enter__(self):
        if self.out_buf:
            self.out_old = sys.stdout
            sys.stdout = self.out_buf

        if self.err_buf:
            self.err_old = sys.stderr
            sys.stderr = self.err_buf

        return self

    def __exit__(self, *exc):
        if self.out_buf:
            sys.stdout = self.out_old
            captured = self.out_buf.getvalue()
            if self.replay:
                sys.stdout.write(captured)
            self.out = apply_print_resets(captured)

        if self.err_buf:
            sys.stderr = self.err_old
            captured = self.err_buf.getvalue()
            if self.replay:
                sys.stderr.write(captured)
            self.err = captured

    def __repr__(self):
        msg = ""
        if self.out_buf:
            msg += f"stdout: {self.out}\n"
        if self.err_buf:
            msg += f"stderr: {self.err}\n"
        return msg


# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.


class CaptureStdout(CaptureStd):
    """Same as CaptureStd but captures only stdout"""

    def __init__(self, replay=True):
        super().__init__(err=False, replay=replay)


class CaptureStderr(CaptureStd):
    """Same as CaptureStd but captures only stderr"""

    def __init__(self, replay=True):
        super().__init__(out=False, replay=replay)


class CaptureLogger:
    """
    Context manager to capture `logging` streams

    Args:
        logger: `logging` logger object

    Returns:
        The captured output is available via `self.out`

    Example:

    ```python
    >>> from transformers import logging
    >>> from transformers.testing_utils import CaptureLogger

    >>> msg = "Testing 1, 2, 3"
    >>> logging.set_verbosity_info()
    >>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
    >>> with CaptureLogger(logger) as cl:
    ...     logger.info(msg)
    >>> assert cl.out, msg + "\n"
    ```
    """

    def __init__(self, logger):
        self.logger = logger
        self.io = StringIO()
        self.sh = logging.StreamHandler(self.io)
        self.out = ""

    def __enter__(self):
        self.logger.addHandler(self.sh)
        return self

    def __exit__(self, *exc):
        self.logger.removeHandler(self.sh)
        self.out = self.io.getvalue()

    def __repr__(self):
        return f"captured: {self.out}\n"


@contextlib.contextmanager
def LoggingLevel(level):
    """
    This is a context manager to temporarily change transformers modules logging level to the desired value and have it
    restored to the original setting at the end of the scope.

    Example:

    ```python
    with LoggingLevel(logging.INFO):
        AutoModel.from_pretrained("gpt2")  # calls logger.info() several times
    ```
    """
    orig_level = transformers_logging.get_verbosity()
    try:
        transformers_logging.set_verbosity(level)
        yield
    finally:
        transformers_logging.set_verbosity(orig_level)


@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
    """
    Temporary add given path to `sys.path`.

    Usage:

    ```python
    with ExtendSysPath("/path/to/dir"):
        mymodule = importlib.import_module("mymodule")
    ```
    """

    path = os.fspath(path)
    try:
        sys.path.insert(0, path)
        yield
    finally:
        sys.path.remove(path)


class TestCasePlus(unittest.TestCase):
    """
    This class extends *unittest.TestCase* with additional features.

    Feature 1: A set of fully resolved important file and dir path accessors.

    In tests often we need to know where things are relative to the current test file, and it's not trivial since the
    test could be invoked from more than one directory or could reside in sub-directories with different depths. This
    class solves this problem by sorting out all the basic paths and provides easy accessors to them:

    - `pathlib` objects (all fully resolved):

       - `test_file_path` - the current test file path (=`__file__`)
       - `test_file_dir` - the directory containing the current test file
       - `tests_dir` - the directory of the `tests` test suite
       - `examples_dir` - the directory of the `examples` test suite
       - `repo_root_dir` - the directory of the repository
       - `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)

    - stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:

       - `test_file_path_str`
       - `test_file_dir_str`
       - `tests_dir_str`
       - `examples_dir_str`
       - `repo_root_dir_str`
       - `src_dir_str`

    Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.

    1. Create a unique temporary dir:

    ```python
    def test_whatever(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
    ```

    `tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
    test.


    2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
    empty it after the test.

    ```python
    def test_whatever(self):
        tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
    ```

    This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
    didn't leave any data in there.

    3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
        following behavior:

    `before=True`: the temporary dir will always be cleared at the beginning of the test.

    `before=False`: if the temporary dir already existed, any existing files will remain there.

    `after=True`: the temporary dir will always be deleted at the end of the test.

    `after=False`: the temporary dir will always be left intact at the end of the test.

    Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
    allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
    will get nuked. i.e. please always pass paths that start with `./`

    Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
    otherwise.

    Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
    is useful for invoking external programs from the test suite - e.g. distributed training.


    ```python
    def test_whatever(self):
        env = self.get_env()
    ```"""

    def setUp(self):
        # get_auto_remove_tmp_dir feature:
        self.teardown_tmp_dirs = []

        # figure out the resolved paths for repo_root, tests, examples, etc.
        self._test_file_path = inspect.getfile(self.__class__)
        path = Path(self._test_file_path).resolve()
        self._test_file_dir = path.parents[0]
        for up in [1, 2, 3]:
            tmp_dir = path.parents[up]
            if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
                break
        if tmp_dir:
            self._repo_root_dir = tmp_dir
        else:
            raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
        self._tests_dir = self._repo_root_dir / "tests"
        self._examples_dir = self._repo_root_dir / "examples"
        self._src_dir = self._repo_root_dir / "src"

    @property
    def test_file_path(self):
        return self._test_file_path

    @property
    def test_file_path_str(self):
        return str(self._test_file_path)

    @property
    def test_file_dir(self):
        return self._test_file_dir

    @property
    def test_file_dir_str(self):
        return str(self._test_file_dir)

    @property
    def tests_dir(self):
        return self._tests_dir

    @property
    def tests_dir_str(self):
        return str(self._tests_dir)

    @property
    def examples_dir(self):
        return self._examples_dir

    @property
    def examples_dir_str(self):
        return str(self._examples_dir)

    @property
    def repo_root_dir(self):
        return self._repo_root_dir

    @property
    def repo_root_dir_str(self):
        return str(self._repo_root_dir)

    @property
    def src_dir(self):
        return self._src_dir

    @property
    def src_dir_str(self):
        return str(self._src_dir)

    def get_env(self):
        """
        Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
        invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.

        It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
        the preset `PYTHONPATH` if any (all full resolved paths).

        """
        env = os.environ.copy()
        paths = [self.src_dir_str]
        if "/examples" in self.test_file_dir_str:
            paths.append(self.examples_dir_str)
        else:
            paths.append(self.tests_dir_str)
        paths.append(env.get("PYTHONPATH", ""))

        env["PYTHONPATH"] = ":".join(paths)
        return env

    def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
        """
        Args:
            tmp_dir (`string`, *optional*):
                if `None`:

                   - a unique temporary path will be created
                   - sets `before=True` if `before` is `None`
                   - sets `after=True` if `after` is `None`
                else:

                   - `tmp_dir` will be created
                   - sets `before=True` if `before` is `None`
                   - sets `after=False` if `after` is `None`
            before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
            after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its
                contents intact at the end of the test.

        Returns:
            tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
        """
        if tmp_dir is not None:
            # defining the most likely desired behavior for when a custom path is provided.
            # this most likely indicates the debug mode where we want an easily locatable dir that:
            # 1. gets cleared out before the test (if it already exists)
            # 2. is left intact after the test
            if before is None:
                before = True
            if after is None:
                after = False

            # using provided path
            path = Path(tmp_dir).resolve()

            # to avoid nuking parts of the filesystem, only relative paths are allowed
            if not tmp_dir.startswith("./"):
                raise ValueError(
                    f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
                )

            # ensure the dir is empty to start with
            if before is True and path.exists():
                shutil.rmtree(tmp_dir, ignore_errors=True)

            path.mkdir(parents=True, exist_ok=True)

        else:
            # defining the most likely desired behavior for when a unique tmp path is auto generated
            # (not a debug mode), here we require a unique tmp dir that:
            # 1. is empty before the test (it will be empty in this situation anyway)
            # 2. gets fully removed after the test
            if before is None:
                before = True
            if after is None:
                after = True

            # using unique tmp dir (always empty, regardless of `before`)
            tmp_dir = tempfile.mkdtemp()

        if after is True:
            # register for deletion
            self.teardown_tmp_dirs.append(tmp_dir)

        return tmp_dir

    def python_one_liner_max_rss(self, one_liner_str):
        """
        Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
        program.

        Args:
            one_liner_str (`string`):
                a python one liner code that gets passed to `python -c`

        Returns:
            max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.

        Requirements:
            this helper needs `/usr/bin/time` to be installed (`apt install time`)

        Example:

        ```
        one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("t5-large")'
        max_rss = self.python_one_liner_max_rss(one_liner_str)
        ```
        """

        if not cmd_exists("/usr/bin/time"):
            raise ValueError("/usr/bin/time is required, install with `apt install time`")

        cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
        with CaptureStd() as cs:
            execute_subprocess_async(cmd, env=self.get_env())
        # returned data is in KB so convert to bytes
        max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
        return max_rss

    def tearDown(self):
        # get_auto_remove_tmp_dir feature: remove registered temp dirs
        for path in self.teardown_tmp_dirs:
            shutil.rmtree(path, ignore_errors=True)
        self.teardown_tmp_dirs = []
        if is_accelerate_available():
            AcceleratorState._reset_state()
            PartialState._reset_state()

            # delete all the env variables having `ACCELERATE` in them
            for k in list(os.environ.keys()):
                if "ACCELERATE" in k:
                    del os.environ[k]


def mockenv(**kwargs):
    """
    this is a convenience wrapper that allows this::

    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)

    """
    return mock.patch.dict(os.environ, kwargs)


# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
    """
    Temporarily updates the `os.environ` dictionary in-place. Similar to `mockenv`.

    The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.

    Args:
      remove: Environment variables to remove.
      update: Dictionary of environment variables and values to add/update.
    """
    env = os.environ
    update = update or {}
    remove = remove or []

    # List of environment variables being updated or removed.
    stomped = (set(update.keys()) | set(remove)) & set(env.keys())
    # Environment variables and values to restore on exit.
    update_after = {k: env[k] for k in stomped}
    # Environment variables and values to remove on exit.
    remove_after = frozenset(k for k in update if k not in env)

    try:
        env.update(update)
        [env.pop(k, None) for k in remove]
        yield
    finally:
        env.update(update_after)
        [env.pop(k) for k in remove_after]


# --- pytest conf functions --- #

# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}


def pytest_addoption_shared(parser):
    """
    This function is to be called from `conftest.py` via a `pytest_addoption` wrapper that has to be defined there.

    It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
    option.
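
    Example (a minimal sketch of the wrapper expected in `conftest.py`):

    ```
    from transformers.testing_utils import pytest_addoption_shared

    def pytest_addoption(parser):
        pytest_addoption_shared(parser)
    ```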

    """
    option = "--make-reports"
    if option not in pytest_opt_registered:
        parser.addoption(
            option,
            action="store",
            default=False,
            help="generate report files. The value of this option is used as a prefix to report names",
        )
        pytest_opt_registered[option] = 1


def pytest_terminal_summary_main(tr, id):
    """
    Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
    directory. The report files are prefixed with the test suite name.

    This function emulates the --durations and -rA pytest arguments.

    This function is to be called from `conftest.py` via a `pytest_terminal_summary` wrapper that has to be defined
    there.

    Args:
    - tr: `terminalreporter` passed from `conftest.py`
    - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
      needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.

    NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
    changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
    plugins and interfere.
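
    Example (a minimal sketch of the wrapper expected in `conftest.py`; deriving `id` from the `--make-reports` option
    value is just one possible choice):

    ```
    from transformers.testing_utils import pytest_terminal_summary_main

    def pytest_terminal_summary(terminalreporter):
        make_reports = terminalreporter.config.getoption("--make-reports")
        if make_reports:
            pytest_terminal_summary_main(terminalreporter, id=make_reports)
    ```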

    """
    from _pytest.config import create_terminal_writer

    if not len(id):
        id = "tests"

    config = tr.config
    orig_writer = config.get_terminal_writer()
    orig_tbstyle = config.option.tbstyle
    orig_reportchars = tr.reportchars

    dir = f"reports/{id}"
    Path(dir).mkdir(parents=True, exist_ok=True)
    report_files = {
        k: f"{dir}/{k}.txt"
        for k in [
            "durations",
            "errors",
            "failures_long",
            "failures_short",
            "failures_line",
            "passes",
            "stats",
            "summary_short",
            "warnings",
        ]
    }

    # custom durations report
    # note: there is no need to call pytest --durations=XX to get this separate report
    # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
    dlist = []
    for replist in tr.stats.values():
        for rep in replist:
            if hasattr(rep, "duration"):
                dlist.append(rep)
    if dlist:
        dlist.sort(key=lambda x: x.duration, reverse=True)
        with open(report_files["durations"], "w") as f:
            durations_min = 0.05  # sec
            f.write("slowest durations\n")
            for i, rep in enumerate(dlist):
                if rep.duration < durations_min:
                    f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
                    break
                f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")

    def summary_failures_short(tr):
        # expecting that the reports were --tb=long (default) so we chop them off here to the last frame
        reports = tr.getreports("failed")
        if not reports:
            return
        tr.write_sep("=", "FAILURES SHORT STACK")
        for rep in reports:
            msg = tr._getfailureheadline(rep)
            tr.write_sep("_", msg, red=True, bold=True)
            # chop off the optional leading extra frames, leaving only the last one
            longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
            tr._tw.line(longrepr)
            # note: not printing out any rep.sections to keep the report short

    # use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
    # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
    # note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
    # pytest-instafail does that)

    # report failures with line/short/long styles
    config.option.tbstyle = "auto"  # full tb
    with open(report_files["failures_long"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_failures()

    # config.option.tbstyle = "short" # short tb
    with open(report_files["failures_short"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        summary_failures_short(tr)

    config.option.tbstyle = "line"  # one line per error
    with open(report_files["failures_line"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_failures()

    with open(report_files["errors"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_errors()

    with open(report_files["warnings"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_warnings()  # normal warnings
        tr.summary_warnings()  # final warnings

    tr.reportchars = "wPpsxXEf"  # emulate -rA (used in summary_passes() and short_test_summary())

    # Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it times out on CircleCI if it
    # takes > 10 minutes (as this part doesn't generate any output on the terminal).
    # (also, it seems there is no useful information in this report, and we rarely need to read it)
    # with open(report_files["passes"], "w") as f:
    #     tr._tw = create_terminal_writer(config, f)
    #     tr.summary_passes()

    with open(report_files["summary_short"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.short_test_summary()

    with open(report_files["stats"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_stats()

    # restore:
    tr._tw = orig_writer
    tr.reportchars = orig_reportchars
    config.option.tbstyle = orig_tbstyle


# --- distributed testing functions --- #

# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio  # noqa


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, we will need to switch to the following code. The problem is that no data
    # will be seen until it's done and, if it hangs, there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """
    Returns an int value of the worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime,
    or 0
    if `-n 1` or `pytest-xdist` isn't being used.
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """
    Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.

    Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
    port at once.
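
    Example (a minimal sketch, assuming a `TestCasePlus` test where `self.get_env()` is available; the launcher,
    script name and number of processes are illustrative):

    ```
    port = get_torch_dist_unique_port()
    cmd = f"python -m torch.distributed.launch --nproc_per_node=2 --master_port={port} ./my_test_script.py".split()
    execute_subprocess_async(cmd, env=self.get_env())
    ```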
    """
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta


def nested_simplify(obj, decimals=3):
    """
    Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
    within tests.
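
    Example (a minimal sketch):

    ```
    nested_simplify({"scores": [0.12345, 0.6789]})  # -> {"scores": [0.123, 0.679]}
    ```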
    """
    import numpy as np

    if isinstance(obj, list):
        return [nested_simplify(item, decimals) for item in obj]
    if isinstance(obj, tuple):
        return tuple([nested_simplify(item, decimals) for item in obj])
    elif isinstance(obj, np.ndarray):
        return nested_simplify(obj.tolist())
    elif isinstance(obj, Mapping):
        return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
    elif isinstance(obj, (str, int, np.int64)):
        return obj
    elif obj is None:
        return obj
    elif is_torch_available() and isinstance(obj, torch.Tensor):
        return nested_simplify(obj.tolist(), decimals)
    elif is_tf_available() and tf.is_tensor(obj):
        return nested_simplify(obj.numpy().tolist())
    elif isinstance(obj, float):
        return round(obj, decimals)
    elif isinstance(obj, (np.int32, np.float32)):
        return nested_simplify(obj.item(), decimals)
    else:
        raise Exception(f"Not supported: {type(obj)}")


def check_json_file_has_correct_format(file_path):
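    """
    Checks that the JSON file at `file_path` is pretty-printed: an empty dict must serialize to exactly `{}`;
    otherwise the file must span at least 3 lines, open with `{`, close with `}`, and indent its keys by 2 spaces.
    """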
    with open(file_path, "r") as f:
        lines = f.readlines()
        if len(lines) == 1:
            # length can only be 1 if dict is empty
            assert lines[0] == "{}"
        else:
            # otherwise make sure json has correct format (at least 3 lines)
            assert len(lines) >= 3
            # each key on one line, indent should be 2, min length is 3
            assert lines[0].strip() == "{"
            for line in lines[1:-1]:
                left_indent = len(line) - len(line.lstrip())
                assert left_indent == 2
            assert lines[-1].strip() == "}"


def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
    pass


def run_command(command: List[str], return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`
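
    Example (a minimal sketch; the command is illustrative):

    ```
    stdout = run_command(["python", "-c", "print('hello')"], return_stdout=True)
    ```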
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e


class RequestCounter:
    """
    Helper class that will count all requests made online.
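
    Example (a minimal usage sketch; the repo id and the expected counts are illustrative):

    ```
    with RequestCounter() as counter:
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
    assert counter.head_request_count <= 1
    assert counter.other_request_count == 0
    ```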
    """

    def __enter__(self):
        self.head_request_count = 0
        self.get_request_count = 0
        self.other_request_count = 0

        # Mock `get_session` to count HTTP calls.
        self.old_get_session = huggingface_hub.utils._http.get_session
        self.session = requests.Session()
        self.session.request = self.new_request
        huggingface_hub.utils._http.get_session = lambda: self.session
        return self

    def __exit__(self, *args, **kwargs):
        huggingface_hub.utils._http.get_session = self.old_get_session

    def new_request(self, method, **kwargs):
        if method == "GET":
            self.get_request_count += 1
        elif method == "HEAD":
            self.head_request_count += 1
        else:
            self.other_request_count += 1

        return requests.request(method=method, **kwargs)


def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
    """
    To decorate flaky tests. They will be retried on failures.

    Args:
        max_attempts (`int`, *optional*, defaults to 5):
            The maximum number of attempts to retry the flaky test.
        wait_before_retry (`float`, *optional*):
            If provided, will wait that number of seconds before retrying the test.
        description (`str`, *optional*):
            A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
            etc.)
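
    Example (a minimal usage sketch; the test name, arguments and description are illustrative):

    ```
    @is_flaky(max_attempts=3, wait_before_retry=1.0, description="flaky due to network timeouts")
    def test_push_to_hub(self):
        ...
    ```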
    """

    def decorator(test_func_ref):
        @functools.wraps(test_func_ref)
        def wrapper(*args, **kwargs):
            retry_count = 1

            while retry_count < max_attempts:
                try:
                    return test_func_ref(*args, **kwargs)

                except Exception as err:
                    print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
                    if wait_before_retry is not None:
                        time.sleep(wait_before_retry)
                    retry_count += 1

            return test_func_ref(*args, **kwargs)

        return wrapper

    return decorator


def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
    """
    To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.

    Args:
        test_case (`unittest.TestCase`):
            The test that will run `target_func`.
        target_func (`Callable`):
            The function implementing the actual testing logic.
        inputs (`dict`, *optional*, defaults to `None`):
            The inputs that will be passed to `target_func` through an (input) queue.
        timeout (`int`, *optional*, defaults to `None`):
            The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
            variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
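
    Example (a minimal sketch; `_target_func`, the test method and the inputs are illustrative):

    ```
    def _target_func(in_queue, out_queue, timeout):
        error = None
        try:
            inputs = in_queue.get(timeout=timeout)
            assert inputs["x"] == 1
        except Exception as e:
            error = f"{e}"
        # the parent process checks `results["error"]`, so always send it back
        out_queue.put({"error": error}, timeout=timeout)
        out_queue.join()

    # inside a `unittest.TestCase` subclass:
    def test_something(self):
        run_test_in_subprocess(test_case=self, target_func=_target_func, inputs={"x": 1})
    ```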
    """
    if timeout is None:
        timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))

    start_methohd = "spawn"
    ctx = multiprocessing.get_context(start_methohd)

    input_queue = ctx.Queue(1)
    output_queue = ctx.JoinableQueue(1)

    # We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
    input_queue.put(inputs, timeout=timeout)

    process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
    process.start()
    # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
    # the test from exiting properly.
    try:
        results = output_queue.get(timeout=timeout)
        output_queue.task_done()
    except Exception as e:
        process.terminate()
        test_case.fail(e)
    process.join(timeout=timeout)

    if results["error"] is not None:
        test_case.fail(f'{results["error"]}')


"""
The following contains utils to run the documentation tests without having to overwrite any files.

The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.

To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""


def preprocess_string(string, skip_cuda_tests):
    """Prepare a docstring or a `.md` file to be run by doctest.

    The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be
    run for `string`.
    """
    codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
    codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
    is_cuda_found = False
    for i, codeblock in enumerate(codeblocks):
        if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
            codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
        if (
            (">>>" in codeblock or "..." in codeblock)
            and re.search(r"cuda|to\(0\)|device=0", codeblock)
            and skip_cuda_tests
        ):
            is_cuda_found = True
            break

    modified_string = ""
    if not is_cuda_found:
        modified_string = "".join(codeblocks)

    return modified_string


class HfDocTestParser(doctest.DocTestParser):
    """
    Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
    means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
    added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.

    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
    """

    # This regular expression is used to find doctest examples in a
    # string.  It defines three groups: `source` is the source code
    # (including leading indentation and prompts); `indent` is the
    # indentation of the first (PS1) line of the source code; and
    # `want` is the expected output (including leading indentation).
    # fmt: off
    _EXAMPLE_RE = re.compile(r'''
        # Source consists of a PS1 line followed by zero or more PS2 lines.
        (?P<source>
            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
        \n?
        # Want consists of any non-blank lines that do not start with PS1.
        (?P<want> (?:(?![ ]*$)    # Not a blank line
             (?![ ]*>>>)          # Not a line starting with PS1
             # !!!!!!!!!!! HF Specific !!!!!!!!!!!
             (?:(?!```).)*        # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
             # !!!!!!!!!!! HF Specific !!!!!!!!!!!
             (?:\n|$)  # Match a new line or end of string
          )*)
        ''', re.MULTILINE | re.VERBOSE
    )
    # fmt: on

    # !!!!!!!!!!! HF Specific !!!!!!!!!!!
    skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
    # !!!!!!!!!!! HF Specific !!!!!!!!!!!

    def parse(self, string, name="<string>"):
        """
        Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
        calling `super().parse`
        """
        string = preprocess_string(string, self.skip_cuda_tests)
        return super().parse(string, name)


class HfDoctestModule(Module):
    """
    Overwrites the `DoctestModule` of the pytest package to make sure the `HfDocTestParser` is used when discovering
    tests.
    """

    def collect(self) -> Iterable[DoctestItem]:
        class MockAwareDocTestFinder(doctest.DocTestFinder):
            """A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.

            https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
            """

            def _find_lineno(self, obj, source_lines):
                """Doctest code does not take into account `@property`, this
                is a hackish way to fix it. https://bugs.python.org/issue17446

                Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
                reported upstream. #8796
                """
                if isinstance(obj, property):
                    obj = getattr(obj, "fget", obj)

                if hasattr(obj, "__wrapped__"):
                    # Get the main obj in case of it being wrapped
                    obj = inspect.unwrap(obj)

                # Type ignored because this is a private function.
                return super()._find_lineno(  # type:ignore[misc]
                    obj,
                    source_lines,
                )

            def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
                if _is_mocked(obj):
                    return
                with _patch_unwrap_mock_aware():
                    # Type ignored because this is a private function.
                    super()._find(  # type:ignore[misc]
                        tests, obj, name, module, source_lines, globs, seen
                    )

        if self.path.name == "conftest.py":
            module = self.config.pluginmanager._importconftest(
                self.path,
                self.config.getoption("importmode"),
                rootpath=self.config.rootpath,
            )
        else:
            try:
                module = import_path(
                    self.path,
                    root=self.config.rootpath,
                    mode=self.config.getoption("importmode"),
                )
            except ImportError:
                if self.config.getvalue("doctest_ignore_import_errors"):
                    skip("unable to import module %r" % self.path)
                else:
                    raise

        # !!!!!!!!!!! HF Specific !!!!!!!!!!!
        finder = MockAwareDocTestFinder(parser=HfDocTestParser())
        # !!!!!!!!!!! HF Specific !!!!!!!!!!!
        optionflags = get_optionflags(self)
        runner = _get_runner(
            verbose=False,
            optionflags=optionflags,
            checker=_get_checker(),
            continue_on_failure=_get_continue_on_failure(self.config),
        )
        for test in finder.find(module, module.__name__):
            if test.examples:  # skip empty doctests and cuda
                yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)