# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import contextlib
import doctest
import functools
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections.abc import Mapping
from io import StringIO
from pathlib import Path
from typing import Iterable, Iterator, List, Optional, Union
from unittest import mock

import huggingface_hub
import requests

from transformers import logging as transformers_logging

from .deepspeed import is_deepspeed_available
from .integrations import (
    is_clearml_available,
    is_fairscale_available,
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
)
from .utils import (
    is_accelerate_available,
    is_apex_available,
    is_bitsandbytes_available,
    is_bs4_available,
    is_cython_available,
    is_decord_available,
    is_detectron2_available,
    is_faiss_available,
    is_flax_available,
    is_ftfy_available,
    is_ipex_available,
    is_jieba_available,
    is_jumanpp_available,
    is_keras_nlp_available,
    is_librosa_available,
    is_natten_available,
    is_onnx_available,
    is_optimum_available,
    is_pandas_available,
    is_phonemizer_available,
    is_pyctcdecode_available,
    is_pytesseract_available,
    is_pytest_available,
    is_pytorch_quantization_available,
    is_rjieba_available,
    is_safetensors_available,
    is_scipy_available,
    is_sentencepiece_available,
    is_seqio_available,
    is_soundfile_availble,
    is_spacy_available,
    is_sudachi_available,
    is_tensorflow_probability_available,
    is_tensorflow_text_available,
    is_tf2onnx_available,
    is_tf_available,
    is_timm_available,
    is_tokenizers_available,
    is_torch_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
    is_torch_neuroncore_available,
    is_torch_npu_available,
    is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
    is_torch_tpu_available,
    is_torchaudio_available,
    is_torchdynamo_available,
    is_torchvision_available,
    is_vision_available,
    strtobool,
)


if is_accelerate_available():
    from accelerate.state import AcceleratorState, PartialState


if is_pytest_available():
    from _pytest.doctest import (
        Module,
        _get_checker,
        _get_continue_on_failure,
        _get_runner,
        _is_mocked,
        _patch_unwrap_mock_aware,
        get_optionflags,
        import_path,
    )
    from _pytest.outcomes import skip
    from pytest import DoctestItem
else:
    Module = object
    DoctestItem = object


SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.

# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"

# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


def parse_int_from_env(key, default=None):
    try:
        value = os.environ[key]
    except KeyError:
        _value = default
    else:
        try:
            _value = int(value)
        except ValueError:
            raise ValueError(f"If set, {key} must be an int.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_tool_tests = parse_flag_from_env("RUN_TOOL_TESTS", default=False)


def is_pt_tf_cross_test(test_case):
    """
    Decorator marking a test that controls interactions between PyTorch and TensorFlow.

    PT+TF tests are skipped by default and can be run only by setting the RUN_PT_TF_CROSS_TESTS environment variable
    to a truthy value and selecting the is_pt_tf_cross_test pytest mark.

    """
    if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
        return unittest.skip("test is PT+TF test")(test_case)
    else:
        try:
            import pytest  # We don't need a hard dependency on pytest in the main library
        except ImportError:
            return test_case
        else:
            return pytest.mark.is_pt_tf_cross_test()(test_case)
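

# Usage sketch (hypothetical test method): the decorator both skips the test
# unless RUN_PT_TF_CROSS_TESTS is truthy and tags it with the pytest mark, so
# the marked subset can be selected on its own, e.g.
# `RUN_PT_TF_CROSS_TESTS=1 pytest -m is_pt_tf_cross_test tests/`.
#
#     @is_pt_tf_cross_test
#     def test_pt_tf_output_equivalence(self):
#         ...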


def is_pt_flax_cross_test(test_case):
    """
    Decorator marking a test that controls interactions between PyTorch and Flax.

    PT+FLAX tests are skipped by default and can be run only by setting the RUN_PT_FLAX_CROSS_TESTS environment
    variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.

    """
    if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
        return unittest.skip("test is PT+FLAX test")(test_case)
    else:
        try:
            import pytest  # We don't need a hard dependency on pytest in the main library
        except ImportError:
            return test_case
        else:
            return pytest.mark.is_pt_flax_cross_test()(test_case)


def is_staging_test(test_case):
    """
    Decorator marking a test as a staging test.

    Those tests will run using the staging environment of huggingface.co instead of the real model hub.
    """
    if not _run_staging:
        return unittest.skip("test is staging test")(test_case)
    else:
        try:
            import pytest  # We don't need a hard dependency on pytest in the main library
        except ImportError:
            return test_case
        else:
            return pytest.mark.is_staging_test()(test_case)


def is_pipeline_test(test_case):
    """
    Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
    skipped.
    """
    if not _run_pipeline_tests:
        return unittest.skip("test is pipeline test")(test_case)
    else:
        try:
            import pytest  # We don't need a hard dependency on pytest in the main library
        except ImportError:
            return test_case
        else:
            return pytest.mark.is_pipeline_test()(test_case)


def is_tool_test(test_case):
    """
    Decorator marking a test as a tool test. If RUN_TOOL_TESTS is set to a falsy value, those tests will be skipped.
    """
    if not _run_tool_tests:
        return unittest.skip("test is a tool test")(test_case)
    else:
        try:
            import pytest  # We don't need a hard dependency on pytest in the main library
        except ImportError:
            return test_case
        else:
            return pytest.mark.is_tool_test()(test_case)


def slow(test_case):
    """
    Decorator marking a test as slow.

    Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.

    """
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
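

# Usage sketch (hypothetical test): slow tests stay in the suite but only run
# when the RUN_SLOW env var is truthy, e.g. `RUN_SLOW=1 pytest tests/`.
#
#     @slow
#     def test_full_checkpoint_conversion(self):
#         ...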


def tooslow(test_case):
    """
    Decorator marking a test as too slow.

    These tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
    these will not be tested by the CI.

    """
    return unittest.skip("test is too slow")(test_case)


def custom_tokenizers(test_case):
    """
    Decorator marking a test for a custom tokenizer.

    Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
    environment variable to a truthy value to run them.
    """
    return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)


def require_bs4(test_case):
    """
    Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
    """
    return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)


def require_accelerate(test_case):
    """
    Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
    """
    return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case)


def require_safetensors(test_case):
    """
    Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
    """
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_rjieba(test_case):
    """
    Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
    """
    return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)


def require_jieba(test_case):
    """
    Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
    """
    return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)


def require_tf2onnx(test_case):
    """
    Decorator marking a test that requires tf2onnx. These tests are skipped when tf2onnx isn't installed.
    """
    return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)


def require_onnx(test_case):
    """
    Decorator marking a test that requires ONNX. These tests are skipped when ONNX isn't installed.
    """
    return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)


def require_timm(test_case):
    """
    Decorator marking a test that requires Timm.

    These tests are skipped when Timm isn't installed.

    """
    return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)


def require_natten(test_case):
    """
    Decorator marking a test that requires NATTEN.

    These tests are skipped when NATTEN isn't installed.

    """
    return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)


def require_torch(test_case):
    """
    Decorator marking a test that requires PyTorch.

    These tests are skipped when PyTorch isn't installed.

    """
    return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)


def require_torchvision(test_case):
    """
    Decorator marking a test that requires Torchvision.

    These tests are skipped when Torchvision isn't installed.

    """
    return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)


def require_torch_or_tf(test_case):
    """
    Decorator marking a test that requires PyTorch or TensorFlow.

    These tests are skipped when neither PyTorch nor TensorFlow is installed.

    """
    return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
        test_case
    )


def require_intel_extension_for_pytorch(test_case):
    """
    Decorator marking a test that requires Intel Extension for PyTorch.

    These tests are skipped when Intel Extension for PyTorch isn't installed or its version does not match the current
    PyTorch version.

    """
    return unittest.skipUnless(
        is_ipex_available(),
        "test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
        " https://github.com/intel/intel-extension-for-pytorch",
    )(test_case)


def require_tensorflow_probability(test_case):
    """
    Decorator marking a test that requires TensorFlow probability.

    These tests are skipped when TensorFlow probability isn't installed.

    """
    return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
        test_case
    )


def require_torchaudio(test_case):
    """
    Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
    """
    return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)


def require_tf(test_case):
    """
    Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
    """
    return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)


def require_flax(test_case):
    """
    Decorator marking a test that requires JAX & Flax. These tests are skipped when one or both are not installed.
    """
    return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)


def require_sentencepiece(test_case):
    """
    Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
    """
    return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)


def require_seqio(test_case):
    """
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
    """
    return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)


def require_scipy(test_case):
    """
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
    """
    return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)


def require_tokenizers(test_case):
    """
    Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
    """
    return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)


def require_tensorflow_text(test_case):
    """
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
    installed.
    """
    return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)


def require_keras_nlp(test_case):
    """
    Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
    """
    return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)


def require_pandas(test_case):
    """
    Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
    """
    return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)


def require_pytesseract(test_case):
    """
    Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
    """
    return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)


def require_pytorch_quantization(test_case):
    """
    Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
    Quantization Toolkit isn't installed.
    """
    return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
        test_case
    )


def require_vision(test_case):
    """
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
    """
    return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)


def require_ftfy(test_case):
    """
    Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
    """
    return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)


def require_spacy(test_case):
    """
    Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
    """
    return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)


def require_decord(test_case):
    """
    Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
    """
    return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)


def require_torch_multi_gpu(test_case):
    """
    Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
    multiple GPUs.

    To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
    """
    if not is_torch_available():
        return unittest.skip("test requires PyTorch")(test_case)

    import torch

    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_torch_non_multi_gpu(test_case):
    """
    Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
    """
    if not is_torch_available():
        return unittest.skip("test requires PyTorch")(test_case)

    import torch

    return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)


def require_torch_up_to_2_gpus(test_case):
    """
    Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
    """
    if not is_torch_available():
        return unittest.skip("test requires PyTorch")(test_case)

    import torch

    return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)


def require_torch_tpu(test_case):
    """
    Decorator marking a test that requires a TPU (in PyTorch).
    """
    return unittest.skipUnless(is_torch_tpu_available(check_device=False), "test requires PyTorch TPU")(test_case)


def require_torch_neuroncore(test_case):
    """
    Decorator marking a test that requires NeuronCore (in PyTorch).
    """
    return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
        test_case
    )


def require_torch_npu(test_case):
    """
    Decorator marking a test that requires NPU (in PyTorch).
    """
    return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)


def require_torch_multi_npu(test_case):
    """
    Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
    multiple NPUs.

    To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
    """
    if not is_torch_npu_available():
        return unittest.skip("test requires PyTorch NPU")(test_case)

    return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)


if is_torch_available():
    # Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
    import torch

    torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
    torch_device = None

if is_tf_available():
    import tensorflow as tf

if is_flax_available():
    import jax

    jax_device = jax.default_backend()
else:
    jax_device = None


def require_torchdynamo(test_case):
    """Decorator marking a test that requires TorchDynamo"""
    return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)


def require_torch_tensorrt_fx(test_case):
    """Decorator marking a test that requires Torch-TensorRT FX"""
    return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)


def require_torch_gpu(test_case):
    """Decorator marking a test that requires CUDA and PyTorch."""
    return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)


def require_torch_bf16_gpu(test_case):
    """Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
    return unittest.skipUnless(
        is_torch_bf16_gpu_available(),
        "test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
    )(test_case)


def require_torch_bf16_cpu(test_case):
    """Decorator marking a test that requires torch>=1.10, using CPU."""
    return unittest.skipUnless(
        is_torch_bf16_cpu_available(),
        "test requires torch>=1.10, using CPU",
    )(test_case)


def require_torch_tf32(test_case):
    """Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
    return unittest.skipUnless(
        is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
    )(test_case)


def require_detectron2(test_case):
    """Decorator marking a test that requires detectron2."""
    return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)


def require_faiss(test_case):
    """Decorator marking a test that requires faiss."""
    return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)


def require_optuna(test_case):
    """
    Decorator marking a test that requires optuna.

    These tests are skipped when optuna isn't installed.

    """
    return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)


def require_ray(test_case):
    """
    Decorator marking a test that requires Ray/tune.

    These tests are skipped when Ray/tune isn't installed.

    """
    return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)


def require_sigopt(test_case):
    """
    Decorator marking a test that requires SigOpt.

    These tests are skipped when SigOpt isn't installed.

    """
    return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)


def require_wandb(test_case):
    """
    Decorator marking a test that requires wandb.

    These tests are skipped when wandb isn't installed.

    """
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_clearml(test_case):
    """
    Decorator marking a test that requires clearml.

    These tests are skipped when clearml isn't installed.

    """
    return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)


def require_soundfile(test_case):
    """
    Decorator marking a test that requires soundfile

    These tests are skipped when soundfile isn't installed.

    """
    return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)


def require_deepspeed(test_case):
    """
    Decorator marking a test that requires deepspeed
    """
    return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)


def require_fairscale(test_case):
    """
    Decorator marking a test that requires fairscale
    """
    return unittest.skipUnless(is_fairscale_available(), "test requires fairscale")(test_case)


def require_apex(test_case):
    """
    Decorator marking a test that requires apex
    """
    return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)


def require_bitsandbytes(test_case):
    """
    Decorator for bits and bytes (bnb) dependency
    """
    return unittest.skipUnless(is_bitsandbytes_available(), "test requires bnb")(test_case)


def require_optimum(test_case):
    """
    Decorator for optimum dependency
    """
    return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)


def require_phonemizer(test_case):
    """
    Decorator marking a test that requires phonemizer
    """
    return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)


def require_pyctcdecode(test_case):
    """
    Decorator marking a test that requires pyctcdecode
    """
    return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)


def require_librosa(test_case):
    """
    Decorator marking a test that requires librosa
    """
    return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)


def cmd_exists(cmd):
    return shutil.which(cmd) is not None


def require_usr_bin_time(test_case):
    """
    Decorator marking a test that requires `/usr/bin/time`
    """
    return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)


def require_sudachi(test_case):
    """
    Decorator marking a test that requires sudachi
    """
    return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)


def require_jumanpp(test_case):
    """
    Decorator marking a test that requires jumanpp
    """
    return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)


def require_cython(test_case):
    """
    Decorator marking a test that requires Cython
    """
    return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)


def get_gpu_count():
    """
    Return the number of available gpus (regardless of whether torch, tf or jax is used)
    """
    if is_torch_available():
        import torch

        return torch.cuda.device_count()
    elif is_tf_available():
        import tensorflow as tf

        return len(tf.config.list_physical_devices("GPU"))
    elif is_flax_available():
        import jax

        return jax.device_count()
    else:
        return 0


def get_tests_dir(append_path=None):
    """
    Args:
        append_path: optional path to append to the tests dir path

    Return:
        The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.

    """
    # this function caller's __file__
    caller__file__ = inspect.stack()[1][1]
    tests_dir = os.path.abspath(os.path.dirname(caller__file__))

    while not tests_dir.endswith("tests"):
        tests_dir = os.path.dirname(tests_dir)

    if append_path:
        return os.path.join(tests_dir, append_path)
    else:
        return tests_dir
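

# Usage sketch (hypothetical fixture path), typically called from a test module
# living somewhere under `tests/`, so data files resolve no matter which
# directory pytest was launched from:
#
#     SAMPLES_DIR = get_tests_dir("fixtures/tests_samples")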


#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py


# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# It can handle a single string or a multiline buffer.
def apply_print_resets(buf):
    return re.sub(r"^.*\r", "", buf, 0, re.M)
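

# A worked example of the reset-stripping above (hypothetical progress output):
# everything up to the last `\r` on each line is dropped, keeping only the
# final state of that line.
#
#     apply_print_resets("downloading:  10%\rdownloading: 100%\rdone") == "done"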


def assert_screenout(out, what):
    out_pr = apply_print_resets(out).lower()
    match_str = out_pr.find(what.lower())
    assert match_str != -1, f"expecting to find {what} in output: {out_pr}"


class CaptureStd:
    """
    Context manager to capture:

        - stdout: replay it, clean it up and make it available via `obj.out`
        - stderr: replay it and make it available via `obj.err`

    Args:
        out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
        err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
        replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
            By default each captured stream gets replayed back on context's exit, so that one can see what the test was
            doing. If this behavior is not desired and the captured data shouldn't be replayed, pass `replay=False` to
            disable this feature.

    Examples:

    ```python
    # to capture stdout only with auto-replay
    with CaptureStdout() as cs:
        print("Secret message")
    assert "message" in cs.out

    # to capture stderr only with auto-replay
    import sys

    with CaptureStderr() as cs:
        print("Warning: ", file=sys.stderr)
    assert "Warning" in cs.err

    # to capture both streams with auto-replay
    with CaptureStd() as cs:
        print("Secret message")
        print("Warning: ", file=sys.stderr)
    assert "message" in cs.out
    assert "Warning" in cs.err

    # to capture just one of the streams, and not the other, with auto-replay
    with CaptureStd(err=False) as cs:
        print("Secret message")
    assert "message" in cs.out
    # but best use the stream-specific subclasses

    # to capture without auto-replay
    with CaptureStd(replay=False) as cs:
        print("Secret message")
    assert "message" in cs.out
    ```"""

    def __init__(self, out=True, err=True, replay=True):
        self.replay = replay

        if out:
            self.out_buf = StringIO()
            self.out = "error: CaptureStd context is unfinished yet, called too early"
        else:
            self.out_buf = None
            self.out = "not capturing stdout"

        if err:
            self.err_buf = StringIO()
            self.err = "error: CaptureStd context is unfinished yet, called too early"
        else:
            self.err_buf = None
            self.err = "not capturing stderr"

    def __enter__(self):
        if self.out_buf:
            self.out_old = sys.stdout
            sys.stdout = self.out_buf

        if self.err_buf:
            self.err_old = sys.stderr
            sys.stderr = self.err_buf

        return self

    def __exit__(self, *exc):
        if self.out_buf:
            sys.stdout = self.out_old
            captured = self.out_buf.getvalue()
            if self.replay:
                sys.stdout.write(captured)
            self.out = apply_print_resets(captured)

        if self.err_buf:
            sys.stderr = self.err_old
            captured = self.err_buf.getvalue()
            if self.replay:
                sys.stderr.write(captured)
            self.err = captured

    def __repr__(self):
        msg = ""
        if self.out_buf:
            msg += f"stdout: {self.out}\n"
        if self.err_buf:
            msg += f"stderr: {self.err}\n"
        return msg


# in tests it's best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.


class CaptureStdout(CaptureStd):
    """Same as CaptureStd but captures only stdout"""

    def __init__(self, replay=True):
        super().__init__(err=False, replay=replay)


class CaptureStderr(CaptureStd):
    """Same as CaptureStd but captures only stderr"""

    def __init__(self, replay=True):
        super().__init__(out=False, replay=replay)


class CaptureLogger:
    """
    Context manager to capture `logging` streams

    Args:
        logger: `logging` logger object

    Returns:
        The captured output is available via `self.out`

    Example:

    ```python
    >>> from transformers import logging
    >>> from transformers.testing_utils import CaptureLogger

    >>> msg = "Testing 1, 2, 3"
    >>> logging.set_verbosity_info()
    >>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
    >>> with CaptureLogger(logger) as cl:
    ...     logger.info(msg)
    >>> assert cl.out == msg + "\n"
    ```
    """

    def __init__(self, logger):
        self.logger = logger
        self.io = StringIO()
        self.sh = logging.StreamHandler(self.io)
        self.out = ""

    def __enter__(self):
        self.logger.addHandler(self.sh)
        return self

    def __exit__(self, *exc):
        self.logger.removeHandler(self.sh)
        self.out = self.io.getvalue()

    def __repr__(self):
        return f"captured: {self.out}\n"


@contextlib.contextmanager
def LoggingLevel(level):
    """
    This is a context manager to temporarily change the transformers module's logging level to the desired value and
    have it restored to the original setting at the end of the scope.

    Example:

    ```python
    with LoggingLevel(logging.INFO):
        AutoModel.from_pretrained("gpt2")  # calls logger.info() several times
    ```
    """
    orig_level = transformers_logging.get_verbosity()
    try:
        transformers_logging.set_verbosity(level)
        yield
    finally:
        transformers_logging.set_verbosity(orig_level)


@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
    """
    Temporarily add a given path to `sys.path`.

    Usage:

    ```python
    with ExtendSysPath("/path/to/dir"):
        mymodule = importlib.import_module("mymodule")
    ```
    """

    path = os.fspath(path)
    try:
        sys.path.insert(0, path)
        yield
    finally:
        sys.path.remove(path)


class TestCasePlus(unittest.TestCase):
    """
    This class extends *unittest.TestCase* with additional features.

    Feature 1: A set of fully resolved important file and dir path accessors.

    In tests, we often need to know where things are relative to the current test file, which is not trivial since the
    test could be invoked from more than one directory or could reside in sub-directories at different depths. This
    class solves this problem by sorting out all the basic paths and providing easy accessors to them:

    - `pathlib` objects (all fully resolved):

       - `test_file_path` - the current test file path (=`__file__`)
       - `test_file_dir` - the directory containing the current test file
       - `tests_dir` - the directory of the `tests` test suite
       - `examples_dir` - the directory of the `examples` test suite
       - `repo_root_dir` - the directory of the repository
       - `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)

    - stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:

       - `test_file_path_str`
       - `test_file_dir_str`
       - `tests_dir_str`
       - `examples_dir_str`
       - `repo_root_dir_str`
       - `src_dir_str`

    Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.

    1. Create a unique temporary dir:

    ```python
    def test_whatever(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
    ```

    `tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
    test.


    2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
    empty it after the test.

    ```python
    def test_whatever(self):
        tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
    ```

    This is useful for debugging when you want to monitor a specific directory and want to make sure the previous
    tests didn't leave any data in there.

    3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
        following behavior:

    `before=True`: the temporary dir will always be cleared at the beginning of the test.

    `before=False`: if the temporary dir already existed, any existing files will remain there.

    `after=True`: the temporary dir will always be deleted at the end of the test.

    `after=False`: the temporary dir will always be left intact at the end of the test.

    Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
    allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
    will get nuked. i.e. please always pass paths that start with `./`

    Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
    otherwise.

    Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
    is useful for invoking external programs from the test suite - e.g. distributed training.


    ```python
    def test_whatever(self):
        env = self.get_env()
    ```"""

    def setUp(self):
        # get_auto_remove_tmp_dir feature:
        self.teardown_tmp_dirs = []

        # figure out the resolved paths for repo_root, tests, examples, etc.
        self._test_file_path = inspect.getfile(self.__class__)
        path = Path(self._test_file_path).resolve()
        self._test_file_dir = path.parents[0]
        for up in [1, 2, 3]:
            tmp_dir = path.parents[up]
            if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
                break
        if tmp_dir:
            self._repo_root_dir = tmp_dir
        else:
            raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
        self._tests_dir = self._repo_root_dir / "tests"
        self._examples_dir = self._repo_root_dir / "examples"
        self._src_dir = self._repo_root_dir / "src"

    @property
    def test_file_path(self):
        return self._test_file_path

    @property
    def test_file_path_str(self):
        return str(self._test_file_path)

    @property
    def test_file_dir(self):
        return self._test_file_dir

    @property
    def test_file_dir_str(self):
        return str(self._test_file_dir)

    @property
    def tests_dir(self):
        return self._tests_dir

    @property
    def tests_dir_str(self):
        return str(self._tests_dir)

    @property
    def examples_dir(self):
        return self._examples_dir

    @property
    def examples_dir_str(self):
        return str(self._examples_dir)

    @property
    def repo_root_dir(self):
        return self._repo_root_dir

    @property
    def repo_root_dir_str(self):
        return str(self._repo_root_dir)

    @property
    def src_dir(self):
        return self._src_dir

    @property
    def src_dir_str(self):
        return str(self._src_dir)

    def get_env(self):
        """
        Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
        invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.

        It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
        the preset `PYTHONPATH` if any (all full resolved paths).

        """
        env = os.environ.copy()
        paths = [self.src_dir_str]
        if "/examples" in self.test_file_dir_str:
            paths.append(self.examples_dir_str)
        else:
            paths.append(self.tests_dir_str)
        paths.append(env.get("PYTHONPATH", ""))

        env["PYTHONPATH"] = ":".join(paths)
        return env

    def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
        """
        Args:
            tmp_dir (`string`, *optional*):
                if `None`:

                   - a unique temporary path will be created
                   - sets `before=True` if `before` is `None`
                   - sets `after=True` if `after` is `None`
                else:

                   - `tmp_dir` will be created
                   - sets `before=True` if `before` is `None`
                   - sets `after=False` if `after` is `None`
            before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
            after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its
                contents intact at the end of the test.

        Returns:
            tmp_dir (`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
        """
        if tmp_dir is not None:
            # defining the most likely desired behavior for when a custom path is provided.
            # this most likely indicates the debug mode where we want an easily locatable dir that:
            # 1. gets cleared out before the test (if it already exists)
            # 2. is left intact after the test
            if before is None:
                before = True
            if after is None:
                after = False

            # using provided path
            path = Path(tmp_dir).resolve()

            # to avoid nuking parts of the filesystem, only relative paths are allowed
            if not tmp_dir.startswith("./"):
                raise ValueError(
                    f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
                )

            # ensure the dir is empty to start with
            if before is True and path.exists():
                shutil.rmtree(tmp_dir, ignore_errors=True)

            path.mkdir(parents=True, exist_ok=True)

        else:
            # defining the most likely desired behavior for when a unique tmp path is auto generated
            # (not a debug mode), here we require a unique tmp dir that:
            # 1. is empty before the test (it will be empty in this situation anyway)
            # 2. gets fully removed after the test
            if before is None:
                before = True
            if after is None:
                after = True

            # using unique tmp dir (always empty, regardless of `before`)
            tmp_dir = tempfile.mkdtemp()

        if after is True:
            # register for deletion
            self.teardown_tmp_dirs.append(tmp_dir)

        return tmp_dir
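
    # Usage sketch (hypothetical path): a debug-friendly override that keeps the
    # previous run's files in place but still deletes the dir when the test ends:
    #
    #     tmp_dir = self.get_auto_remove_tmp_dir("./xxx", before=False, after=True)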

    def python_one_liner_max_rss(self, one_liner_str):
        """
        Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
        program.

        Args:
            one_liner_str (`string`):
                a python one liner code that gets passed to `python -c`

        Returns:
            max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.

        Requirements:
            this helper needs `/usr/bin/time` to be installed (`apt install time`)

        Example:

        ```
        one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("t5-large")'
        max_rss = self.python_one_liner_max_rss(one_liner_str)
        ```
        """

        if not cmd_exists("/usr/bin/time"):
            raise ValueError("/usr/bin/time is required, install with `apt install time`")

        cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
        with CaptureStd() as cs:
            execute_subprocess_async(cmd, env=self.get_env())
        # returned data is in KB so convert to bytes
        max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
        return max_rss

    def tearDown(self):
        # get_auto_remove_tmp_dir feature: remove registered temp dirs
        for path in self.teardown_tmp_dirs:
            shutil.rmtree(path, ignore_errors=True)
        self.teardown_tmp_dirs = []
        if is_accelerate_available():
            AcceleratorState._reset_state()
            PartialState._reset_state()

            # delete all the env variables having `ACCELERATE` in them
            for k in list(os.environ.keys()):
                if "ACCELERATE" in k:
                    del os.environ[k]


def mockenv(**kwargs):
    """
    this is a convenience wrapper that allows this::

    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)

    """
    return mock.patch.dict(os.environ, kwargs)


# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
    """
    Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv

    The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.

    Args:
      remove: Environment variables to remove.
      update: Dictionary of environment variables and values to add/update.
    """
    env = os.environ
    update = update or {}
    remove = remove or []

    # List of environment variables being updated or removed.
    stomped = (set(update.keys()) | set(remove)) & set(env.keys())
    # Environment variables and values to restore on exit.
    update_after = {k: env[k] for k in stomped}
    # Environment variables and values to remove on exit.
    remove_after = frozenset(k for k in update if k not in env)

    try:
        env.update(update)
        [env.pop(k, None) for k in remove]
        yield
    finally:
        env.update(update_after)
        [env.pop(k) for k in remove_after]
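

# Usage sketch (hypothetical variable names): inside the block WANDB_DISABLED
# is set and PYTHONHASHSEED is unset; on exit the original environment is
# restored, including variables that were removed inside the block.
#
#     with mockenv_context("PYTHONHASHSEED", WANDB_DISABLED="true"):
#         ...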


# --- pytest conf functions --- #

# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}


def pytest_addoption_shared(parser):
    """
    This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.

    It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
    option.

    """
    option = "--make-reports"
    if option not in pytest_opt_registered:
        parser.addoption(
            option,
            action="store",
            default=False,
            help="generate report files. The value of this option is used as a prefix to report names",
        )
        pytest_opt_registered[option] = 1
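

# Sketch of the wrapper each conftest.py is expected to define (the actual
# conftest.py files may do more):
#
#     from transformers.testing_utils import pytest_addoption_shared
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)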


def pytest_terminal_summary_main(tr, id):
    """
    Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
    directory. The report files are prefixed with the test suite name.

    This function emulates --durations and -rA pytest arguments.

    This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
    there.

    Args:
    - tr: `terminalreporter` passed from `conftest.py`
    - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
      needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.

    NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest do internal
    changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
    plugins and interfere.

    """
    from _pytest.config import create_terminal_writer

    if not len(id):
        id = "tests"

    config = tr.config
    orig_writer = config.get_terminal_writer()
    orig_tbstyle = config.option.tbstyle
    orig_reportchars = tr.reportchars

    dir = f"reports/{id}"
    Path(dir).mkdir(parents=True, exist_ok=True)
    report_files = {
        k: f"{dir}/{k}.txt"
        for k in [
            "durations",
            "errors",
            "failures_long",
            "failures_short",
            "failures_line",
            "passes",
            "stats",
            "summary_short",
            "warnings",
        ]
    }

    # custom durations report
    # note: there is no need to call pytest --durations=XX to get this separate report
    # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
    dlist = []
    for replist in tr.stats.values():
        for rep in replist:
            if hasattr(rep, "duration"):
                dlist.append(rep)
    if dlist:
        dlist.sort(key=lambda x: x.duration, reverse=True)
        with open(report_files["durations"], "w") as f:
            durations_min = 0.05  # sec
            f.write("slowest durations\n")
            for i, rep in enumerate(dlist):
                if rep.duration < durations_min:
                    f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
                    break
                f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")

    def summary_failures_short(tr):
        # expecting that the reports were --tb=long (default) so we chop them off here to the last frame
        reports = tr.getreports("failed")
        if not reports:
            return
        tr.write_sep("=", "FAILURES SHORT STACK")
        for rep in reports:
            msg = tr._getfailureheadline(rep)
            tr.write_sep("_", msg, red=True, bold=True)
            # chop off the optional leading extra frames, leaving only the last one
            longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
            tr._tw.line(longrepr)
            # note: not printing out any rep.sections to keep the report short

    # use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
    # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
    # note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
    # pytest-instafail does that)

    # report failures with line/short/long styles
    config.option.tbstyle = "auto"  # full tb
    with open(report_files["failures_long"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_failures()

    # config.option.tbstyle = "short" # short tb
    with open(report_files["failures_short"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        summary_failures_short(tr)

    config.option.tbstyle = "line"  # one line per error
    with open(report_files["failures_line"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_failures()

    with open(report_files["errors"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_errors()

    with open(report_files["warnings"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_warnings()  # normal warnings
        tr.summary_warnings()  # final warnings

    tr.reportchars = "wPpsxXEf"  # emulate -rA (used in summary_passes() and short_test_summary())

    # Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it times out on CircleCI if it
    # takes > 10 minutes (as this part doesn't generate any output on the terminal).
    # (also, it seems there is no useful information in this report, and we rarely need to read it)
    # with open(report_files["passes"], "w") as f:
    #     tr._tw = create_terminal_writer(config, f)
    #     tr.summary_passes()

    with open(report_files["summary_short"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.short_test_summary()

    with open(report_files["stats"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_stats()

    # restore:
    tr._tw = orig_writer
    tr.reportchars = orig_reportchars
    config.option.tbstyle = orig_tbstyle
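
# A minimal `conftest.py` wrapper sketch: reports are generated only when pytest is invoked with
# `--make-reports=<id>` (the import path assumes the installed `transformers` package).
#
#     from transformers.testing_utils import pytest_terminal_summary_main
#
#     def pytest_terminal_summary(terminalreporter):
#         make_reports = terminalreporter.config.getoption("--make-reports")
#         if make_reports:
#             pytest_terminal_summary_main(terminalreporter, id=make_reports)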


# --- distributed testing functions --- #

# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio  # noqa


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """
    Returns an int value of the worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime,
    or 0 if `-n 1` or `pytest-xdist` isn't being used.
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)
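
# For example, under `pytest -n 2` the workers see PYTEST_XDIST_WORKER="gw0" / "gw1", which this helper
# maps to 0 / 1; without pytest-xdist the "gw0" default also yields 0.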


def get_torch_dist_unique_port():
    """
    Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.

    Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
    port at once.
    """
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
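
# A usage sketch combining this helper with `execute_subprocess_async` above (the script path and
# arguments are placeholders):
#
#     cmd = f"""
#         python -m torch.distributed.launch
#         --nproc_per_node=2
#         --master_port={get_torch_dist_unique_port()}
#         examples/pytorch/translation/run_translation.py --do_train
#     """.split()
#     execute_subprocess_async(cmd, env=os.environ.copy())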


def nested_simplify(obj, decimals=3):
    """
    Simplifies an object by rounding float numbers and downcasting tensors/numpy arrays, to allow for simple equality
    tests within tests.
    """
    import numpy as np

    if isinstance(obj, list):
        return [nested_simplify(item, decimals) for item in obj]
    if isinstance(obj, tuple):
        return tuple([nested_simplify(item, decimals) for item in obj])
    elif isinstance(obj, np.ndarray):
        return nested_simplify(obj.tolist())
    elif isinstance(obj, Mapping):
        return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
    elif isinstance(obj, (str, int, np.int64)):
        return obj
    elif obj is None:
        return obj
    elif is_torch_available() and isinstance(obj, torch.Tensor):
        return nested_simplify(obj.tolist(), decimals)
    elif is_tf_available() and tf.is_tensor(obj):
        return nested_simplify(obj.numpy().tolist())
    elif isinstance(obj, float):
        return round(obj, decimals)
    elif isinstance(obj, (np.int32, np.float32)):
        return nested_simplify(obj.item(), decimals)
    else:
        raise Exception(f"Not supported: {type(obj)}")
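
# A small illustration of the rounding above:
#
#     nested_simplify({"scores": [0.123456, 0.876543]}, decimals=3)
#     # -> {"scores": [0.123, 0.877]}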


def check_json_file_has_correct_format(file_path):
    with open(file_path, "r") as f:
        lines = f.readlines()
        if len(lines) == 1:
            # length can only be 1 if dict is empty
            assert lines[0] == "{}"
        else:
            # otherwise make sure json has correct format (at least 3 lines)
            assert len(lines) >= 3
            # one key per line, indent should be 2, min length is 3
            assert lines[0].strip() == "{"
            for line in lines[1:-1]:
                left_indent = len(line) - len(line.lstrip())
                assert left_indent == 2
            assert lines[-1].strip() == "}"


def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
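
# For example, to_2tuple(3) returns (3, 3), while to_2tuple((224, 224)) is returned unchanged.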


# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
    pass


def run_command(command: List[str], return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
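
# A usage sketch (the command is a placeholder):
#
#     stdout = run_command([sys.executable, "-c", "print('hello')"], return_stdout=True)
#     # stdout == "hello\n"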


class RequestCounter:
    """
    Helper class that will count all requests made online.
    """

    def __enter__(self):
        self.head_request_count = 0
        self.get_request_count = 0
        self.other_request_count = 0

        # Mock `get_session` to count HTTP calls.
        self.old_get_session = huggingface_hub.utils._http.get_session
        self.session = requests.Session()
        self.session.request = self.new_request
        huggingface_hub.utils._http.get_session = lambda: self.session
        return self

    def __exit__(self, *args, **kwargs):
        huggingface_hub.utils._http.get_session = self.old_get_session

    def new_request(self, method, **kwargs):
        if method == "GET":
            self.get_request_count += 1
        elif method == "HEAD":
            self.head_request_count += 1
        else:
            self.other_request_count += 1

        return requests.request(method=method, **kwargs)
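
# A usage sketch (the repo id is a placeholder, assuming `from transformers import AutoConfig`): count
# Hub HTTP calls made inside the block; the counters remain readable after the block exits.
#
#     with RequestCounter() as counter:
#         AutoConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
#     print(counter.head_request_count, counter.get_request_count, counter.other_request_count)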


def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
    """
    To decorate flaky tests. They will be retried on failures.

    Args:
        max_attempts (`int`, *optional*, defaults to 5):
            The maximum number of attempts to retry the flaky test.
        wait_before_retry (`float`, *optional*):
            If provided, will wait that number of seconds before retrying the test.
        description (`str`, *optional*):
            A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
            etc.)
    """

    def decorator(test_func_ref):
        @functools.wraps(test_func_ref)
        def wrapper(*args, **kwargs):
            retry_count = 1

            while retry_count < max_attempts:
                try:
                    return test_func_ref(*args, **kwargs)

                except Exception as err:
                    print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
                    if wait_before_retry is not None:
                        time.sleep(wait_before_retry)
                    retry_count += 1

            return test_func_ref(*args, **kwargs)

        return wrapper

    return decorator
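
# A usage sketch (test name and description are illustrative):
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="flaky due to an external service")
#     def test_something_unstable(self):
#         ...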


def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
    """
    To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.

    Args:
        test_case (`unittest.TestCase`):
            The test that will run `target_func`.
        target_func (`Callable`):
            The function implementing the actual testing logic.
        inputs (`dict`, *optional*, defaults to `None`):
            The inputs that will be passed to `target_func` through an (input) queue.
        timeout (`int`, *optional*, defaults to `None`):
            The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
            variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
    """
    if timeout is None:
        timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))

    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)

    input_queue = ctx.Queue(1)
    output_queue = ctx.JoinableQueue(1)

    # We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
    input_queue.put(inputs, timeout=timeout)

    process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
    process.start()
    # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
    # the test from exiting properly.
    try:
        results = output_queue.get(timeout=timeout)
        output_queue.task_done()
    except Exception as e:
        process.terminate()
        test_case.fail(e)
    process.join(timeout=timeout)

    if results["error"] is not None:
        test_case.fail(f'{results["error"]}')
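
# A target-function sketch (names are illustrative): `target_func` receives the input queue, the output
# queue and the timeout, and must put a dict with an "error" key on the output queue (None if all went well).
#
#     import traceback
#
#     def _target_func(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             # ... run the actual assertions on `inputs` here ...
#         except Exception:
#             error = traceback.format_exc()
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()
#
#     # inside a test method: run_test_in_subprocess(test_case=self, target_func=_target_func, inputs={"x": 1})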


"""
The following contains utils to run the documentation tests without having to overwrite any files.

The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.

To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""


def preprocess_string(string, skip_cuda_tests):
    """Prepare a docstring or a `.md` file to be run by doctest.

    The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and CUDA
    usage is detected (via a heuristic), this method will return an empty string so no doctest will be run for
    `string`.
    """
    codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
    codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
    is_cuda_found = False
    for i, codeblock in enumerate(codeblocks):
        if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
            codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
        if (
            (">>>" in codeblock or "..." in codeblock)
            and re.search(r"cuda|to\(0\)|device=0", codeblock)
            and skip_cuda_tests
        ):
            is_cuda_found = True
            break

    modified_string = ""
    if not is_cuda_found:
        modified_string = "".join(codeblocks)

    return modified_string


class HfDocTestParser(doctest.DocTestParser):
    """
    Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
    means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
    added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.

    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
    """

    # This regular expression is used to find doctest examples in a
    # string.  It defines three groups: `source` is the source code
    # (including leading indentation and prompts); `indent` is the
    # indentation of the first (PS1) line of the source code; and
    # `want` is the expected output (including leading indentation).
    # fmt: off
    _EXAMPLE_RE = re.compile(r'''
        # Source consists of a PS1 line followed by zero or more PS2 lines.
        (?P<source>
            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
        \n?
        # Want consists of any non-blank lines that do not start with PS1.
        (?P<want> (?:(?![ ]*$)    # Not a blank line
             (?![ ]*>>>)          # Not a line starting with PS1
             # !!!!!!!!!!! HF Specific !!!!!!!!!!!
             (?:(?!```).)*        # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
             # !!!!!!!!!!! HF Specific !!!!!!!!!!!
             (?:\n|$)  # Match a new line or end of string
          )*)
        ''', re.MULTILINE | re.VERBOSE
    )
    # fmt: on

    # !!!!!!!!!!! HF Specific !!!!!!!!!!!
    skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
    # !!!!!!!!!!! HF Specific !!!!!!!!!!!

    def parse(self, string, name="<string>"):
        """
        Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
        calling `super().parse`
        """
        string = preprocess_string(string, self.skip_cuda_tests)
        return super().parse(string, name)


class HfDoctestModule(Module):
    """
    Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
    tests.
    """

    def collect(self) -> Iterable[DoctestItem]:
        class MockAwareDocTestFinder(doctest.DocTestFinder):
            """A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.

            https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
            """

            def _find_lineno(self, obj, source_lines):
                """Doctest code does not take into account `@property`, this
                is a hackish way to fix it. https://bugs.python.org/issue17446

                Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
                reported upstream. #8796
                """
                if isinstance(obj, property):
                    obj = getattr(obj, "fget", obj)

                if hasattr(obj, "__wrapped__"):
                    # Get the main obj in case of it being wrapped
                    obj = inspect.unwrap(obj)

                # Type ignored because this is a private function.
                return super()._find_lineno(  # type:ignore[misc]
                    obj,
                    source_lines,
                )

            def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
                if _is_mocked(obj):
                    return
                with _patch_unwrap_mock_aware():
                    # Type ignored because this is a private function.
                    super()._find(  # type:ignore[misc]
                        tests, obj, name, module, source_lines, globs, seen
                    )

        if self.path.name == "conftest.py":
            module = self.config.pluginmanager._importconftest(
                self.path,
                self.config.getoption("importmode"),
                rootpath=self.config.rootpath,
            )
        else:
            try:
                module = import_path(
                    self.path,
                    root=self.config.rootpath,
                    mode=self.config.getoption("importmode"),
                )
            except ImportError:
                if self.config.getvalue("doctest_ignore_import_errors"):
                    skip("unable to import module %r" % self.path)
                else:
                    raise

        # !!!!!!!!!!! HF Specific !!!!!!!!!!!
        finder = MockAwareDocTestFinder(parser=HfDocTestParser())
        # !!!!!!!!!!! HF Specific !!!!!!!!!!!
        optionflags = get_optionflags(self)
        runner = _get_runner(
            verbose=False,
            optionflags=optionflags,
            checker=_get_checker(),
            continue_on_failure=_get_continue_on_failure(self.config),
        )
        for test in finder.find(module, module.__name__):
            if test.examples:  # skip empty doctests and cuda
                yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)