# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

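"""Generate the dynamic CircleCI config written to ``generated_config.yml``.

The job definitions below are turned into CircleCI YAML based on the test lists produced by the
test fetcher (``test_list.txt``, ``filtered_test_list.txt``, ``examples_test_list.txt``,
``doctest_list.txt``, ``test_repo_utils.txt``) found in the folder passed to
``create_circleci_config`` (``--fetcher_folder``, defaulting to the current working directory).
"""
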
import argparse
import copy
import glob
import os
import random
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

import yaml


COMMON_ENV_VARIABLES = {
    "OMP_NUM_THREADS": 1,
    "TRANSFORMERS_IS_CI": True,
    "PYTEST_TIMEOUT": 120,
    "RUN_PIPELINE_TESTS": False,
    "RUN_PT_TF_CROSS_TESTS": False,
    "RUN_PT_FLAX_CROSS_TESTS": False,
}
COMMON_PYTEST_OPTIONS = {"max-worker-restart": 0, "dist": "loadfile", "s": None}
DEFAULT_DOCKER_IMAGE = [{"image": "cimg/python:3.8.12"}]


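# Placeholder job used when no test job is selected, so the generated workflow never ends up empty.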
class EmptyJob:
    job_name = "empty"

    def to_dict(self):
        return {
            "working_directory": "~/transformers",
            "docker": copy.deepcopy(DEFAULT_DOCKER_IMAGE),
            "steps":["checkout"],
        }


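# Description of a single CircleCI job; `to_dict()` renders it as the job entry used in the generated config.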
@dataclass
class CircleCIJob:
    name: str
    additional_env: Dict[str, Any] = None
    cache_name: str = None
    cache_version: str = "0.6"
    docker_image: List[Dict[str, str]] = None
    install_steps: List[str] = None
    marker: Optional[str] = None
    parallelism: Optional[int] = 1
    pytest_num_workers: int = 8
    pytest_options: Dict[str, Any] = None
    resource_class: Optional[str] = "xlarge"
    tests_to_run: Optional[List[str]] = None
    working_directory: str = "~/transformers"
    # This should only be used for the doctest job!
    command_timeout: Optional[int] = None

    def __post_init__(self):
        # Deal with defaults for mutable attributes.
        if self.additional_env is None:
            self.additional_env = {}
        if self.cache_name is None:
            self.cache_name = self.name
        if self.docker_image is None:
            # Let's avoid changing the default list and make a copy.
            self.docker_image = copy.deepcopy(DEFAULT_DOCKER_IMAGE)
        if self.install_steps is None:
            self.install_steps = []
        if self.pytest_options is None:
            self.pytest_options = {}
        if isinstance(self.tests_to_run, str):
            self.tests_to_run = [self.tests_to_run]
        if self.parallelism is None:
            self.parallelism = 1

    def to_dict(self):
        env = COMMON_ENV_VARIABLES.copy()
        env.update(self.additional_env)
        job = {
            "working_directory": self.working_directory,
            "docker": self.docker_image,
            "environment": env,
        }
        if self.resource_class is not None:
            job["resource_class"] = self.resource_class
        if self.parallelism is not None:
            job["parallelism"] = self.parallelism
        steps = [
            "checkout",
            {"attach_workspace": {"at": "~/transformers/test_preparation"}},
            {
                "restore_cache": {
                    "keys": [
                        f"v{self.cache_version}-{self.cache_name}-" + '{{ checksum "setup.py" }}',
                        f"v{self.cache_version}-{self.cache_name}-",
                    ]
                }
            },
        ]
        steps.extend([{"run": l} for l in self.install_steps])
        steps.append(
            {
                "save_cache": {
                    "key": f"v{self.cache_version}-{self.cache_name}-" + '{{ checksum "setup.py" }}',
                    "paths": ["~/.cache/pip"],
                }
            }
        )
        steps.append({"run": {"name": "Show installed libraries and their versions", "command": "pip freeze | tee installed.txt"}})
        steps.append({"store_artifacts": {"path": "~/transformers/installed.txt"}})

        all_options = {**COMMON_PYTEST_OPTIONS, **self.pytest_options}
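        # Render pytest options as CLI flags: `--key=value` when a value is given, otherwise a bare `-key` flag.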
        pytest_flags = [
            f"--{key}={value}" if (value is not None or key in ["doctest-modules"]) else f"-{key}"
            for key, value in all_options.items()
        ]
        pytest_flags.append(
            f"--make-reports={self.name}" if "examples" in self.name else f"--make-reports=tests_{self.name}"
        )
        test_command = ""
        if self.command_timeout:
            test_command = f"timeout {self.command_timeout} "
        test_command += f"python -m pytest -n {self.pytest_num_workers} " + " ".join(pytest_flags)

        if self.parallelism == 1:
            if self.tests_to_run is None:
                test_command += " << pipeline.parameters.tests_to_run >>"
            else:
                test_command += " " + " ".join(self.tests_to_run)
        else:
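            # parallelism > 1: build an explicit test list, shard it with `circleci tests split`, and have each
            # executor run its own shard (read back from `splitted_tests.txt`).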
            # We need explicit list instead of `pipeline.parameters.tests_to_run` (only available at job runtime)
            tests = self.tests_to_run
            if tests is None:
                folder = os.environ["test_preparation_dir"]
                test_file = os.path.join(folder, "filtered_test_list.txt")
                if os.path.exists(test_file):
                    with open(test_file) as f:
                        tests = f.read().split(" ")

            # expand the test list
            if tests == ["tests"]:
                tests = [os.path.join("tests", x) for x in os.listdir("tests")]
            expanded_tests = []
            for test in tests:
                if test.endswith(".py"):
                    expanded_tests.append(test)
                elif test == "tests/models":
                    expanded_tests.extend([os.path.join(test, x) for x in os.listdir(test)])
                elif test == "tests/pipelines":
                    expanded_tests.extend([os.path.join(test, x) for x in os.listdir(test)])
                else:
                    expanded_tests.append(test)
            # Avoid long tests always being collected together
            random.shuffle(expanded_tests)
            tests = " ".join(expanded_tests)

            # Each executor should run ~10 tests
            n_executors = max(len(expanded_tests) // 10, 1)
            # Avoid empty test list on some executor(s) or launching too many executors
            if n_executors > self.parallelism:
                n_executors = self.parallelism
            job["parallelism"] = n_executors

            # Need to be newline separated for the command `circleci tests split` below
            command = f'echo {tests} | tr " " "\\n" >> tests.txt'
            steps.append({"run": {"name": "Get tests", "command": command}})

            command = 'TESTS=$(circleci tests split tests.txt) && echo $TESTS > splitted_tests.txt'
            steps.append({"run": {"name": "Split tests", "command": command}})

            steps.append({"store_artifacts": {"path": "~/transformers/tests.txt"}})
            steps.append({"store_artifacts": {"path": "~/transformers/splitted_tests.txt"}})

            test_command = ""
            if self.timeout:
                test_command = f"timeout {self.timeout} "
            test_command += f"python -m pytest -n {self.pytest_num_workers} " + " ".join(pytest_flags)
            test_command += " $(cat splitted_tests.txt)"
        if self.marker is not None:
            test_command += f" -m {self.marker}"

        if self.name == "pr_documentation_tests":
            # can't use ` | tee tests_output.txt` as usual
            test_command += " > tests_output.txt"
            # Save the return code, so we can check if it is timeout in the next step.
            test_command += '; touch "$?".txt'
            # Never fail the test step for the doctest job. We will check the results in the next step, and fail that
            # step instead if the actual test failures are found. This is to avoid the timeout being reported as test
            # failure.
            test_command = f"({test_command}) || true"
        else:
            test_command += " | tee tests_output.txt"
        steps.append({"run": {"name": "Run tests", "command": test_command}})

        # return code `124` means the previous (pytest run) step is timeout
        if self.name == "pr_documentation_tests":
            checkout_doctest_command = 'if [ -s reports/tests_pr_documentation_tests/failures_short.txt ]; '
            checkout_doctest_command += 'then echo "some test failed"; '
            checkout_doctest_command += 'cat reports/tests_pr_documentation_tests/failures_short.txt; '
            checkout_doctest_command += 'cat reports/tests_pr_documentation_tests/summary_short.txt; exit -1; '
            checkout_doctest_command += 'elif [ -s reports/tests_pr_documentation_tests/stats.txt ]; then echo "All tests pass!"; '
            checkout_doctest_command += 'elif [ -f 124.txt ]; then echo "doctest timeout!"; else echo "other fatal error"; exit -1; fi;'
            steps.append({"run": {"name": "Check doctest results", "command": checkout_doctest_command}})

        steps.append({"store_artifacts": {"path": "~/transformers/tests_output.txt"}})
        steps.append({"store_artifacts": {"path": "~/transformers/reports"}})
        job["steps"] = steps
        return job

    @property
    def job_name(self):
        return self.name if "examples" in self.name else f"tests_{self.name}"


# JOBS
torch_and_tf_job = CircleCIJob(
    "torch_and_tf",
    additional_env={"RUN_PT_TF_CROSS_TESTS": True},
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng git-lfs cmake",
        "git lfs install",
        "pip install --upgrade pip",
        "pip install .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision]",
        "pip install tensorflow_probability",
        "pip install git+https://github.com/huggingface/accelerate",
    ],
    marker="is_pt_tf_cross_test",
    pytest_options={"rA": None, "durations": 0},
)


torch_and_flax_job = CircleCIJob(
    "torch_and_flax",
    additional_env={"RUN_PT_FLAX_CROSS_TESTS": True},
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng",
        "pip install --upgrade pip",
        "pip install .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision]",
        "pip install git+https://github.com/huggingface/accelerate",
    ],
    marker="is_pt_flax_cross_test",
    pytest_options={"rA": None, "durations": 0},
)


torch_job = CircleCIJob(
    "torch",
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time",
        "pip install --upgrade pip",
        "pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]",
        "pip install git+https://github.com/huggingface/accelerate",
    ],
    parallelism=1,
    pytest_num_workers=3,
)


tf_job = CircleCIJob(
    "tf",
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng cmake",
        "pip install --upgrade pip",
        "pip install .[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]",
        "pip install tensorflow_probability",
    ],
    parallelism=1,
    pytest_num_workers=6,
    pytest_options={"rA": None},
)


flax_job = CircleCIJob(
    "flax",
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng",
        "pip install --upgrade pip",
        "pip install .[flax,testing,sentencepiece,flax-speech,vision]",
    ],
    parallelism=1,
    pytest_options={"rA": None},
)


pipelines_torch_job = CircleCIJob(
    "pipelines_torch",
    additional_env={"RUN_PIPELINE_TESTS": True},
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng",
        "pip install --upgrade pip",
        "pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm,video]",
    ],
    pytest_options={"rA": None},
    marker="is_pipeline_test",
)


pipelines_tf_job = CircleCIJob(
    "pipelines_tf",
    additional_env={"RUN_PIPELINE_TESTS": True},
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y cmake",
        "pip install --upgrade pip",
        "pip install .[sklearn,tf-cpu,testing,sentencepiece,vision]",
        "pip install tensorflow_probability",
    ],
    pytest_options={"rA": None},
    marker="is_pipeline_test",
)


custom_tokenizers_job = CircleCIJob(
    "custom_tokenizers",
    additional_env={"RUN_CUSTOM_TOKENIZERS": True},
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y cmake",
        {
            "name": "install jumanpp",
            "command":
                "wget https://github.com/ku-nlp/jumanpp/releases/download/v2.0.0-rc3/jumanpp-2.0.0-rc3.tar.xz\n"
                "tar xvf jumanpp-2.0.0-rc3.tar.xz\n"
                "mkdir jumanpp-2.0.0-rc3/bld\n"
                "cd jumanpp-2.0.0-rc3/bld\n"
                "sudo cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local\n"
                "sudo make install\n",
        },
        "pip install --upgrade pip",
        "pip install .[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba]",
        "python -m unidic download",
    ],
    parallelism=None,
    resource_class=None,
    tests_to_run=[
        "./tests/models/bert_japanese/test_tokenization_bert_japanese.py",
        "./tests/models/openai/test_tokenization_openai.py",
        "./tests/models/clip/test_tokenization_clip.py",
    ],
)


examples_torch_job = CircleCIJob(
    "examples_torch",
    cache_name="torch_examples",
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng",
        "pip install --upgrade pip",
        "pip install .[sklearn,torch,sentencepiece,testing,torch-speech]",
        "pip install -r examples/pytorch/_tests_requirements.txt",
    ],
)


examples_tensorflow_job = CircleCIJob(
    "examples_tensorflow",
    cache_name="tensorflow_examples",
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y cmake",
        "pip install --upgrade pip",
        "pip install .[sklearn,tensorflow,sentencepiece,testing]",
        "pip install -r examples/tensorflow/_tests_requirements.txt",
    ],
)


examples_flax_job = CircleCIJob(
    "examples_flax",
    cache_name="flax_examples",
    install_steps=[
        "pip install --upgrade pip",
        "pip install .[flax,testing,sentencepiece]",
        "pip install -r examples/flax/_tests_requirements.txt",
    ],
)


hub_job = CircleCIJob(
    "hub",
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install git-lfs",
        'git config --global user.email "ci@dummy.com"',
        'git config --global user.name "ci"',
        "pip install --upgrade pip",
        "pip install .[torch,sentencepiece,testing]",
    ],
    marker="is_staging_test",
    pytest_num_workers=1,
)


onnx_job = CircleCIJob(
    "onnx",
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y cmake",
        "pip install --upgrade pip",
        "pip install .[torch,tf,testing,sentencepiece,onnxruntime,vision,rjieba]",
    ],
    pytest_options={"k onnx": None},
    pytest_num_workers=1,
)


exotic_models_job = CircleCIJob(
    "exotic_models",
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev",
        "pip install --upgrade pip",
        "pip install .[torch,testing,vision]",
        "pip install torchvision",
        "pip install scipy",
        "pip install 'git+https://github.com/facebookresearch/detectron2.git'",
        "sudo apt install tesseract-ocr",
        "pip install pytesseract",
        "pip install natten",
    ],
    tests_to_run=[
        "tests/models/*layoutlmv*",
        "tests/models/*nat",
        "tests/models/deta",
    ],
    pytest_num_workers=1,
    pytest_options={"durations": 100},
)


repo_utils_job = CircleCIJob(
    "repo_utils",
    install_steps=[
        "pip install --upgrade pip",
        "pip install .[quality,testing,torch]",
    ],
    parallelism=None,
    pytest_num_workers=1,
    resource_class="large",
    tests_to_run="tests/repo_utils",
)


# We also include a `dummy.py` file in the files to be doc-tested to prevent edge case failure. Otherwise, pytest
# hangs forever during test collection while showing `collecting 0 items / 21 errors`. (To see this, we have to remove
# the bash output redirection.)
py_command = 'from utils.tests_fetcher import get_doctest_files; to_test = get_doctest_files() + ["dummy.py"]; to_test = " ".join(to_test); print(to_test)'
py_command = f"$(python3 -c '{py_command}')"
command = f'echo "{py_command}" > pr_documentation_tests_temp.txt'
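# Note: the `$(python3 -c ...)` substitution inside the double-quoted echo is evaluated by the shell at CI runtime
# (in the "Get files to test" step below), not while this config is being generated.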
doc_test_job = CircleCIJob(
    "pr_documentation_tests",
    additional_env={"TRANSFORMERS_VERBOSITY": "error", "DATASETS_VERBOSITY": "error", "SKIP_CUDA_DOCTEST": "1"},
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time",
        "pip install --upgrade pip",
        "pip install -e .[dev]",
        "pip install git+https://github.com/huggingface/accelerate",
        "pip install --upgrade pytest pytest-sugar",
        "pip install natten",
        "find -name __pycache__ -delete",
        "find . -name \*.pyc -delete",
        # Add an empty file to keep the test step running correctly even no file is selected to be tested.
        "touch dummy.py",
        {
            "name": "Get files to test",
            "command": command,
        },
        {
            "name": "Show information in `Get files to test`",
            "command":
                "cat pr_documentation_tests_temp.txt"
        },
        {
            "name": "Get the last line in `pr_documentation_tests.txt`",
            "command":
                "tail -n1 pr_documentation_tests_temp.txt | tee pr_documentation_tests.txt"
        },
    ],
    tests_to_run="$(cat pr_documentation_tests.txt)",  # noqa
    pytest_options={"-doctest-modules": None, "doctest-glob": "*.mdx", "dist": "loadfile", "rvsA": None},
    command_timeout=1200,  # test cannot run longer than 1200 seconds
    pytest_num_workers=1,
)

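# Job groups: a group is added to the generated workflow only when its corresponding test list file has content
# (see `create_circleci_config` below).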
REGULAR_TESTS = [
    torch_and_tf_job,
    torch_and_flax_job,
    torch_job,
    tf_job,
    flax_job,
    custom_tokenizers_job,
    hub_job,
    onnx_job,
    exotic_models_job,
]
EXAMPLES_TESTS = [
    examples_torch_job,
    examples_tensorflow_job,
    examples_flax_job,
]
PIPELINE_TESTS = [
    pipelines_torch_job,
    pipelines_tf_job,
]
REPO_UTIL_TESTS = [repo_utils_job]
DOC_TESTS = [doc_test_job]


def create_circleci_config(folder=None):
    if folder is None:
        folder = os.getcwd()
    # Used in CircleCIJob.to_dict() to expand the test list (for using parallelism)
    os.environ["test_preparation_dir"] = folder
    jobs = []
    all_test_file = os.path.join(folder, "test_list.txt")
    if os.path.exists(all_test_file):
        with open(all_test_file) as f:
            all_test_list = f.read()
    else:
        all_test_list = []
    if len(all_test_list) > 0:
        jobs.extend(PIPELINE_TESTS)

    test_file = os.path.join(folder, "filtered_test_list.txt")
    if os.path.exists(test_file):
        with open(test_file) as f:
            test_list = f.read()
    else:
        test_list = []
    if len(test_list) > 0:
        jobs.extend(REGULAR_TESTS)

        extended_tests_to_run = set(test_list.split())
        # Extend the test files for cross test jobs
        for job in jobs:
            if job.job_name in ["tests_torch_and_tf", "tests_torch_and_flax"]:
                for test_path in copy.copy(extended_tests_to_run):
                    dir_path, fn = os.path.split(test_path)
                    if fn.startswith("test_modeling_tf_"):
                        fn = fn.replace("test_modeling_tf_", "test_modeling_")
                    elif fn.startswith("test_modeling_flax_"):
                        fn = fn.replace("test_modeling_flax_", "test_modeling_")
                    else:
                        if job.job_name == "test_torch_and_tf":
                            fn = fn.replace("test_modeling_", "test_modeling_tf_")
                        elif job.job_name == "test_torch_and_flax":
                            fn = fn.replace("test_modeling_", "test_modeling_flax_")
                    new_test_file = str(os.path.join(dir_path, fn))
                    if os.path.isfile(new_test_file):
                        if new_test_file not in extended_tests_to_run:
                            extended_tests_to_run.add(new_test_file)
        extended_tests_to_run = sorted(extended_tests_to_run)
        for job in jobs:
            if job.job_name in ["tests_torch_and_tf", "tests_torch_and_flax"]:
                job.tests_to_run = extended_tests_to_run
        fn = "filtered_test_list_cross_tests.txt"
        f_path = os.path.join(folder, fn)
        with open(f_path, "w") as fp:
            fp.write(" ".join(extended_tests_to_run))

    example_file = os.path.join(folder, "examples_test_list.txt")
    if os.path.exists(example_file) and os.path.getsize(example_file) > 0:
        with open(example_file, "r", encoding="utf-8") as f:
            example_tests = f.read().split(" ")
        for job in EXAMPLES_TESTS:
            framework = job.name.replace("examples_", "").replace("torch", "pytorch")
            if example_tests == "all":
                job.tests_to_run = [f"examples/{framework}"]
            else:
                job.tests_to_run = [f for f in example_tests if f.startswith(f"examples/{framework}")]
            
            if len(job.tests_to_run) > 0:
                jobs.append(job)

    doctest_file = os.path.join(folder, "doctest_list.txt")
    if os.path.exists(doctest_file):
        with open(doctest_file) as f:
            doctest_list = f.read()
    else:
        doctest_list = []
    if len(doctest_list) > 0:
        jobs.extend(DOC_TESTS)

    repo_util_file = os.path.join(folder, "test_repo_utils.txt")
    if os.path.exists(repo_util_file) and os.path.getsize(repo_util_file) > 0:
        jobs.extend(REPO_UTIL_TESTS)

    if len(jobs) == 0:
        jobs = [EmptyJob()]
    config = {"version": "2.1"}
    config["parameters"] = {
        # Only used to accept the parameters from the trigger
        "nightly": {"type": "boolean", "default": False},
        "tests_to_run": {"type": "string", "default": test_list},
    }
    config["jobs"] = {j.job_name: j.to_dict() for j in jobs}
    config["workflows"] = {"version": 2, "run_tests": {"jobs": [j.job_name for j in jobs]}}
    with open(os.path.join(folder, "generated_config.yml"), "w") as f:
        f.write(yaml.dump(config, indent=2, width=1000000, sort_keys=False))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--fetcher_folder",
        type=str,
        default=None,
        help="Folder containing the test lists produced by the test fetcher (defaults to the current working directory).",
    )
    args = parser.parse_args()

    create_circleci_config(args.fetcher_folder)