version: 2.1
orbs:
    gcp-gke: circleci/gcp-gke@1.0.4
    go: circleci/go@1.3.0

# TPU REFERENCES
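# Reusable steps (YAML anchors) for the TPU jobs at the end of this file: check out
# GoogleCloudPlatform/ml-testing-accelerators, build and push the test image to GCR,
# deploy the test job to the GKE cluster, and clean up stale jobs.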
references:
    checkout_ml_testing: &checkout_ml_testing
        run:
            name: Checkout ml-testing-accelerators
            command: |
                git clone https://github.com/GoogleCloudPlatform/ml-testing-accelerators.git
                cd ml-testing-accelerators
                git fetch origin 5e88ac24f631c27045e62f0e8d5dfcf34e425e25:stable
                git checkout stable
    build_push_docker: &build_push_docker
        run:
            name: Configure Docker
            command: |
                gcloud --quiet auth configure-docker
                cd docker/transformers-pytorch-tpu
                if [ -z "$CIRCLE_PR_NUMBER" ]; then docker build --tag "$GCR_IMAGE_PATH:$CIRCLE_WORKFLOW_JOB_ID" -f Dockerfile --build-arg "TEST_IMAGE=1" . ; else docker build --tag "$GCR_IMAGE_PATH:$CIRCLE_WORKFLOW_JOB_ID" -f Dockerfile --build-arg "TEST_IMAGE=1" --build-arg "GITHUB_REF=pull/$CIRCLE_PR_NUMBER/head" . ; fi
                docker push "$GCR_IMAGE_PATH:$CIRCLE_WORKFLOW_JOB_ID"
    deploy_cluster: &deploy_cluster
        run:
            name: Deploy the job on the kubernetes cluster
            command: |
                go get github.com/google/go-jsonnet/cmd/jsonnet && \
                export PATH=$PATH:$HOME/go/bin && \
                kubectl create -f docker/transformers-pytorch-tpu/dataset.yaml || true && \
                job_name=$(jsonnet -J ml-testing-accelerators/ docker/transformers-pytorch-tpu/bert-base-cased.jsonnet --ext-str image=$GCR_IMAGE_PATH --ext-str image-tag=$CIRCLE_WORKFLOW_JOB_ID | kubectl create -f -) && \
                job_name=${job_name#job.batch/} && \
                job_name=${job_name% created} && \
                echo "Waiting on kubernetes job: $job_name" && \
                i=0 && \
                # 30 checks spaced 30s apart = 900s total.
                max_checks=30 && \
                status_code=2 && \
                # Check on the job periodically. Set the status code depending on what
                # happened to the job in Kubernetes. If we try max_checks times and
                # still the job hasn't finished, give up and return the starting
                # non-zero status code.
                while [ $i -lt $max_checks ]; do ((i++)); if kubectl get jobs $job_name -o jsonpath='Failed:{.status.failed}' | grep "Failed:1"; then status_code=1 && break; elif kubectl get jobs $job_name -o jsonpath='Succeeded:{.status.succeeded}' | grep "Succeeded:1" ; then status_code=0 && break; else echo "Job not finished yet"; fi; sleep 30; done && \
                echo "Done waiting. Job status code: $status_code" && \
                pod_name=$(kubectl get po -l controller-uid=`kubectl get job $job_name -o "jsonpath={.metadata.labels.controller-uid}"` | awk 'match($0,!/NAME/) {print $1}') && \
                echo "GKE pod name: $pod_name" && \
                kubectl logs -f $pod_name --container=train
                echo "Done with log retrieval attempt." && \
                gcloud container images delete "$GCR_IMAGE_PATH:$CIRCLE_WORKFLOW_JOB_ID" --force-delete-tags && \
                exit $status_code
    delete_gke_jobs: &delete_gke_jobs
        run:
            name: Delete GKE Jobs
            command: |
                # Match jobs whose age matches patterns like '1h' or '1d', i.e. any job
                # that has been around longer than 1hr. First print all columns for
                # matches, then execute the delete.
                kubectl get job | awk 'match($4,/[0-9]+[dh]/) {print $0}'
                kubectl delete job $(kubectl get job | awk 'match($4,/[0-9]+[dh]/) {print $1}')




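# Most test jobs come in pairs: the base job runs only the tests selected by
# utils/tests_fetcher.py (written to test_list.txt), while the matching *_all job
# runs the full suite and is scheduled by the nightly workflow at the bottom.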
jobs:
    run_tests_torch_and_tf:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            RUN_PT_TF_CROSS_TESTS: yes
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-torch_and_tf-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng git-lfs
            - run: git lfs install
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision]
            - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html
            - run: pip install tensorflow_probability
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - run: pip install git+https://github.com/huggingface/accelerate
            - save_cache:
                key: v0.5-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: python utils/tests_fetcher.py | tee test_preparation.txt
            - store_artifacts:
                  path: ~/transformers/test_preparation.txt
            - run: |
                  if [ -f test_list.txt ]; then
                    python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf $(cat test_list.txt) -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt
                  fi
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_tests_torch_and_tf_all:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            RUN_PT_TF_CROSS_TESTS: yes
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-torch_and_tf-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng git-lfs
            - run: git lfs install
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision]
            - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html
            - run: pip install tensorflow_probability
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - run: pip install git+https://github.com/huggingface/accelerate
            - save_cache:
                key: v0.5-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: |
                  python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf tests -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_tests_torch_and_flax:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            RUN_PT_FLAX_CROSS_TESTS: yes
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-torch_and_flax-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision]
            - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - run: pip install git+https://github.com/huggingface/accelerate
            - save_cache:
                key: v0.5-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: python utils/tests_fetcher.py | tee test_preparation.txt
            - store_artifacts:
                  path: ~/transformers/test_preparation.txt
            - run: |
                  if [ -f test_list.txt ]; then
                    python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax $(cat test_list.txt) -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt
                  fi
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_tests_torch_and_flax_all:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            RUN_PT_FLAX_CROSS_TESTS: yes
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-torch_and_flax-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision]
            - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - run: pip install git+https://github.com/huggingface/accelerate
            - save_cache:
                key: v0.5-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: |
                  python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax tests -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_tests_torch:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-torch-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]
            - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - run: pip install git+https://github.com/huggingface/accelerate
            - save_cache:
                  key: v0.5-torch-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: python utils/tests_fetcher.py | tee test_preparation.txt
            - store_artifacts:
                  path: ~/transformers/test_preparation.txt
            - run: |
                  if [ -f test_list.txt ]; then
                    python -m pytest -n 3 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_torch $(cat test_list.txt) | tee tests_output.txt
                  fi
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_tests_torch_all:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-torch-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]
            - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - run: pip install git+https://github.com/huggingface/accelerate
            - save_cache:
                  key: v0.5-torch-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: |
                  python -m pytest -n 3 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_torch tests | tee tests_output.txt
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_tests_tf:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-tf-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]
            - run: pip install tensorflow_probability
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - save_cache:
                  key: v0.5-tf-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: python utils/tests_fetcher.py | tee test_preparation.txt
            - store_artifacts:
                  path: ~/transformers/test_preparation.txt
            - run: |
                  if [ -f test_list.txt ]; then
                    python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_tf $(cat test_list.txt) | tee tests_output.txt
                  fi
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_tests_tf_all:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-tf-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]
            - run: pip install tensorflow_probability
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - save_cache:
                  key: v0.5-tf-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: |
                  python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_tf tests | tee tests_output.txt
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_tests_flax:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                keys:
                    - v0.5-flax-{{ checksum "setup.py" }}
                    - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
            - run: pip install --upgrade pip
            - run: pip install .[flax,testing,sentencepiece,flax-speech,vision]
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - save_cache:
                  key: v0.5-flax-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: python utils/tests_fetcher.py | tee test_preparation.txt
            - store_artifacts:
                  path: ~/transformers/test_preparation.txt
            - run: |
                  if [ -f test_list.txt ]; then
                    python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_flax $(cat test_list.txt) | tee tests_output.txt
                  fi
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_tests_flax_all:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                keys:
                    - v0.5-flax-{{ checksum "setup.py" }}
                    - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
            - run: pip install --upgrade pip
            - run: pip install .[flax,testing,sentencepiece,vision,flax-speech]
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - save_cache:
                  key: v0.5-flax-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: |
                  python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_flax tests | tee tests_output.txt
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_tests_pipelines_torch:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            RUN_PIPELINE_TESTS: yes
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-torch-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]
            - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - save_cache:
                  key: v0.5-torch-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: python utils/tests_fetcher.py | tee test_preparation.txt
            - store_artifacts:
                  path: ~/transformers/test_preparation.txt
            - run: |
                  if [ -f test_list.txt ]; then
                    python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch -m is_pipeline_test $(cat test_list.txt) | tee tests_output.txt
                  fi
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_tests_pipelines_torch_all:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            RUN_PIPELINE_TESTS: yes
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-torch-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]
            - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - save_cache:
                  key: v0.5-torch-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: |
                  python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch -m is_pipeline_test tests | tee tests_output.txt
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_tests_pipelines_tf:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            RUN_PIPELINE_TESTS: yes
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-tf-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,tf-cpu,testing,sentencepiece]
            - run: pip install tensorflow_probability
            - save_cache:
                  key: v0.5-tf-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: python utils/tests_fetcher.py | tee test_preparation.txt
            - store_artifacts:
                  path: ~/transformers/test_preparation.txt
            - run: |
                  if [ -f test_list.txt ]; then
                    python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf $(cat test_list.txt) -m is_pipeline_test | tee tests_output.txt
                  fi
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_tests_pipelines_tf_all:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            RUN_PIPELINE_TESTS: yes
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-tf-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,tf-cpu,testing,sentencepiece]
            - run: pip install tensorflow_probability
            - save_cache:
                  key: v0.5-tf-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: |
                  python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf tests -m is_pipeline_test | tee tests_output.txt
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

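    # Tokenizer tests that need extra dependencies (Japanese/unidic, OpenAI GPT, CLIP).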
    run_tests_custom_tokenizers:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            RUN_CUSTOM_TOKENIZERS: yes
            TRANSFORMERS_IS_CI: yes
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-custom_tokenizers-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: pip install --upgrade pip
            - run: pip install .[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba]
            - run: python -m unidic download
            - save_cache:
                  key: v0.5-custom_tokenizers-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: |
                  if [ -f test_list.txt ]; then
                    python -m pytest --max-worker-restart=0 -s --make-reports=tests_custom_tokenizers ./tests/test_tokenization_bert_japanese.py ./tests/test_tokenization_openai.py | tee tests_output.txt
                  fi
            - run: |
                  if [ -f test_list.txt ]; then
                    python -m pytest -n 1 --max-worker-restart=0 tests/test_tokenization_clip.py --dist=loadfile -s --make-reports=tests_tokenization_clip --durations=100 | tee tests_output.txt
                  fi
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_examples_torch:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-torch_examples-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,torch,sentencepiece,testing,torch-speech]
            - run: pip install -r examples/pytorch/_tests_requirements.txt
            - save_cache:
                  key: v0.5-torch_examples-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: python utils/tests_fetcher.py --filters examples tests | tee test_preparation.txt
            - store_artifacts:
                  path: ~/transformers/test_preparation.txt
            - run: |
                  if [ -f test_list.txt ]; then
                    python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_torch ./examples/pytorch/ | tee examples_output.txt
                  fi
            - store_artifacts:
                  path: ~/transformers/examples_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_examples_torch_all:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-torch_examples-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,torch,sentencepiece,testing,torch-speech]
            - run: pip install -r examples/pytorch/_tests_requirements.txt
            - save_cache:
                  key: v0.5-torch_examples-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: |
                  TRANSFORMERS_IS_CI=1 python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_torch ./examples/pytorch/ | tee examples_output.txt
            - store_artifacts:
                  path: ~/transformers/examples_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_examples_flax:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                keys:
                    - v0.5-flax_examples-{{ checksum "setup.py" }}
                    - v0.5-{{ checksum "setup.py" }}
            - run: pip install --upgrade pip
            - run: pip install .[flax,testing,sentencepiece]
            - run: pip install -r examples/flax/_tests_requirements.txt
            - save_cache:
                  key: v0.5-flax_examples-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: python utils/tests_fetcher.py --filters examples tests | tee test_preparation.txt
            - store_artifacts:
                  path: ~/transformers/test_preparation.txt
            - run: |
                  if [ -f test_list.txt ]; then
                    python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_flax ./examples/flax/ | tee flax_examples_output.txt
                  fi
            - store_artifacts:
                  path: ~/transformers/flax_examples_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_examples_flax_all:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                keys:
                    - v0.5-flax_examples-{{ checksum "setup.py" }}
                    - v0.5-{{ checksum "setup.py" }}
            - run: pip install --upgrade pip
            - run: pip install .[flax,testing,sentencepiece]
            - run: pip install -r examples/flax/_tests_requirements.txt
            - save_cache:
                  key: v0.5-flax_examples-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: |
                  TRANSFORMERS_IS_CI=1 python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_flax ./examples/flax/ | tee flax_examples_output.txt
            - store_artifacts:
                  path: ~/transformers/flax_examples_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

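    # Hub integration tests, run against the staging endpoint (HUGGINGFACE_CO_STAGING).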
    run_tests_hub:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            HUGGINGFACE_CO_STAGING: yes
            RUN_GIT_LFS_TESTS: yes
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-hub-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y git-lfs
            - run: |
                git config --global user.email "ci@dummy.com"
                git config --global user.name "ci"
            - run: pip install --upgrade pip
            - run: pip install .[torch,sentencepiece,testing]
            - save_cache:
                  key: v0.5-hub-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: python utils/tests_fetcher.py | tee test_preparation.txt
            - store_artifacts:
                  path: ~/transformers/test_preparation.txt
            - run: |
                  if [ -f test_list.txt ]; then
                    python -m pytest --max-worker-restart=0 -sv --make-reports=tests_hub $(cat test_list.txt) -m is_staging_test | tee tests_output.txt
                  fi
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_tests_hub_all:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            HUGGINGFACE_CO_STAGING: yes
            RUN_GIT_LFS_TESTS: yes
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-hub-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y git-lfs
            - run: |
                git config --global user.email "ci@dummy.com"
                git config --global user.name "ci"
            - run: pip install --upgrade pip
            - run: pip install .[torch,sentencepiece,testing]
            - save_cache:
                  key: v0.5-hub-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: |
                  python -m pytest --max-worker-restart=0 -sv --make-reports=tests_hub tests -m is_staging_test | tee tests_output.txt
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_tests_onnxruntime:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-torch-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: pip install --upgrade pip
            - run: pip install .[torch,testing,sentencepiece,onnxruntime,vision,rjieba]
            - save_cache:
                  key: v0.5-onnx-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: python utils/tests_fetcher.py | tee test_preparation.txt
            - store_artifacts:
                  path: ~/transformers/test_preparation.txt
            - run: |
                  if [ -f test_list.txt ]; then
                    python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_onnx $(cat test_list.txt) -k onnx | tee tests_output.txt
                  fi
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

    run_tests_onnxruntime_all:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-torch-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: pip install --upgrade pip
            - run: pip install .[torch,testing,sentencepiece,onnxruntime,vision]
            - save_cache:
                  key: v0.5-onnx-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: |
                  python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_onnx tests -k onnx | tee tests_output.txt
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

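    # Style checks: black, isort, flake8, doc-builder and the custom utils/ scripts.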
    check_code_quality:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        resource_class: large
        environment:
            TRANSFORMERS_IS_CI: yes
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-code_quality-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: pip install --upgrade pip
            - run: pip install .[all,quality]
            - save_cache:
                  key: v0.5-code_quality-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: black --check --preview examples tests src utils
            - run: isort --check-only examples tests src utils
            - run: python utils/custom_init_isort.py --check_only
            - run: python utils/sort_auto_mappings.py --check_only
            - run: flake8 examples tests src utils
            - run: doc-builder style src/transformers docs/source --max_len 119 --check_only --path_to_docs docs/source
            - run: python utils/check_doc_toc.py

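    # Repository consistency checks (copies, doc table, dummies, inits, config docstrings).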
    check_repository_consistency:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        resource_class: large
        environment:
            TRANSFORMERS_IS_CI: yes
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-repository_consistency-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: pip install --upgrade pip
            - run: pip install .[all,quality]
            - save_cache:
                  key: v0.5-repository_consistency-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: python utils/check_copies.py
            - run: python utils/check_table.py
            - run: python utils/check_dummies.py
            - run: python utils/check_repo.py
            - run: python utils/check_inits.py
            - run: python utils/check_config_docstrings.py
            - run: make deps_table_check_updated
            - run: python utils/tests_fetcher.py --sanity_check

    run_tests_layoutlmv2_and_v3:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                  keys:
                      - v0.5-torch-{{ checksum "setup.py" }}
                      - v0.5-{{ checksum "setup.py" }}
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
            - run: pip install --upgrade pip
            - run: pip install .[torch,testing,vision]
            - run: pip install torchvision
            - run: python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
            - run: sudo apt install tesseract-ocr
            - run: pip install pytesseract
            - save_cache:
                  key: v0.5-torch-{{ checksum "setup.py" }}
                  paths:
                      - '~/.cache/pip'
            - run: python utils/tests_fetcher.py | tee test_preparation.txt
            - store_artifacts:
                  path: ~/transformers/test_preparation.txt
            - run: |
                  if [ -f test_list.txt ]; then
                    python -m pytest -n 1 --max-worker-restart=0 tests/models/*layoutlmv* --dist=loadfile -s --make-reports=tests_layoutlmv2_and_v3 --durations=100 | tee tests_output.txt
                  fi
            - store_artifacts:
                  path: ~/transformers/tests_output.txt
            - store_artifacts:
                  path: ~/transformers/reports

# TPU JOBS
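# Build and push the TPU test image to GCR, then deploy and monitor the test job on
# the GKE cluster, using the anchors defined under `references` above.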
    run_examples_tpu:
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - go/install
            - *checkout_ml_testing
            - gcp-gke/install
            - gcp-gke/update-kubeconfig-with-credentials:
                  cluster: $GKE_CLUSTER
                  perform-login: true
            - setup_remote_docker
            - *build_push_docker
            - *deploy_cluster

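    # Deletes GKE jobs that have been around for an hour or more (see *delete_gke_jobs).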
    cleanup-gke-jobs:
        docker:
            - image: cimg/python:3.7.12
        steps:
            - gcp-gke/install
            - gcp-gke/update-kubeconfig-with-credentials:
                  cluster: $GKE_CLUSTER
                  perform-login: true
            - *delete_gke_jobs

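# Reusable filter restricting a job to the main branch.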
workflow_filters: &workflow_filters
    filters:
        branches:
            only:
                - main
workflows:
    version: 2
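    # Default workflow: quality and consistency checks plus the fetcher-filtered test jobs.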
    build_and_test:
        jobs:
            - check_code_quality
            - check_repository_consistency
            - run_examples_torch
            - run_examples_flax
            - run_tests_custom_tokenizers
            - run_tests_torch_and_tf
            - run_tests_torch_and_flax
            - run_tests_torch
            - run_tests_tf
            - run_tests_flax
            - run_tests_pipelines_torch
            - run_tests_pipelines_tf
            - run_tests_onnxruntime
            - run_tests_hub
            - run_tests_layoutlmv2_and_v3
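    # Nightly workflow: runs the full *_all suites on main once a day at midnight.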
    nightly:
        triggers:
            - schedule:
                cron: "0 0 * * *"
                filters:
                    branches:
                        only:
                            - main
        jobs:
            - run_examples_torch_all
            - run_examples_flax_all
            - run_tests_torch_and_tf_all
            - run_tests_torch_and_flax_all
            - run_tests_torch_all
            - run_tests_tf_all
            - run_tests_flax_all
            - run_tests_pipelines_torch_all
            - run_tests_pipelines_tf_all
            - run_tests_onnxruntime_all
            - run_tests_hub_all

#    tpu_testing_jobs:
#        triggers:
#            - schedule:
#                # Set to run daily at 08:00.
#                cron: "0 8 * * *"
#                filters:
#                    branches:
#                        only:
#                            - main
#        jobs:
#            - cleanup-gke-jobs
#            - run_examples_tpu