version: 2.1
orbs:
    gcp-gke: circleci/gcp-gke@1.0.4
    go: circleci/go@1.3.0

# TPU REFERENCES
references:
    checkout_ml_testing: &checkout_ml_testing
        run:
            name: Checkout ml-testing-accelerators
            command: |
                git clone https://github.com/GoogleCloudPlatform/ml-testing-accelerators.git
                cd ml-testing-accelerators
                git fetch origin 5e88ac24f631c27045e62f0e8d5dfcf34e425e25:stable
                git checkout stable
    build_push_docker: &build_push_docker
        run:
            name: Configure Docker
            command: |
                gcloud --quiet auth configure-docker
                cd docker/transformers-pytorch-tpu
                if [ -z "$CIRCLE_PR_NUMBER" ]; then docker build --tag "$GCR_IMAGE_PATH:$CIRCLE_WORKFLOW_JOB_ID" -f Dockerfile --build-arg "TEST_IMAGE=1" . ; else docker build --tag "$GCR_IMAGE_PATH:$CIRCLE_WORKFLOW_JOB_ID" -f Dockerfile --build-arg "TEST_IMAGE=1" --build-arg "GITHUB_REF=pull/$CIRCLE_PR_NUMBER/head" . ; fi
                docker push "$GCR_IMAGE_PATH:$CIRCLE_WORKFLOW_JOB_ID"
    deploy_cluster: &deploy_cluster
        run:
            name: Deploy the job on the kubernetes cluster
            command: |
                go get github.com/google/go-jsonnet/cmd/jsonnet && \
                export PATH=$PATH:$HOME/go/bin && \
                kubectl create -f docker/transformers-pytorch-tpu/dataset.yaml || true && \
                job_name=$(jsonnet -J ml-testing-accelerators/ docker/transformers-pytorch-tpu/bert-base-cased.jsonnet --ext-str image=$GCR_IMAGE_PATH --ext-str image-tag=$CIRCLE_WORKFLOW_JOB_ID | kubectl create -f -) && \
                job_name=${job_name#job.batch/} && \
                job_name=${job_name% created} && \
                echo "Waiting on kubernetes job: $job_name" && \
                i=0 && \
                # 30 checks spaced 30s apart = 900s total.
                max_checks=30 && \
                status_code=2 && \
                # Check on the job periodically. Set the status code depending on what
                # happened to the job in Kubernetes. If we try max_checks times and
                # still the job hasn't finished, give up and return the starting
                # non-zero status code.
                while [ $i -lt $max_checks ]; do ((i++)); if kubectl get jobs $job_name -o jsonpath='Failed:{.status.failed}' | grep "Failed:1"; then status_code=1 && break; elif kubectl get jobs $job_name -o jsonpath='Succeeded:{.status.succeeded}' | grep "Succeeded:1" ; then status_code=0 && break; else echo "Job not finished yet"; fi; sleep 30; done && \
                echo "Done waiting. Job status code: $status_code" && \
                pod_name=$(kubectl get po -l controller-uid=`kubectl get job $job_name -o "jsonpath={.metadata.labels.controller-uid}"` | awk 'match($0,!/NAME/) {print $1}') && \
                echo "GKE pod name: $pod_name" && \
                kubectl logs -f $pod_name --container=train
                echo "Done with log retrieval attempt." && \
                gcloud container images delete "$GCR_IMAGE_PATH:$CIRCLE_WORKFLOW_JOB_ID" --force-delete-tags && \
                exit $status_code
    delete_gke_jobs: &delete_gke_jobs
        run:
            name: Delete GKE Jobs
            command: |
                # Match jobs whose age matches patterns like '1h' or '1d', i.e. any job
                # that has been around longer than 1hr. First print all columns for
                # matches, then execute the delete.
                kubectl get job | awk 'match($4,/[0-9]+[dh]/) {print $0}'
                kubectl delete job $(kubectl get job | awk 'match($4,/[0-9]+[dh]/) {print $1}')

jobs:
    # Fetch the tests to run
    fetch_tests:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        parallelism: 1
        steps:
            - checkout
            - run: pip install --upgrade pip
            - run: pip install GitPython
            - run: pip install .
            - run: mkdir -p test_preparation
            - run: python utils/tests_fetcher.py | tee tests_fetched_summary.txt
            - store_artifacts:
                path: ~/transformers/tests_fetched_summary.txt
            - run: |
                if [ -f test_list.txt ]; then
                    mv test_list.txt test_preparation/test_list.txt
                else
                    touch test_preparation/test_list.txt
                fi
            - run: python utils/tests_fetcher.py --filters tests examples | tee examples_tests_fetched_summary.txt
            - store_artifacts:
                path: ~/transformers/examples_tests_fetched_summary.txt
            - run: |
                if [ -f test_list.txt ]; then
                    mv test_list.txt test_preparation/examples_test_list.txt
                else
                    touch test_preparation/examples_test_list.txt
                fi
            - persist_to_workspace:
                root: test_preparation/
                paths:
                    test_list.txt
                    examples_test_list.txt

    # To run all tests for the nightly build
    fetch_all_tests:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        parallelism: 1
        steps:
            - run: |
                mkdir test_preparation
                echo "tests" > test_preparation/test_list.txt
                echo "tests" > test_preparation/examples_test_list.txt
            - persist_to_workspace:
                root: test_preparation/
                paths:
                    test_list.txt

    run_tests_torch_and_tf:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            RUN_PT_TF_CROSS_TESTS: yes
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - attach_workspace:
                at: ~/transformers/test_preparation
            - run: |
                if [ ! -s test_preparation/test_list.txt ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi
            - restore_cache:
                keys:
                    - v0.5-torch_and_tf-{{ checksum "setup.py" }}
                    - v0.5-torch_and_tf-
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng git-lfs
            - run: git lfs install
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision]
            - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html
            - run: pip install tensorflow_probability
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - run: pip install git+https://github.com/huggingface/accelerate
            - save_cache:
                key: v0.5-torch_and_tf-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf $(cat test_preparation/test_list.txt) -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt
            - store_artifacts:
                path: ~/transformers/tests_output.txt
            - store_artifacts:
                path: ~/transformers/reports

    run_tests_torch_and_flax:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            RUN_PT_FLAX_CROSS_TESTS: yes
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - attach_workspace:
                at: ~/transformers/test_preparation
            - run: |
                if [ ! -s test_preparation/test_list.txt ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi
            - restore_cache:
                keys:
                    - v0.5-torch_and_flax-{{ checksum "setup.py" }}
                    - v0.5-torch_and_flax-
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision]
            - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - run: pip install git+https://github.com/huggingface/accelerate
            - save_cache:
                key: v0.5-torch_and_flax-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax $(cat test_preparation/test_list.txt) -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt
            - store_artifacts:
                path: ~/transformers/tests_output.txt
            - store_artifacts:
                path: ~/transformers/reports

    run_tests_torch:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - attach_workspace:
                at: ~/transformers/test_preparation
            - run: |
                if [ ! -s test_preparation/test_list.txt ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi
            - restore_cache:
                keys:
                    - v0.5-torch-{{ checksum "setup.py" }}
                    - v0.5-torch-
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]
            - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - run: pip install git+https://github.com/huggingface/accelerate
            - save_cache:
                key: v0.5-torch-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: python -m pytest -n 3 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_torch $(cat test_preparation/test_list.txt) | tee tests_output.txt
            - store_artifacts:
                path: ~/transformers/tests_output.txt
            - store_artifacts:
                path: ~/transformers/reports

    run_tests_tf:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - attach_workspace:
                at: ~/transformers/test_preparation
            - run: |
                if [ ! -s test_preparation/test_list.txt ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi
            - restore_cache:
                keys:
                    - v0.5-tf-{{ checksum "setup.py" }}
                    - v0.5-tf-
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]
            - run: pip install tensorflow_probability
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - save_cache:
                key: v0.5-tf-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_tf $(cat test_preparation/test_list.txt) | tee tests_output.txt
            - store_artifacts:
                path: ~/transformers/tests_output.txt
            - store_artifacts:
                path: ~/transformers/reports

    run_tests_flax:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - attach_workspace:
                at: ~/transformers/test_preparation
            - run: |
                if [ ! -s test_preparation/test_list.txt ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi
            - restore_cache:
                keys:
                    - v0.5-flax-{{ checksum "setup.py" }}
                    - v0.5-flax-
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
            - run: pip install --upgrade pip
            - run: pip install .[flax,testing,sentencepiece,flax-speech,vision]
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - save_cache:
                key: v0.5-flax-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_flax $(cat test_preparation/test_list.txt) | tee tests_output.txt
            - store_artifacts:
                path: ~/transformers/tests_output.txt
            - store_artifacts:
                path: ~/transformers/reports

    run_tests_pipelines_torch:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            RUN_PIPELINE_TESTS: yes
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - attach_workspace:
                at: ~/transformers/test_preparation
            - run: |
                if [ ! -s test_preparation/test_list.txt ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi
            - restore_cache:
                keys:
                    - v0.5-torch-{{ checksum "setup.py" }}
                    - v0.5-torch-
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]
            - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html
            - run: pip install https://github.com/kpu/kenlm/archive/master.zip
            - save_cache:
                key: v0.5-torch-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch -m is_pipeline_test $(cat test_preparation/test_list.txt) | tee tests_output.txt
            - store_artifacts:
                path: ~/transformers/tests_output.txt
            - store_artifacts:
                path: ~/transformers/reports

    run_tests_pipelines_tf:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            RUN_PIPELINE_TESTS: yes
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - attach_workspace:
                at: ~/transformers/test_preparation
            - run: |
                if [ ! -s test_preparation/test_list.txt ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi
            - restore_cache:
                keys:
                    - v0.5-tf-{{ checksum "setup.py" }}
                    - v0.5-tf-
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,tf-cpu,testing,sentencepiece]
            - run: pip install tensorflow_probability
            - save_cache:
                key: v0.5-tf-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf $(cat test_preparation/test_list.txt) -m is_pipeline_test | tee tests_output.txt
            - store_artifacts:
                path: ~/transformers/tests_output.txt
            - store_artifacts:
                path: ~/transformers/reports

    run_tests_custom_tokenizers:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            RUN_CUSTOM_TOKENIZERS: yes
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        steps:
            - checkout
            - attach_workspace:
                at: ~/transformers/test_preparation
            - run: |
                if [ ! -s test_preparation/test_list.txt ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi
            - restore_cache:
                keys:
                    - v0.5-custom_tokenizers-{{ checksum "setup.py" }}
                    - v0.5-custom_tokenizers-
            - run: sudo apt-get -y update && sudo apt-get install -y cmake
            - run:
                name: install jumanpp
                command: |
                    wget https://github.com/ku-nlp/jumanpp/releases/download/v2.0.0-rc3/jumanpp-2.0.0-rc3.tar.xz
                    tar xvf jumanpp-2.0.0-rc3.tar.xz
                    mkdir jumanpp-2.0.0-rc3/bld
                    cd jumanpp-2.0.0-rc3/bld
                    sudo cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local
                    sudo make install
            - run: pip install --upgrade pip
            - run: pip install .[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba]
            - run: python -m unidic download
            - save_cache:
                key: v0.5-custom_tokenizers-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: python -m pytest --max-worker-restart=0 -s --make-reports=tests_custom_tokenizers ./tests/models/bert_japanese/test_tokenization_bert_japanese.py ./tests/models/openai/test_tokenization_openai.py ./tests/models/clip/test_tokenization_clip.py | tee tests_output.txt
            - store_artifacts:
                path: ~/transformers/tests_output.txt
            - store_artifacts:
                path: ~/transformers/reports

    run_examples_torch:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - attach_workspace:
                at: ~/transformers/test_preparation
            - run: |
                if [ ! -s test_preparation/examples_test_list.txt ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi
            - restore_cache:
                keys:
                    - v0.5-torch_examples-{{ checksum "setup.py" }}
                    - v0.5-torch_examples-
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,torch,sentencepiece,testing,torch-speech]
            - run: pip install -r examples/pytorch/_tests_requirements.txt
            - save_cache:
                key: v0.5-torch_examples-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            # tee to examples_output.txt so that the store_artifacts step below finds the file.
            - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_torch ./examples/pytorch/ | tee examples_output.txt
            - store_artifacts:
                path: ~/transformers/examples_output.txt
            - store_artifacts:
                path: ~/transformers/reports

    run_examples_tensorflow:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - attach_workspace:
                at: ~/transformers/test_preparation
            - run: |
                if [ ! -s test_preparation/examples_test_list.txt ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi
            - restore_cache:
                keys:
                    - v0.5-tensorflow_examples-{{ checksum "setup.py" }}
                    - v0.5-tensorflow_examples-
            - run: pip install --upgrade pip
            - run: pip install .[sklearn,tensorflow,sentencepiece,testing]
            - run: pip install -r examples/tensorflow/_tests_requirements.txt
            - save_cache:
                key: v0.5-tensorflow_examples-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_tensorflow ./examples/tensorflow/ | tee tensorflow_examples_output.txt
            - store_artifacts:
                path: ~/transformers/tensorflow_examples_output.txt
            - store_artifacts:
                path: ~/transformers/reports

    run_examples_flax:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - attach_workspace:
                at: ~/transformers/test_preparation
            - run: |
                if [ ! -s test_preparation/examples_test_list.txt ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi
            - restore_cache:
                keys:
                    - v0.5-flax_examples-{{ checksum "setup.py" }}
                    - v0.5-flax_examples-
            - run: pip install --upgrade pip
            - run: pip install .[flax,testing,sentencepiece]
            - run: pip install -r examples/flax/_tests_requirements.txt
            - save_cache:
                key: v0.5-flax_examples-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_flax ./examples/flax/ | tee flax_examples_output.txt
            - store_artifacts:
                path: ~/transformers/flax_examples_output.txt
            - store_artifacts:
                path: ~/transformers/reports

    run_tests_hub:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            HUGGINGFACE_CO_STAGING: yes
            RUN_GIT_LFS_TESTS: yes
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - attach_workspace:
                at: ~/transformers/test_preparation
            - run: |
                if [ ! -s test_preparation/test_list.txt ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi
            - restore_cache:
                keys:
                    - v0.5-hub-{{ checksum "setup.py" }}
                    - v0.5-hub-
            - run: sudo apt-get -y update && sudo apt-get install git-lfs
            - run: |
                git config --global user.email "ci@dummy.com"
                git config --global user.name "ci"
            - run: pip install --upgrade pip
            - run: pip install .[torch,sentencepiece,testing]
            - save_cache:
                key: v0.5-hub-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: python -m pytest --max-worker-restart=0 -sv --make-reports=tests_hub $(cat test_preparation/test_list.txt) -m is_staging_test | tee tests_output.txt
            - store_artifacts:
                path: ~/transformers/tests_output.txt
            - store_artifacts:
                path: ~/transformers/reports

    run_tests_onnxruntime:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - attach_workspace:
                at: ~/transformers/test_preparation
            - run: |
                if [ ! -s test_preparation/test_list.txt ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi
            - restore_cache:
                keys:
                    - v0.5-onnx-{{ checksum "setup.py" }}
                    - v0.5-onnx-
            - run: pip install --upgrade pip
            - run: pip install .[torch,tf,testing,sentencepiece,onnxruntime,vision,rjieba]
            - save_cache:
                key: v0.5-onnx-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_onnx $(cat test_preparation/test_list.txt) -k onnx | tee tests_output.txt
            - store_artifacts:
                path: ~/transformers/tests_output.txt
            - store_artifacts:
                path: ~/transformers/reports

    check_code_quality:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        resource_class: large
        environment:
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                keys:
                    - v0.5-code_quality-{{ checksum "setup.py" }}
                    - v0.5-code_quality-
            - run: pip install --upgrade pip
            - run: pip install .[all,quality]
            - save_cache:
                key: v0.5-code_quality-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: black --check --preview examples tests src utils
            - run: isort --check-only examples tests src utils
            - run: python utils/custom_init_isort.py --check_only
            - run: python utils/sort_auto_mappings.py --check_only
            - run: flake8 examples tests src utils
            - run: doc-builder style src/transformers docs/source --max_len 119 --check_only --path_to_docs docs/source
            - run: python utils/check_doc_toc.py

    check_repository_consistency:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        resource_class: large
        environment:
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        parallelism: 1
        steps:
            - checkout
            - restore_cache:
                keys:
                    - v0.5-repository_consistency-{{ checksum "setup.py" }}
                    - v0.5-repository_consistency-
            - run: pip install --upgrade pip
            - run: pip install .[all,quality]
            - save_cache:
                key: v0.5-repository_consistency-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            - run: python utils/check_copies.py
            - run: python utils/check_table.py
            - run: python utils/check_dummies.py
            - run: python utils/check_repo.py
            - run: python utils/check_inits.py
            - run: python utils/check_config_docstrings.py
            - run: make deps_table_check_updated
            - run: python utils/tests_fetcher.py --sanity_check
            - run: python utils/update_metadata.py --check-only

    run_tests_layoutlmv2_and_v3:
        working_directory: ~/transformers
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - attach_workspace:
                at: ~/transformers/test_preparation
            - run: |
                if [ ! -s test_preparation/test_list.txt ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi
            - restore_cache:
                keys:
                    - v0.5-torch-{{ checksum "setup.py" }}
                    - v0.5-torch-
            - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
            - run: pip install --upgrade pip
            - run: pip install .[torch,testing,vision]
            - run: pip install torchvision
            # The commit `36a65a0907d90ed591479b2ebaa8b61cfa0b4ef0` in `detectron2` breaks things.
            # See https://github.com/facebookresearch/detectron2/commit/36a65a0907d90ed591479b2ebaa8b61cfa0b4ef0#comments.
            # TODO: Revert this change back once the above issue is fixed.
            - run: python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
            - run: sudo apt install tesseract-ocr
            - run: pip install pytesseract
            - save_cache:
                key: v0.5-torch-{{ checksum "setup.py" }}
                paths:
                    - '~/.cache/pip'
            # tee to tests_output.txt so that the store_artifacts step below finds the file.
            - run: python -m pytest -n 1 --max-worker-restart=0 tests/models/*layoutlmv* --dist=loadfile -s --make-reports=tests_layoutlmv2_and_v3 --durations=100 | tee tests_output.txt
            - store_artifacts:
                path: ~/transformers/tests_output.txt
            - store_artifacts:
                path: ~/transformers/reports

    # TPU JOBS
    run_examples_tpu:
        docker:
            - image: cimg/python:3.7.12
        environment:
            OMP_NUM_THREADS: 1
            TRANSFORMERS_IS_CI: yes
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - go/install
            - *checkout_ml_testing
            - gcp-gke/install
            - gcp-gke/update-kubeconfig-with-credentials:
                cluster: $GKE_CLUSTER
                perform-login: true
            - setup_remote_docker
            - *build_push_docker
            - *deploy_cluster

    cleanup-gke-jobs:
        docker:
            - image: cimg/python:3.7.12
        steps:
            - gcp-gke/install
            - gcp-gke/update-kubeconfig-with-credentials:
                cluster: $GKE_CLUSTER
                perform-login: true
            - *delete_gke_jobs

workflow_filters: &workflow_filters
    filters:
        branches:
            only:
                - main

workflows:
    version: 2
    build_and_test:
        jobs:
            - check_code_quality
            - check_repository_consistency
            - fetch_tests
            - run_examples_torch:
                requires:
                    - fetch_tests
            - run_examples_tensorflow:
                requires:
                    - fetch_tests
            - run_examples_flax:
                requires:
                    - fetch_tests
            - run_tests_custom_tokenizers:
                requires:
                    - fetch_tests
            - run_tests_torch_and_tf:
                requires:
                    - fetch_tests
            - run_tests_torch_and_flax:
                requires:
                    - fetch_tests
            - run_tests_torch:
                requires:
                    - fetch_tests
            - run_tests_tf:
                requires:
                    - fetch_tests
            - run_tests_flax:
                requires:
                    - fetch_tests
            - run_tests_pipelines_torch:
                requires:
                    - fetch_tests
            - run_tests_pipelines_tf:
                requires:
                    - fetch_tests
            - run_tests_onnxruntime:
                requires:
                    - fetch_tests
            - run_tests_hub:
                requires:
                    - fetch_tests
            - run_tests_layoutlmv2_and_v3:
                requires:
                    - fetch_tests
    nightly:
        triggers:
            - schedule:
                cron: "0 0 * * *"
                filters:
                    branches:
                        only:
                            - main
        jobs:
            - fetch_all_tests
            - run_examples_torch:
                requires:
                    - fetch_all_tests
            - run_examples_tensorflow:
                requires:
                    - fetch_all_tests
            - run_examples_flax:
                requires:
                    - fetch_all_tests
            - run_tests_custom_tokenizers:
                requires:
                    - fetch_all_tests
            - run_tests_torch_and_tf:
                requires:
                    - fetch_all_tests
            - run_tests_torch_and_flax:
                requires:
                    - fetch_all_tests
            - run_tests_torch:
                requires:
                    - fetch_all_tests
            - run_tests_tf:
                requires:
                    - fetch_all_tests
            - run_tests_flax:
                requires:
                    - fetch_all_tests
            - run_tests_pipelines_torch:
                requires:
                    - fetch_all_tests
            - run_tests_pipelines_tf:
                requires:
                    - fetch_all_tests
            - run_tests_onnxruntime:
                requires:
                    - fetch_all_tests
            - run_tests_hub:
                requires:
                    - fetch_all_tests
            - run_tests_layoutlmv2_and_v3:
                requires:
                    - fetch_all_tests

#    tpu_testing_jobs:
#        triggers:
#            - schedule:
#                # Set to run at the first minute of every hour.
#                cron: "0 8 * * *"
#                filters:
#                    branches:
#                        only:
#                            - main
#        jobs:
#            - cleanup-gke-jobs
#            - run_examples_tpu
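
# Local reproduction sketch (not part of the CI definition): assuming a local checkout of the
# repository with the utils/ scripts referenced above, the quality gate can be approximated by
# running the same commands the check_code_quality job runs:
#
#     pip install .[all,quality]
#     black --check --preview examples tests src utils
#     isort --check-only examples tests src utils
#     flake8 examples tests src utils
#     python utils/check_doc_toc.py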