name: Self-hosted runner (scheduled)

# Note that each job's dependencies go into a corresponding docker file.
#
# For example for `run_all_tests_torch_cuda_extensions_gpu` the docker image is
# `huggingface/transformers-pytorch-deepspeed-latest-gpu`, which can be found at
# `docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile`

on:
  workflow_call:
    inputs:
      job:
        required: true
        type: string
      slack_report_channel:
        required: true
        type: string

env:
  HF_HOME: /mnt/cache
  TRANSFORMERS_IS_CI: yes
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  RUN_SLOW: yes
  # For gated repositories, we still need to agree to share information on the Hub repo page in order to get access.
  # This token is created under the bot `hf-transformers-bot`.
  HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
  TF_FORCE_GPU_ALLOW_GROWTH: true
  RUN_PT_TF_CROSS_TESTS: 1
  CUDA_VISIBLE_DEVICES: 0,1
  NUM_SLICES: 2

jobs:
  setup:
    if: contains(fromJSON('["run_tests_gpu", "run_tests_quantization_torch_gpu"]'), inputs.job)
    name: Setup
    strategy:
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
    container:
      image: huggingface/transformers-all-latest-gpu
      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    outputs:
      folder_slices: ${{ steps.set-matrix.outputs.folder_slices }}
      slice_ids: ${{ steps.set-matrix.outputs.slice_ids }}
      quantization_matrix: ${{ steps.set-matrix-quantization.outputs.quantization_matrix }}
    steps:
      - name: Update clone
        working-directory: /transformers
        run: |
          git fetch && git checkout ${{ github.sha }}

      - name: Cleanup
        working-directory: /transformers
        run: |
          rm -rf tests/__pycache__
          rm -rf tests/models/__pycache__
          rm -rf reports

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - id: set-matrix
        if: ${{ inputs.job == 'run_tests_gpu' }}
        name: Identify models to test
        working-directory: /transformers/tests
        run: |
          echo "folder_slices=$(python3 ../utils/split_model_tests.py --num_splits ${{ env.NUM_SLICES }})" >> $GITHUB_OUTPUT
          echo "slice_ids=$(python3 -c 'd = list(range(${{ env.NUM_SLICES }})); print(d)')" >> $GITHUB_OUTPUT

      - id: set-matrix-quantization
        if: ${{ inputs.job == 'run_tests_quantization_torch_gpu' }}
        name: Identify quantization method to test
        working-directory: /transformers/tests
        run: |
          echo "quantization_matrix=$(python3 -c 'import os; tests = os.getcwd(); quantization_tests = os.listdir(os.path.join(tests, "quantization")); d = sorted(list(filter(os.path.isdir, [f"quantization/{x}" for x in quantization_tests]))) ; print(d)')" >> $GITHUB_OUTPUT

      - name: NVIDIA-SMI
        run: |
          nvidia-smi

  run_tests_gpu:
    if: ${{ inputs.job == 'run_tests_gpu' }}
    name: " "
    needs: setup
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu, multi-gpu]
        slice_id: ${{ fromJSON(needs.setup.outputs.slice_ids) }}
    uses: ./.github/workflows/model_jobs.yml
    with:
      folder_slices: ${{ needs.setup.outputs.folder_slices }}
      machine_type: ${{ matrix.machine_type }}
      slice_id: ${{ matrix.slice_id }}
    secrets: inherit

  run_pipelines_torch_gpu:
    if: ${{ inputs.job == 'run_pipelines_torch_gpu' }}
    name: PyTorch pipelines
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
    container:
      image: huggingface/transformers-pytorch-gpu
      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ github.sha }}
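      # The docker image ships with a pre-built transformers install; the step below swaps it for
      # an editable install of the commit checked out above, so tests run against this exact revision.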
      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: NVIDIA-SMI
        run: |
          nvidia-smi

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run all pipeline tests on GPU
        working-directory: /transformers
        run: |
          python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_tests_torch_pipeline_gpu tests/pipelines

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_tests_torch_pipeline_gpu/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_tests_torch_pipeline_gpu"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_tests_torch_pipeline_gpu
          path: /transformers/reports/${{ matrix.machine_type }}_tests_torch_pipeline_gpu

  run_pipelines_tf_gpu:
    if: ${{ inputs.job == 'run_pipelines_tf_gpu' }}
    name: TensorFlow pipelines
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
    container:
      image: huggingface/transformers-tensorflow-gpu
      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
      - name: Update clone
        working-directory: /transformers
        run: |
          git fetch && git checkout ${{ github.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: NVIDIA-SMI
        run: |
          nvidia-smi

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run all pipeline tests on GPU
        working-directory: /transformers
        run: |
          python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_tests_tf_pipeline_gpu tests/pipelines

      - name: Failure short reports
        if: ${{ always() }}
        run: |
          cat /transformers/reports/${{ matrix.machine_type }}_tests_tf_pipeline_gpu/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_tests_tf_pipeline_gpu"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_tests_tf_pipeline_gpu
          path: /transformers/reports/${{ matrix.machine_type }}_tests_tf_pipeline_gpu

  run_examples_gpu:
    if: ${{ inputs.job == 'run_examples_gpu' }}
    name: Examples directory
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
    container:
      image: huggingface/transformers-all-latest-gpu
      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ github.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
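      # The remaining steps follow the same pattern as the pipeline jobs: report GPU and environment
      # info, install the example-specific requirements, run the examples test suite, and upload the reports.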
      - name: NVIDIA-SMI
        run: |
          nvidia-smi

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run examples tests on GPU
        working-directory: /transformers
        run: |
          pip install -r examples/pytorch/_tests_requirements.txt
          python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_examples_gpu examples/pytorch

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_examples_gpu/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_examples_gpu"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_examples_gpu
          path: /transformers/reports/${{ matrix.machine_type }}_examples_gpu

  run_all_tests_torch_cuda_extensions_gpu:
    if: ${{ inputs.job == 'run_all_tests_torch_cuda_extensions_gpu' }}
    name: Torch CUDA extension tests
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
    container:
      image: huggingface/transformers-pytorch-deepspeed-latest-gpu
      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
      - name: Update clone
        working-directory: /workspace/transformers
        run: git fetch && git checkout ${{ github.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /workspace/transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: Remove cached torch extensions
        run: rm -rf /github/home/.cache/torch_extensions/

      # To avoid unknown test failures
      - name: Pre build DeepSpeed *again*
        working-directory: /workspace
        run: |
          python3 -m pip uninstall -y deepspeed
          DS_DISABLE_NINJA=1 DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check

      - name: NVIDIA-SMI
        run: |
          nvidia-smi

      - name: Environment
        working-directory: /workspace/transformers
        run: |
          python utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /workspace/transformers
        run: pip freeze

      - name: Run all tests on GPU
        working-directory: /workspace/transformers
        run: |
          python -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_torch_cuda_extensions_gpu tests/deepspeed tests/extended

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /workspace/transformers/reports/${{ matrix.machine_type }}_tests_torch_cuda_extensions_gpu/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_tests_torch_cuda_extensions_gpu_test_reports"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_tests_torch_cuda_extensions_gpu_test_reports
          path: /workspace/transformers/reports/${{ matrix.machine_type }}_tests_torch_cuda_extensions_gpu

  run_tests_quantization_torch_gpu:
    if: ${{ inputs.job == 'run_tests_quantization_torch_gpu' }}
    name: " "
    needs: setup
    strategy:
      fail-fast: false
      matrix:
        folders: ${{ fromJson(needs.setup.outputs.quantization_matrix) }}
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
    container:
      image: huggingface/transformers-quantization-latest-gpu
      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
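    # Each `matrix.folders` entry has the form `quantization/<method>` (see the setup job above);
    # the first step below replaces the `/` with `_` so the value can be used in report and artifact names.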
    steps:
      - name: Echo folder ${{ matrix.folders }}
        shell: bash
        run: |
          echo "${{ matrix.folders }}"
          matrix_folders=${{ matrix.folders }}
          matrix_folders=${matrix_folders/'quantization/'/'quantization_'}
          echo "$matrix_folders"
          echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV

      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ github.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: NVIDIA-SMI
        run: |
          nvidia-smi

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run quantization tests on GPU
        working-directory: /transformers
        run: |
          python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_quantization_torch_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_tests_quantization_torch_gpu_${{ matrix.folders }}/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_tests_quantization_torch_gpu_${{ env.matrix_folders }}"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_tests_quantization_torch_gpu_${{ env.matrix_folders }}
          path: /transformers/reports/${{ matrix.machine_type }}_tests_quantization_torch_gpu_${{ matrix.folders }}

  run_extract_warnings:
    # Let's only do this for the job `run_tests_gpu` to simplify the (already complex) logic.
    if: ${{ always() && inputs.job == 'run_tests_gpu' }}
    name: Extract warnings in CI artifacts
    runs-on: ubuntu-22.04
    needs: [setup, run_tests_gpu]
    steps:
      - name: Checkout transformers
        uses: actions/checkout@v4
        with:
          fetch-depth: 2

      - name: Install transformers
        run: pip install transformers

      - name: Show installed libraries and their versions
        run: pip freeze

      - name: Create output directory
        run: mkdir warnings_in_ci

      - uses: actions/download-artifact@v4
        with:
          path: warnings_in_ci

      - name: Show artifacts
        run: echo "$(python3 -c 'import os; d = os.listdir(); print(d)')"
        working-directory: warnings_in_ci

      - name: Extract warnings in CI artifacts
        run: |
          python3 utils/extract_warnings.py --workflow_run_id ${{ github.run_id }} --output_dir warnings_in_ci --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} --from_gh
          echo "$(python3 -c 'import os; import json; fp = open("warnings_in_ci/selected_warnings.json"); d = json.load(fp); d = "\n".join(d) ;print(d)')"

      - name: Upload artifact
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: warnings_in_ci
          path: warnings_in_ci/selected_warnings.json

  send_results:
    name: Slack Report
    needs: [
      setup,
      run_tests_gpu,
      run_pipelines_torch_gpu,
      run_pipelines_tf_gpu,
      run_examples_gpu,
      run_all_tests_torch_cuda_extensions_gpu,
      run_tests_quantization_torch_gpu,
      run_extract_warnings
    ]
    if: ${{ always() }}
    uses: ./.github/workflows/slack-report.yml
    with:
      job: ${{ inputs.job }}
      # This would be `skipped` if `setup` is skipped.
      setup_status: ${{ needs.setup.result }}
      slack_report_channel: ${{ inputs.slack_report_channel }}
      # This would be an empty string if `setup` is skipped.
      folder_slices: ${{ needs.setup.outputs.folder_slices }}
      quantization_matrix: ${{ needs.setup.outputs.quantization_matrix }}
    secrets: inherit
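  # `slack-report.yml` receives the job name, setup status and matrices passed above and posts the
  # resulting CI summary to the channel given by `slack_report_channel`.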