"IMG/git@developer.sourcefind.cn:dadigang/Ventoy.git" did not exist on "feea11e2bb2bc5f42ceca64255900e3917d87f98"
Unverified commit 6ae71ec8 authored by Yih-Dar, committed by GitHub

Update `runs-on` in workflow files (#26435)



* update

* fix

---------
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent 78dd1202
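All edited jobs converge on one `runs-on` convention: instead of a single combined label built with `format()` (for example `single-gpu-docker-past-ci`) or a `[self-hosted, docker-gpu, ...]` list, each job now targets its runner pool with a label array naming GPU count, GPU vendor, GPU model, and CI flavor. Below is a minimal sketch of the before/after; the workflow and job names are hypothetical and only the `runs-on` lines mirror this diff. A job with a label array is routed only to a self-hosted runner carrying every listed label:

# Hypothetical minimal workflow; names are illustrative, not from this commit.
name: runs-on-sketch
on: workflow_dispatch

jobs:
  old-style:
    # Before: one combined label, here resolving to "single-gpu-docker-past-ci".
    runs-on: ${{ format('{0}-{1}', 'single-gpu', 'docker-past-ci') }}
    steps:
      - run: nvidia-smi
  new-style:
    # After: a label array; the target runner must carry all four labels.
    runs-on: [single-gpu, nvidia-gpu, t4, past-ci]
    steps:
      - run: nvidia-smi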
@@ -20,7 +20,7 @@ env:
 jobs:
   run_doctests:
-    runs-on: [self-hosted, doc-tests-gpu]
+    runs-on: [single-gpu, nvidia-gpu, t4, doctest-ci]
     container:
       image: huggingface/transformers-all-latest-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
...
@@ -39,7 +39,7 @@ jobs:
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
     container:
       image: huggingface/transformers-all-latest-torch-nightly-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
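In the matrix jobs, the `machine_type` value expands inside the label array, so each matrix leg lands on a different runner pool while sharing the `nvidia-gpu`, `t4`, and CI-flavor labels. For the hunk above, the expression resolves as follows (a sketch repeating only values present in the diff):

strategy:
  matrix:
    machine_type: [single-gpu, multi-gpu]
# machine_type == single-gpu -> runs-on resolves to [single-gpu, nvidia-gpu, t4, past-ci]
# machine_type == multi-gpu  -> runs-on resolves to [multi-gpu, nvidia-gpu, t4, past-ci]
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]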
@@ -54,7 +54,7 @@ jobs:
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
     container:
       image: huggingface/transformers-all-latest-torch-nightly-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -94,7 +94,7 @@ jobs:
       matrix:
         folders: ${{ fromJson(needs.setup.outputs.matrix) }}
         machine_type: [single-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
     container:
       image: huggingface/transformers-all-latest-torch-nightly-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -155,7 +155,7 @@ jobs:
       matrix:
         folders: ${{ fromJson(needs.setup.outputs.matrix) }}
         machine_type: [multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
     container:
       image: huggingface/transformers-all-latest-torch-nightly-gpu
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -215,7 +215,7 @@ jobs:
       fail-fast: false
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
     needs: setup
     container:
       image: huggingface/transformers-pytorch-deepspeed-nightly-gpu
...
@@ -50,7 +50,7 @@ jobs:
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
     container:
       image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -65,7 +65,7 @@ jobs:
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
     container:
       image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -101,7 +101,7 @@ jobs:
       matrix:
         folders: ${{ fromJson(needs.setup.outputs.matrix) }}
         machine_type: [single-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
     container:
       image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -177,7 +177,7 @@ jobs:
       matrix:
         folders: ${{ fromJson(needs.setup.outputs.matrix) }}
         machine_type: [multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
     container:
       image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -253,7 +253,7 @@ jobs:
       fail-fast: false
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
     needs: setup
     container:
       image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
...
@@ -45,7 +45,7 @@ jobs:
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}']
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, push-ci]
     container:
       image: huggingface/transformers-all-latest-gpu-push-ci
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -60,7 +60,7 @@ jobs:
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}']
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, push-ci]
     container:
       image: huggingface/transformers-all-latest-gpu-push-ci
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -158,7 +158,7 @@ jobs:
       matrix:
         folders: ${{ fromJson(needs.setup.outputs.matrix) }}
         machine_type: [single-gpu]
-    runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}']
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, push-ci]
     container:
       image: huggingface/transformers-all-latest-gpu-push-ci
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -251,7 +251,7 @@ jobs:
       matrix:
         folders: ${{ fromJson(needs.setup.outputs.matrix) }}
         machine_type: [multi-gpu]
-    runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}']
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, push-ci]
     container:
       image: huggingface/transformers-all-latest-gpu-push-ci
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -344,7 +344,7 @@ jobs:
       fail-fast: false
       matrix:
         machine_type: [single-gpu]
-    runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}']
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, push-ci]
     container:
       image: huggingface/transformers-pytorch-deepspeed-latest-gpu-push-ci
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -434,7 +434,7 @@ jobs:
       fail-fast: false
       matrix:
         machine_type: [multi-gpu]
-    runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}']
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, push-ci]
     container:
       image: huggingface/transformers-pytorch-deepspeed-latest-gpu-push-ci
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
...
@@ -43,7 +43,7 @@ jobs:
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
     container:
       image: huggingface/transformers-all-latest-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -58,7 +58,7 @@ jobs:
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
     container:
       image: huggingface/transformers-all-latest-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -98,7 +98,7 @@ jobs:
       matrix:
         folders: ${{ fromJson(needs.setup.outputs.matrix) }}
         machine_type: [single-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
     container:
       image: huggingface/transformers-all-latest-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -159,7 +159,7 @@ jobs:
       matrix:
         folders: ${{ fromJson(needs.setup.outputs.matrix) }}
         machine_type: [multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
     container:
       image: huggingface/transformers-all-latest-gpu
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -219,7 +219,7 @@ jobs:
       fail-fast: false
       matrix:
         machine_type: [single-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
     container:
       image: huggingface/transformers-all-latest-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -270,7 +270,7 @@ jobs:
       fail-fast: false
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
     container:
       image: huggingface/transformers-pytorch-gpu
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -320,7 +320,7 @@ jobs:
       fail-fast: false
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
     container:
       image: huggingface/transformers-tensorflow-gpu
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -371,7 +371,7 @@ jobs:
       fail-fast: false
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
     needs: setup
     container:
       image: huggingface/transformers-pytorch-deepspeed-latest-gpu
...