Unverified Commit 71fc3317 authored by Yih-Dar, committed by GitHub

Separate Push CI images from Scheduled CI (#19170)



* separate images

* Fix condition
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent fa4eeb4f
@@ -6,6 +6,10 @@ on:
       - docker-image*
   repository_dispatch:
   workflow_call:
+    inputs:
+      image_postfix:
+        required: true
+        type: string
   schedule:
     - cron: "0 1 * * *"
@@ -38,10 +42,12 @@ jobs:
           build-args: |
             REF=main
           push: true
-          tags: huggingface/transformers-all-latest-gpu
+          tags: huggingface/transformers-all-latest-gpu${{ inputs.image_postfix }}

   latest-with-torch-nightly-docker:
     name: "Nightly PyTorch + Stable TensorFlow"
+    # Push CI doesn't need this image
+    if: inputs.image_postfix != '-push-ci'
     runs-on: ubuntu-latest
     steps:
       -
@@ -91,10 +97,12 @@ jobs:
           build-args: |
             REF=main
           push: true
-          tags: huggingface/transformers-pytorch-deepspeed-latest-gpu
+          tags: huggingface/transformers-pytorch-deepspeed-latest-gpu${{ inputs.image_postfix }}

   nightly-torch-deepspeed-docker:
     name: "Nightly PyTorch + DeepSpeed"
+    # Push CI doesn't need this image
+    if: inputs.image_postfix != '-push-ci'
     runs-on: ubuntu-latest
     steps:
       -
@@ -121,6 +129,8 @@ jobs:
   doc-builder:
     name: "Doc builder"
+    # Push CI doesn't need this image
+    if: inputs.image_postfix != '-push-ci'
     runs-on: ubuntu-latest
     steps:
       -
@@ -145,6 +155,8 @@ jobs:
   latest-pytorch:
     name: "Latest PyTorch [dev]"
+    # Push CI doesn't need this image
+    if: inputs.image_postfix != '-push-ci'
     runs-on: ubuntu-latest
     steps:
       -
@@ -171,6 +183,8 @@ jobs:
   latest-tensorflow:
     name: "Latest TensorFlow [dev]"
+    # Push CI doesn't need this image
+    if: inputs.image_postfix != '-push-ci'
     runs-on: ubuntu-latest
     steps:
       -
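Taken together, the changes to this first file (the reusable image-build workflow) do three things: declare an image_postfix input under workflow_call, append that postfix to every pushed image tag, and skip the jobs that Push CI does not need whenever the postfix is -push-ci. A minimal, self-contained sketch of the same pattern follows; workflow, job, and image names here are illustrative, not the exact ones above, and it assumes that on a scheduled run inputs.image_postfix evaluates to an empty string, so the nightly run keeps pushing the un-suffixed tags and builds every image.

# Sketch only: a reusable workflow that takes an image_postfix input.
# Workflow, job, and image names are illustrative.
name: Build images (sketch)

on:
  workflow_call:
    inputs:
      image_postfix:
        required: true
        type: string
  schedule:
    - cron: "0 1 * * *"

jobs:
  main-image:
    runs-on: ubuntu-latest
    steps:
      # A scheduled run sees an empty inputs.image_postfix, so the plain tag is used;
      # a caller passing "-push-ci" gets a separately tagged image.
      - run: echo "would build and push example-org/example-image${{ inputs.image_postfix }}"

  nightly-image:
    # Push CI doesn't need this image, so skip it when the caller asks for "-push-ci" images.
    if: inputs.image_postfix != '-push-ci'
    runs-on: ubuntu-latest
    steps:
      - run: echo "would build the nightly image (scheduled CI only)"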
@@ -40,6 +40,8 @@ jobs:
     needs: check-for-setup
     if: (github.event_name == 'push') && (needs.check-for-setup.outputs.changed == '1')
     uses: ./.github/workflows/build-docker-images.yml
+    with:
+      image_postfix: "-push-ci"
     secrets: inherit

   run_push_ci:
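The caller side, shown just above, is the counterpart: the job that runs on push re-uses the image-build workflow, passes image_postfix: "-push-ci", and forwards its secrets, while the scheduled builds still come from the reusable workflow's own cron trigger with no postfix. A stripped-down sketch of such a caller might be the following; the trigger, branch, and job names are illustrative, while the uses: path is the one referenced in the hunk above.

# Sketch only: a workflow that calls the reusable image-build workflow on push.
name: Push CI image builder (sketch)

on:
  push:
    branches:
      - main

jobs:
  build-push-ci-images:
    uses: ./.github/workflows/build-docker-images.yml
    with:
      image_postfix: "-push-ci"
    secrets: inherit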
@@ -47,7 +47,7 @@ jobs:
         machine_type: [single-gpu, multi-gpu]
     runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}']
     container:
-      image: huggingface/transformers-all-latest-gpu
+      image: huggingface/transformers-all-latest-gpu-push-ci
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
       - name: NVIDIA-SMI
@@ -62,7 +62,7 @@ jobs:
         machine_type: [single-gpu, multi-gpu]
     runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}']
     container:
-      image: huggingface/transformers-all-latest-gpu
+      image: huggingface/transformers-all-latest-gpu-push-ci
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     outputs:
       matrix: ${{ steps.set-matrix.outputs.matrix }}
@@ -158,7 +158,7 @@ jobs:
         machine_type: [single-gpu]
     runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}']
     container:
-      image: huggingface/transformers-all-latest-gpu
+      image: huggingface/transformers-all-latest-gpu-push-ci
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
       # Necessary to get the correct branch name and commit SHA for `workflow_run` event
@@ -243,7 +243,7 @@ jobs:
         machine_type: [multi-gpu]
     runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}']
     container:
-      image: huggingface/transformers-all-latest-gpu
+      image: huggingface/transformers-all-latest-gpu-push-ci
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
       # Necessary to get the correct branch name and commit SHA for `workflow_run` event
@@ -328,7 +328,7 @@ jobs:
         machine_type: [single-gpu]
     runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}']
     container:
-      image: huggingface/transformers-pytorch-deepspeed-latest-gpu
+      image: huggingface/transformers-pytorch-deepspeed-latest-gpu-push-ci
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
       # Necessary to get the correct branch name and commit SHA for `workflow_run` event
@@ -410,7 +410,7 @@ jobs:
         machine_type: [multi-gpu]
     runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}']
     container:
-      image: huggingface/transformers-pytorch-deepspeed-latest-gpu
+      image: huggingface/transformers-pytorch-deepspeed-latest-gpu-push-ci
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
       # Necessary to get the correct branch name and commit SHA for `workflow_run` event
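Finally, the push-CI test jobs above consume the new images by pinning their containers to the -push-ci tags, while the scheduled test jobs keep using the un-suffixed tags, so the two pipelines no longer share images. A reduced sketch of one such job is below; the runner labels and container options are taken from the diff, while the trigger and the test step are illustrative.

# Sketch only: a push-CI test job running inside the dedicated "-push-ci" image.
name: Push CI tests (sketch)

on:
  push:
    branches:
      - main

jobs:
  run-tests-gpu:
    strategy:
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}']
    container:
      # The scheduled CI keeps using huggingface/transformers-all-latest-gpu instead.
      image: huggingface/transformers-all-latest-gpu-push-ci
      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
      - name: NVIDIA-SMI
        run: nvidia-smi
      - name: Run tests (illustrative)
        run: echo "run the push-CI test suite here"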