"git@developer.sourcefind.cn:OpenDAS/fairseq.git" did not exist on "6fc03d3cf4a482083240b436741a56c32c8cf09b"
Unverified commit 57636ad4, authored by vb, committed by GitHub

purge HF_HUB_ENABLE_HF_TRANSFER; promote Xet (#12497)



* purge HF_HUB_ENABLE_HF_TRANSFER; promote Xet

* purge HF_HUB_ENABLE_HF_TRANSFER; promote Xet x2

* restrict docker build test to the ones we actually use in CI.

---------
Co-authored-by: YiYi Xu <yixu310@gmail.com>
Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
parent cefc2cf8
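
For context, the substitution applied throughout the workflows and Dockerfiles below swaps the `hf_transfer` package and its `HF_HUB_ENABLE_HF_TRANSFER` flag for `hf_xet` and `HF_XET_HIGH_PERFORMANCE`. A minimal, hedged sketch of the same switch on a developer machine (package and variable names are taken from the diff; the exact install command is illustrative):

```bash
# Old setup: accelerated downloads via hf_transfer
#   pip install hf_transfer
#   export HF_HUB_ENABLE_HF_TRANSFER=1

# New setup: Xet-backed transfers via hf_xet (sketch; assumes a recent huggingface_hub)
pip install -U huggingface_hub hf_xet
export HF_XET_HIGH_PERFORMANCE=1
```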
@@ -7,7 +7,7 @@ on:
 env:
   DIFFUSERS_IS_CI: yes
-  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_XET_HIGH_PERFORMANCE: 1
   HF_HOME: /mnt/cache
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
@@ -42,18 +42,39 @@ jobs:
           CHANGED_FILES: ${{ steps.file_changes.outputs.all }}
         run: |
           echo "$CHANGED_FILES"
-          for FILE in $CHANGED_FILES; do
+          ALLOWED_IMAGES=(
+            diffusers-pytorch-cpu
+            diffusers-pytorch-cuda
+            diffusers-pytorch-xformers-cuda
+            diffusers-pytorch-minimum-cuda
+            diffusers-doc-builder
+          )
+          declare -A IMAGES_TO_BUILD=()
+          for FILE in $CHANGED_FILES; do
             # skip anything that isn't still on disk
-            if [[ ! -f "$FILE" ]]; then
+            if [[ ! -e "$FILE" ]]; then
               echo "Skipping removed file $FILE"
               continue
             fi
-            if [[ "$FILE" == docker/*Dockerfile ]]; then
-              DOCKER_PATH="${FILE%/Dockerfile}"
-              DOCKER_TAG=$(basename "$DOCKER_PATH")
-              echo "Building Docker image for $DOCKER_TAG"
-              docker build -t "$DOCKER_TAG" "$DOCKER_PATH"
-            fi
+            for IMAGE in "${ALLOWED_IMAGES[@]}"; do
+              if [[ "$FILE" == docker/${IMAGE}/* ]]; then
+                IMAGES_TO_BUILD["$IMAGE"]=1
+              fi
+            done
           done
+          if [[ ${#IMAGES_TO_BUILD[@]} -eq 0 ]]; then
+            echo "No relevant Docker changes detected."
+            exit 0
+          fi
+          for IMAGE in "${!IMAGES_TO_BUILD[@]}"; do
+            DOCKER_PATH="docker/${IMAGE}"
+            echo "Building Docker image for $IMAGE"
+            docker build -t "$IMAGE" "$DOCKER_PATH"
+          done
         if: steps.file_changes.outputs.all != ''
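
The rewritten step above only rebuilds images that are both on the CI allow-list and touched by the change set, and it deduplicates rebuilds with an associative array keyed by image name. Below is a standalone sketch of that filtering logic for local experimentation; the sample CHANGED_FILES value is invented for illustration, and the on-disk existence check from the workflow is omitted.

```bash
#!/usr/bin/env bash
# Sketch of the allow-list filtering used in the workflow step above.
# In CI, CHANGED_FILES comes from the file-changes step output; this value is made up.
CHANGED_FILES="docker/diffusers-pytorch-cuda/Dockerfile README.md docker/diffusers-onnxruntime-cpu/Dockerfile"

ALLOWED_IMAGES=(
  diffusers-pytorch-cpu
  diffusers-pytorch-cuda
  diffusers-pytorch-xformers-cuda
  diffusers-pytorch-minimum-cuda
  diffusers-doc-builder
)

declare -A IMAGES_TO_BUILD=()
for FILE in $CHANGED_FILES; do
  for IMAGE in "${ALLOWED_IMAGES[@]}"; do
    # only paths under docker/<allowed-image>/ mark that image for rebuild
    if [[ "$FILE" == docker/${IMAGE}/* ]]; then
      IMAGES_TO_BUILD["$IMAGE"]=1
    fi
  done
done

# Prints: diffusers-pytorch-cuda (the only allow-listed image whose directory changed)
echo "${!IMAGES_TO_BUILD[@]}"
```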
@@ -7,7 +7,7 @@ on:
 env:
   DIFFUSERS_IS_CI: yes
-  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_XET_HIGH_PERFORMANCE: 1
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
   PYTEST_TIMEOUT: 600
@@ -26,7 +26,7 @@ concurrency:
 env:
   DIFFUSERS_IS_CI: yes
-  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_XET_HIGH_PERFORMANCE: 1
   OMP_NUM_THREADS: 4
   MKL_NUM_THREADS: 4
   PYTEST_TIMEOUT: 60
@@ -22,7 +22,7 @@ concurrency:
 env:
   DIFFUSERS_IS_CI: yes
-  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_XET_HIGH_PERFORMANCE: 1
   OMP_NUM_THREADS: 4
   MKL_NUM_THREADS: 4
   PYTEST_TIMEOUT: 60
@@ -24,7 +24,7 @@ env:
   DIFFUSERS_IS_CI: yes
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
-  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_XET_HIGH_PERFORMANCE: 1
   PYTEST_TIMEOUT: 600
   PIPELINE_USAGE_CUTOFF: 1000000000 # set high cutoff so that only always-test pipelines run
@@ -14,7 +14,7 @@ env:
   DIFFUSERS_IS_CI: yes
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
-  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_XET_HIGH_PERFORMANCE: 1
   PYTEST_TIMEOUT: 600
   PIPELINE_USAGE_CUTOFF: 50000
@@ -18,7 +18,7 @@ env:
   HF_HOME: /mnt/cache
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
-  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_XET_HIGH_PERFORMANCE: 1
   PYTEST_TIMEOUT: 600
   RUN_SLOW: no
@@ -8,7 +8,7 @@ env:
   HF_HOME: /mnt/cache
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
-  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_XET_HIGH_PERFORMANCE: 1
   PYTEST_TIMEOUT: 600
   RUN_SLOW: no
@@ -33,7 +33,7 @@ RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/diffusers.
 RUN uv pip install --no-cache-dir \
     accelerate \
     numpy==1.26.4 \
-    hf_transfer \
+    hf_xet \
     setuptools==69.5.1 \
     bitsandbytes \
     torchao \
@@ -44,6 +44,6 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
     scipy \
     tensorboard \
     transformers \
-    hf_transfer
+    hf_xet
 CMD ["/bin/bash"]
\ No newline at end of file
@@ -38,13 +38,12 @@ RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
     datasets \
     hf-doc-builder \
     huggingface-hub \
-    hf_transfer \
+    hf_xet \
     Jinja2 \
     librosa \
     numpy==1.26.4 \
     scipy \
     tensorboard \
-    transformers \
-    hf_transfer
+    transformers
 CMD ["/bin/bash"]
\ No newline at end of file
@@ -31,7 +31,7 @@ RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/diffusers.
 RUN uv pip install --no-cache-dir \
     accelerate \
     numpy==1.26.4 \
-    hf_transfer
+    hf_xet
 RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
@@ -44,6 +44,6 @@ RUN uv pip install --no-cache-dir \
     accelerate \
     numpy==1.26.4 \
     pytorch-lightning \
-    hf_transfer
+    hf_xet
 CMD ["/bin/bash"]
@@ -47,6 +47,6 @@ RUN uv pip install --no-cache-dir \
     accelerate \
     numpy==1.26.4 \
     pytorch-lightning \
-    hf_transfer
+    hf_xet
 CMD ["/bin/bash"]
@@ -44,7 +44,7 @@ RUN uv pip install --no-cache-dir \
     accelerate \
     numpy==1.26.4 \
     pytorch-lightning \
-    hf_transfer \
+    hf_xet \
     xformers
 CMD ["/bin/bash"]
@@ -29,7 +29,7 @@ The benchmark results for Flux and CogVideoX can be found in [this](https://gith
 The tests, and the expected slices, were obtained from the `aws-g6e-xlarge-plus` GPU test runners. To run the slow tests, use the following command or an equivalent:
 ```bash
-HF_HUB_ENABLE_HF_TRANSFER=1 RUN_SLOW=1 pytest -s tests/quantization/torchao/test_torchao.py::SlowTorchAoTests
+HF_XET_HIGH_PERFORMANCE=1 RUN_SLOW=1 pytest -s tests/quantization/torchao/test_torchao.py::SlowTorchAoTests
 ```
 `diffusers-cli`: