Unverified Commit 4519ad6c authored by Muyang Li, committed by GitHub

chore: add V1 flux tests (#742)

* make linter happy

* add tests for qwen-image-edit-2509

* update

* update

* flux schnell test runnable

* update the test score

* make linter happy

* add fp4 results

* fix the test score

* add tests for flux_dev

* update the test score

* add flux.1-krea

* fix the krea tests

* update

* update

* add kontext

* update

* fix kontext

* update

* add flux.1-depth

* add flux-tools

* finish flux tools

* add more flux examples

* update

* update3

* update

* update score

* update

* update
parent 5b9af2f1
@@ -92,8 +92,8 @@ jobs:
           source $(conda info --base)/etc/profile.d/conda.sh
           conda activate test_env || { echo "Failed to activate conda env"; exit 1; }
           which python
-          pytest -s -x tests/flux/test_flux_examples.py
-          pytest -s -x tests/v1/test_examples.py
+          pytest -vv -x tests/flux/test_flux_examples.py
+          pytest -vv -x tests/v1/test_examples.py
           python .github/workflows/run_all_tests.py
       - name: clean up
         if: always()
...
 import subprocess
 from pathlib import Path
+from tqdm import tqdm

 def run_all_tests():
     test_dir = Path("tests")
@@ -21,9 +23,9 @@ def run_all_tests():
         print(f" {test_file}")

     failed_tests = []
-    for test_file in test_files:
+    for test_file in tqdm(test_files):
         print(f"Running {test_file} ...")
-        result = subprocess.run(["pytest", "--reruns", "2", "--reruns-delay", "0", "-s", "-x", test_file])
+        result = subprocess.run(["pytest", "--reruns", "2", "--reruns-delay", "0", "-vv", "-x", test_file])
         if result.returncode != 0:
             print(f"Test failed: {test_file}")
             failed_tests.append(test_file)
...
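Putting the two hunks together, the runner script roughly looks like the sketch below; the discovery glob and the final failure summary sit outside the visible hunks, so those parts are assumptions:

import subprocess
from pathlib import Path

from tqdm import tqdm


def run_all_tests():
    test_dir = Path("tests")
    # Assumed discovery pattern; the real one is outside the visible hunk.
    test_files = sorted(str(p) for p in test_dir.rglob("test_*.py"))
    print(f"Found {len(test_files)} test files:")
    for test_file in test_files:
        print(f" {test_file}")

    failed_tests = []
    for test_file in tqdm(test_files):
        print(f"Running {test_file} ...")
        result = subprocess.run(["pytest", "--reruns", "2", "--reruns-delay", "0", "-vv", "-x", test_file])
        if result.returncode != 0:
            print(f"Test failed: {test_file}")
            failed_tests.append(test_file)

    # Assumed summary: report failures and exit nonzero so CI marks the job failed.
    if failed_tests:
        print(f"{len(failed_tests)} test file(s) failed:")
        for test_file in failed_tests:
            print(f" {test_file}")
        raise SystemExit(1)


if __name__ == "__main__":
    run_all_tests()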
# Use an NVIDIA base image with CUDA support
ARG CUDA_IMAGE="12.8.1-devel-ubuntu24.04"
FROM nvidia/cuda:${CUDA_IMAGE}
ENV DEBIAN_FRONTEND=noninteractive
ARG PYTHON_VERSION=3.11
ARG TORCH_VERSION=2.6
ARG TORCHVISION_VERSION=0.21
ARG TORCHAUDIO_VERSION=2.6
ARG CUDA_SHORT_VERSION=12.8
# Set working directory
WORKDIR /
RUN echo PYTHON_VERSION=${PYTHON_VERSION} \
&& echo CUDA_SHORT_VERSION=${CUDA_SHORT_VERSION} \
&& echo TORCH_VERSION=${TORCH_VERSION} \
&& echo TORCHVISION_VERSION=${TORCHVISION_VERSION} \
&& echo TORCHAUDIO_VERSION=${TORCHAUDIO_VERSION}
# Setup timezone and install system dependencies
RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \
&& echo 'tzdata tzdata/Zones/America select New_York' | debconf-set-selections \
&& apt update -y \
&& apt install software-properties-common -y \
&& add-apt-repository ppa:deadsnakes/ppa -y \
&& apt update && apt install wget git -y && apt clean
# Install Miniconda
ENV CONDA_DIR=/opt/conda
RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O /tmp/miniconda.sh \
&& bash /tmp/miniconda.sh -b -p ${CONDA_DIR} \
&& rm /tmp/miniconda.sh \
&& ${CONDA_DIR}/bin/conda clean -afy
ENV PATH=${CONDA_DIR}/bin:$PATH
RUN conda init bash
RUN conda create -y -n nunchaku python=${PYTHON_VERSION} \
&& conda install -y -n nunchaku -c conda-forge gxx=11 gcc=11 \
&& conda clean -afy
SHELL ["conda", "run", "-n", "nunchaku", "/bin/bash", "-c"]
# Install building dependencies
RUN pip install torch==${TORCH_VERSION} torchvision==${TORCHVISION_VERSION} torchaudio==${TORCHAUDIO_VERSION} --index-url https://download.pytorch.org/whl/cu124
RUN pip install ninja wheel diffusers transformers accelerate sentencepiece protobuf huggingface_hub comfy-cli
# Start building
RUN git clone https://github.com/mit-han-lab/nunchaku.git \
&& cd nunchaku \
&& git submodule init \
&& git submodule update \
&& NUNCHAKU_INSTALL_MODE=ALL python setup.py develop
RUN cd .. && git clone https://github.com/comfyanonymous/ComfyUI \
&& cd ComfyUI && pip install -r requirements.txt \
&& cd custom_nodes && git clone https://github.com/ltdrdata/ComfyUI-Manager comfyui-manager \
&& git clone https://github.com/mit-han-lab/ComfyUI-nunchaku.git \
&& cd .. && mkdir -p user/default/workflows/ && cp -r custom_nodes/ComfyUI-nunchaku/workflows/ user/default/workflows/nunchaku_examples
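# (Second Dockerfile variant: same recipe, but Torch 2.7 installed from a pinned cu128 nightly build.)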
# Use an NVIDIA base image with CUDA support
ARG CUDA_IMAGE="12.8.1-devel-ubuntu24.04"
FROM nvidia/cuda:${CUDA_IMAGE}
ENV DEBIAN_FRONTEND=noninteractive
ARG PYTHON_VERSION=3.11
ARG TORCH_VERSION=2.7
ARG TORCHVISION_VERSION=0.21
ARG TORCHAUDIO_VERSION=2.6
ARG CUDA_SHORT_VERSION=12.8
# Set working directory
WORKDIR /
RUN echo PYTHON_VERSION=${PYTHON_VERSION} \
&& echo CUDA_SHORT_VERSION=${CUDA_SHORT_VERSION} \
&& echo TORCH_VERSION=${TORCH_VERSION} \
&& echo TORCHVISION_VERSION=${TORCHVISION_VERSION} \
&& echo TORCHAUDIO_VERSION=${TORCHAUDIO_VERSION}
# Setup timezone and install system dependencies
RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \
&& echo 'tzdata tzdata/Zones/America select New_York' | debconf-set-selections \
&& apt update -y \
&& apt install software-properties-common -y \
&& add-apt-repository ppa:deadsnakes/ppa -y \
&& apt update && apt install wget git -y && apt clean
# Install Miniconda
ENV CONDA_DIR=/opt/conda
RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O /tmp/miniconda.sh \
&& bash /tmp/miniconda.sh -b -p ${CONDA_DIR} \
&& rm /tmp/miniconda.sh \
&& ${CONDA_DIR}/bin/conda clean -afy
ENV PATH=${CONDA_DIR}/bin:$PATH
RUN conda init bash
RUN conda create -y -n nunchaku python=${PYTHON_VERSION} \
&& conda install -y -n nunchaku -c conda-forge gxx=11 gcc=11 \
&& conda clean -afy
SHELL ["conda", "run", "-n", "nunchaku", "/bin/bash", "-c"]
# Install building dependencies
RUN pip install --pre torch==2.7.0.dev20250307+cu128 torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu128
RUN pip install ninja wheel diffusers transformers accelerate sentencepiece protobuf huggingface_hub comfy-cli
# Start building
RUN git clone https://github.com/mit-han-lab/nunchaku.git \
&& cd nunchaku \
&& git submodule init \
&& git submodule update \
&& NUNCHAKU_INSTALL_MODE=ALL python setup.py develop
RUN cd .. && git clone https://github.com/comfyanonymous/ComfyUI \
&& cd ComfyUI && pip install -r requirements.txt \
&& cd custom_nodes && git clone https://github.com/ltdrdata/ComfyUI-Manager comfyui-manager \
&& git clone https://github.com/mit-han-lab/ComfyUI-nunchaku.git \
&& cd .. && mkdir -p user/default/workflows/ && cp -r custom_nodes/ComfyUI-nunchaku/workflows/ user/default/workflows/nunchaku_examples
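# (Third Dockerfile variant: same recipe, but Torch 2.8 installed from the latest cu128 nightly.)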
# Use an NVIDIA base image with CUDA support
ARG CUDA_IMAGE="12.8.1-devel-ubuntu24.04"
FROM nvidia/cuda:${CUDA_IMAGE}
ENV DEBIAN_FRONTEND=noninteractive
ARG PYTHON_VERSION=3.11
ARG TORCH_VERSION=2.8
ARG TORCHVISION_VERSION=0.21
ARG TORCHAUDIO_VERSION=2.6
ARG CUDA_SHORT_VERSION=12.8
# Set working directory
WORKDIR /
RUN echo PYTHON_VERSION=${PYTHON_VERSION} \
&& echo CUDA_SHORT_VERSION=${CUDA_SHORT_VERSION} \
&& echo TORCH_VERSION=${TORCH_VERSION} \
&& echo TORCHVISION_VERSION=${TORCHVISION_VERSION} \
&& echo TORCHAUDIO_VERSION=${TORCHAUDIO_VERSION}
# Setup timezone and install system dependencies
RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \
&& echo 'tzdata tzdata/Zones/America select New_York' | debconf-set-selections \
&& apt update -y \
&& apt install software-properties-common -y \
&& add-apt-repository ppa:deadsnakes/ppa -y \
&& apt update && apt install wget git -y && apt clean
# Install Miniconda
ENV CONDA_DIR=/opt/conda
RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O /tmp/miniconda.sh \
&& bash /tmp/miniconda.sh -b -p ${CONDA_DIR} \
&& rm /tmp/miniconda.sh \
&& ${CONDA_DIR}/bin/conda clean -afy
ENV PATH=${CONDA_DIR}/bin:$PATH
RUN conda init bash
RUN conda create -y -n nunchaku python=${PYTHON_VERSION} \
&& conda install -y -n nunchaku -c conda-forge gxx=11 gcc=11 \
&& conda clean -afy
SHELL ["conda", "run", "-n", "nunchaku", "/bin/bash", "-c"]
# Install building dependencies
RUN pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu128
RUN pip install ninja wheel diffusers transformers accelerate sentencepiece protobuf huggingface_hub comfy-cli
# Start building
RUN git clone https://github.com/mit-han-lab/nunchaku.git \
&& cd nunchaku \
&& git submodule init \
&& git submodule update \
&& NUNCHAKU_INSTALL_MODE=ALL python setup.py develop
RUN cd .. && git clone https://github.com/comfyanonymous/ComfyUI \
&& cd ComfyUI && pip install -r requirements.txt \
&& cd custom_nodes && git clone https://github.com/ltdrdata/ComfyUI-Manager comfyui-manager \
&& git clone https://github.com/mit-han-lab/ComfyUI-nunchaku.git \
&& cd .. && mkdir -p user/default/workflows/ && cp -r custom_nodes/ComfyUI-nunchaku/workflows/ user/default/workflows/nunchaku_examples
@@ -5,8 +5,8 @@ from diffusers.utils import load_image
 from nunchaku import NunchakuFluxTransformer2dModel
 from nunchaku.utils import get_precision
-image = load_image("https://huggingface.co/mit-han-lab/svdq-int4-flux.1-fill-dev/resolve/main/example.png")
-mask = load_image("https://huggingface.co/mit-han-lab/svdq-int4-flux.1-fill-dev/resolve/main/mask.png")
+image = load_image("https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/cup.png")
+mask = load_image("https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/cup_mask.png")
 precision = get_precision() # auto-detect whether your precision is 'int4' or 'fp4' based on your GPU
 transformer = NunchakuFluxTransformer2dModel.from_pretrained(
@@ -16,7 +16,7 @@ pipe = FluxFillPipeline.from_pretrained(
     "black-forest-labs/FLUX.1-Fill-dev", transformer=transformer, torch_dtype=torch.bfloat16
 ).to("cuda")
 image = pipe(
-    prompt="A wooden basket of a cat.",
+    prompt="a white paper cup",
     image=image,
     mask_image=mask,
     height=1024,
...
import torch
from controlnet_aux import CannyDetector
from diffusers import FluxControlPipeline
from diffusers.utils import load_image
from nunchaku import NunchakuFluxTransformer2DModelV2
from nunchaku.utils import get_precision
precision = get_precision() # auto-detect whether your precision is 'int4' or 'fp4' based on your GPU
transformer = NunchakuFluxTransformer2DModelV2.from_pretrained(
f"nunchaku-tech/nunchaku-flux.1-canny-dev/svdq-{precision}_r32-flux.1-canny-dev.safetensors"
)
pipe = FluxControlPipeline.from_pretrained(
"black-forest-labs/FLUX.1-Canny-dev", transformer=transformer, torch_dtype=torch.bfloat16
).to("cuda")
prompt = (
"A robot made of exotic candies and chocolates of different kinds. "
"The background is filled with confetti and celebratory gifts."
)
control_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png")
processor = CannyDetector()
control_image = processor(
control_image, low_threshold=50, high_threshold=200, detect_resolution=1024, image_resolution=1024
)
image = pipe(
prompt=prompt, control_image=control_image, height=1024, width=1024, num_inference_steps=20, guidance_scale=30.0
).images[0]
image.save(f"flux.1-canny-dev-{precision}.png")
import torch
from diffusers import FluxControlPipeline
from diffusers.utils import load_image
from image_gen_aux import DepthPreprocessor
from nunchaku import NunchakuFluxTransformer2DModelV2
from nunchaku.utils import get_precision
precision = get_precision() # auto-detect whether your precision is 'int4' or 'fp4' based on your GPU
transformer = NunchakuFluxTransformer2DModelV2.from_pretrained(
f"nunchaku-tech/nunchaku-flux.1-depth-dev/svdq-{precision}_r32-flux.1-depth-dev.safetensors"
)
pipe = FluxControlPipeline.from_pretrained(
"black-forest-labs/FLUX.1-Depth-dev",
transformer=transformer,
torch_dtype=torch.bfloat16,
).to("cuda")
prompt = (
"A robot made of exotic candies and chocolates of different kinds. "
"The background is filled with confetti and celebratory gifts."
)
control_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png")
processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf")
control_image = processor(control_image)[0].convert("RGB")
image = pipe(
prompt=prompt, control_image=control_image, height=1024, width=1024, num_inference_steps=20, guidance_scale=10.0
).images[0]
image.save(f"flux.1-depth-dev-{precision}.png")
@@ -11,5 +11,5 @@ transformer = NunchakuFluxTransformer2DModelV2.from_pretrained(
 pipeline = FluxPipeline.from_pretrained(
     "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
 ).to("cuda")
-image = pipeline("A cat holding a sign that says hello world", num_inference_steps=50, guidance_scale=3.5).images[0]
+image = pipeline("A cat holding a sign that says hello world", num_inference_steps=20, guidance_scale=3.5).images[0]
 image.save(f"flux.1-dev-{precision}.png")
import torch
from diffusers import FluxFillPipeline
from diffusers.utils import load_image
from nunchaku import NunchakuFluxTransformer2DModelV2
from nunchaku.utils import get_precision
image = load_image("https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/cup.png")
mask = load_image("https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/cup_mask.png")
precision = get_precision() # auto-detect whether your precision is 'int4' or 'fp4' based on your GPU
transformer = NunchakuFluxTransformer2DModelV2.from_pretrained(
f"nunchaku-tech/nunchaku-flux.1-fill-dev/svdq-{precision}_r32-flux.1-fill-dev.safetensors"
)
pipe = FluxFillPipeline.from_pretrained(
"black-forest-labs/FLUX.1-Fill-dev", transformer=transformer, torch_dtype=torch.bfloat16
).to("cuda")
image = pipe(
prompt="a white paper cup",
image=image,
mask_image=mask,
height=1024,
width=1024,
guidance_scale=30,
num_inference_steps=50,
max_sequence_length=512,
).images[0]
image.save(f"flux.1-fill-dev-{precision}.png")
import torch
from diffusers import FluxKontextPipeline
from diffusers.utils import load_image
from nunchaku import NunchakuFluxTransformer2DModelV2
from nunchaku.utils import get_precision
transformer = NunchakuFluxTransformer2DModelV2.from_pretrained(
f"nunchaku-tech/nunchaku-flux.1-kontext-dev/svdq-{get_precision()}_r32-flux.1-kontext-dev.safetensors"
)
pipeline = FluxKontextPipeline.from_pretrained(
"black-forest-labs/FLUX.1-Kontext-dev", transformer=transformer, torch_dtype=torch.bfloat16
).to("cuda")
image = load_image(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/yarn-art-pikachu.png"
).convert("RGB")
prompt = "Make Pikachu hold a sign that says 'Nunchaku is awesome', yarn art style, detailed, vibrant colors"
image = pipeline(image=image, prompt=prompt, num_inference_steps=20, guidance_scale=2.5).images[0]
image.save("flux-kontext-dev.png")
 import torch
 from diffusers import FluxPipeline
-from nunchaku.models.transformers.transformer_flux_v2 import NunchakuFluxTransformer2DModelV2
+from nunchaku import NunchakuFluxTransformer2DModelV2
 from nunchaku.utils import get_precision

 precision = get_precision() # auto-detect whether your precision is 'int4' or 'fp4' based on your GPU
 transformer = NunchakuFluxTransformer2DModelV2.from_pretrained(
-    f"nunchaku-tech/nunchaku-flux.1-dev/svdq-{precision}_r32-flux.1-dev.safetensors"
+    f"nunchaku-tech/nunchaku-flux.1-krea-dev/svdq-{precision}_r32-flux.1-krea-dev.safetensors"
 )
 pipeline = FluxPipeline.from_pretrained(
-    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
+    "black-forest-labs/FLUX.1-krea-dev", torch_dtype=torch.bfloat16, transformer=transformer
 ).to("cuda")
-image = pipeline("A cat holding a sign that says hello world", num_inference_steps=50, guidance_scale=3.5).images[0]
-image.save(f"flux.1-dev-{precision}.png")
+prompt = (
+    "Tiny paper origami kingdom, a river flowing through a lush valley, bright saturated image, "
+    "a fox to the left, deer to the right, birds in the sky, bushes and trees all around"
+)
+image = pipeline(prompt, height=1024, width=1024, guidance_scale=4.5, num_inference_steps=20).images[0]
+image.save("flux-krea-dev.png")
import torch
from diffusers import FluxPipeline, FluxPriorReduxPipeline
from diffusers.utils import load_image
from nunchaku import NunchakuFluxTransformer2DModelV2
from nunchaku.utils import get_precision
precision = get_precision()
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-Redux-dev", torch_dtype=torch.bfloat16
).to("cuda")
transformer = NunchakuFluxTransformer2DModelV2.from_pretrained(
f"nunchaku-tech/nunchaku-flux.1-dev/svdq-{precision}_r32-flux.1-dev.safetensors"
)
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-dev",
text_encoder=None,
text_encoder_2=None,
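    # Text encoders can be dropped here: the Redux prior above produces the
    # prompt_embeds / pooled_prompt_embeds that are passed to the pipeline.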
transformer=transformer,
torch_dtype=torch.bfloat16,
).to("cuda")
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png")
pipe_prior_output = pipe_prior_redux(image)
images = pipe(guidance_scale=2.5, num_inference_steps=20, **pipe_prior_output).images
images[0].save(f"flux.1-redux-dev-{precision}.png")
@@ -132,7 +132,7 @@ def fused_qkv_norm_rottary(
     quantized_x, ascales, lora_act = proj.quantize(x)
     if output is None:
-        output = torch.empty(quantized_x.shape[0], proj.out_features, dtype=x.dtype, device=x.device)
+        output = torch.empty(batch_size * seq_len, proj.out_features, dtype=x.dtype, device=x.device)
     if isinstance(output, tuple):
         assert len(output) == 3
@@ -174,7 +174,5 @@ def fused_qkv_norm_rottary(
         norm_k=norm_k.weight if norm_k is not None else None,
         rotary_emb=rotary_emb,
     )
-    if seq_len * batch_size < output.shape[0]:
-        output = output[: seq_len * batch_size, :]
     output = output.view(batch_size, seq_len, -1)
     return output
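The two hunks above are connected: previously the output buffer was sized by quantized_x.shape[0], which can exceed batch_size * seq_len when the quantizer pads the token dimension, so the padded rows had to be sliced off before the final view. Allocating exactly batch_size * seq_len rows makes the trim unnecessary. A toy illustration (the pad-to-256 granularity is an assumption, not the kernel's real value):

import torch

batch_size, seq_len, out_features = 2, 1000, 64
pad_to = 256  # hypothetical kernel padding granularity
padded_rows = -(-batch_size * seq_len // pad_to) * pad_to  # ceil to multiple -> 2048

# Old behavior: allocate a padded buffer, then trim the extra rows.
old = torch.empty(padded_rows, out_features)
old = old[: batch_size * seq_len, :]

# New behavior: allocate exactly what the final view needs.
new = torch.empty(batch_size * seq_len, out_features)

assert old.shape == new.shape
print(new.view(batch_size, seq_len, -1).shape)  # torch.Size([2, 1000, 64])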
@@ -24,7 +24,9 @@ if __name__ == "__main__":
     torch_dtype = torch.float16 if is_turing() else torch.bfloat16
     transformer = NunchakuFluxTransformer2dModel.from_pretrained(
-        f"mit-han-lab/svdq-{precision}-flux.1-schnell", torch_dtype=torch_dtype, offload=True
+        f"nunchaku-tech/nunchaku-flux.1-schnell/svdq-{precision}_r32-flux.1-schnell.safetensors",
+        torch_dtype=torch_dtype,
+        offload=True,
     )
     pipeline = FluxPipeline.from_pretrained(
         "black-forest-labs/FLUX.1-schnell", transformer=transformer, torch_dtype=torch_dtype
...
@@ -34,7 +34,7 @@ dependencies = [
 optional-dependencies.ci = [
   "controlnet-aux==0.0.10",
   "datasets==3.6",
-  "diffusers @ git+https://github.com/huggingface/diffusers@a72bc0c",
+  "diffusers @ git+https://github.com/huggingface/diffusers@041501a",
   "facexlib==0.3",
   "image-gen-aux @ git+https://github.com/asomoza/image_gen_aux.git",
   "insightface==0.7.3",
...
#!/bin/bash
# Define the versions for Python, Torch, and CUDA
NUNCHAKU_VERSION=$1
python_versions=("3.10" "3.11" "3.12" "3.13")
torch_versions=("2.5" "2.6")
cuda_versions=("12.4" "12.8")
# Loop through all combinations of Python, Torch, and CUDA versions
for python_version in "${python_versions[@]}"; do
for torch_version in "${torch_versions[@]}"; do
# Skip building for Python 3.13 and PyTorch 2.5
if [[ "$python_version" == "3.13" && "$torch_version" == "2.5" ]]; then
echo "Skipping Python 3.13 with PyTorch 2.5"
continue
fi
for cuda_version in "${cuda_versions[@]}"; do
bash scripts/build_docker.sh "$python_version" "$torch_version" "$cuda_version" "$NUNCHAKU_VERSION"
done
done
done
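# Torch 2.7 and 2.8 images are built from their dedicated nightly Dockerfiles via separate scripts: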
for python_version in "${python_versions[@]}"; do
for cuda_version in "${cuda_versions[@]}"; do
bash scripts/build_docker_torch27.sh "$python_version" "2.7" "$cuda_version" "$NUNCHAKU_VERSION"
bash scripts/build_docker_torch28.sh "$python_version" "2.8" "$cuda_version" "$NUNCHAKU_VERSION"
done
done