# Changed from https://github.com/GaParmar/img2img-turbo/blob/main/gradio_sketch2image.py
import os
import random
import time
from datetime import datetime

import GPUtil
import torch
from controlnet_aux import CannyDetector
from diffusers import FluxControlPipeline
from image_gen_aux import DepthPreprocessor
from PIL import Image

from nunchaku.models.safety_checker import SafetyChecker
from nunchaku.models.transformers.transformer_flux import NunchakuFluxTransformer2dModel
from utils import get_args
from vars import (
    DEFAULT_GUIDANCE_CANNY,
    DEFAULT_GUIDANCE_DEPTH,
    DEFAULT_INFERENCE_STEP_CANNY,
    DEFAULT_INFERENCE_STEP_DEPTH,
    DEFAULT_STYLE_NAME,
    EXAMPLES,
    HEIGHT,
    MAX_SEED,
    STYLE_NAMES,
    STYLES,
    WIDTH,
)

# import gradio last to avoid conflicts with other imports
import gradio as gr

args = get_args()

model_name = f"{args.model}-dev"
pipeline_class = FluxControlPipeline

# Pick the control-image preprocessor that matches the selected model.
if args.model == "canny":
    processor = CannyDetector()
else:
    assert args.model == "depth", f"Model {args.model} not supported"
    processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf")

# Load the pipeline either in BF16 or with the SVDQuant INT4 transformer.
if args.precision == "bf16":
    pipeline = pipeline_class.from_pretrained(
        f"black-forest-labs/FLUX.1-{model_name.capitalize()}", torch_dtype=torch.bfloat16
    )
    pipeline = pipeline.to("cuda")
    pipeline.precision = "bf16"
else:
    assert args.precision == "int4"
    pipeline_init_kwargs = {}
    transformer = NunchakuFluxTransformer2dModel.from_pretrained(f"mit-han-lab/svdq-int4-flux.1-{model_name}")
    pipeline_init_kwargs["transformer"] = transformer
    if args.use_qencoder:
        from nunchaku.models.text_encoders.t5_encoder import NunchakuT5EncoderModel

        text_encoder_2 = NunchakuT5EncoderModel.from_pretrained("mit-han-lab/svdq-flux.1-t5")
        pipeline_init_kwargs["text_encoder_2"] = text_encoder_2
    pipeline = pipeline_class.from_pretrained(
        f"black-forest-labs/FLUX.1-{model_name.capitalize()}", torch_dtype=torch.bfloat16, **pipeline_init_kwargs
    )
    pipeline = pipeline.to("cuda")
    pipeline.precision = "int4"

safety_checker = SafetyChecker("cuda", disabled=args.no_safety_checker)


def run(
    image, prompt: str, style: str, prompt_template: str, num_inference_steps: int, guidance_scale: float, seed: int
) -> tuple[Image.Image, str]:
    # Preprocess the input image into the control signal (Canny edges or a depth map).
    if args.model == "canny":
        processed_img = processor(image["composite"]).convert("RGB")
    else:
        assert args.model == "depth"
        processed_img = processor(image["composite"])[0].convert("RGB")

    # Replace unsafe prompts with a harmless default before generation.
    is_unsafe_prompt = False
    if not safety_checker(prompt):
        is_unsafe_prompt = True
        prompt = "A peaceful world."
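
    # Fill the prompt into the selected template, then run the control pipeline
    # and time the generation.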
    prompt = prompt_template.format(prompt=prompt)
    print(f"Prompt: {prompt}")
    start_time = time.time()
    result_image = pipeline(
        prompt=prompt,
        control_image=processed_img,
        height=HEIGHT,
        width=WIDTH,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        generator=torch.Generator().manual_seed(seed),
    ).images[0]
    latency = time.time() - start_time
    if latency < 1:
        latency = latency * 1000
        latency_str = f"{latency:.2f}ms"
    else:
        latency_str = f"{latency:.2f}s"
    if is_unsafe_prompt:
        latency_str += " (Unsafe prompt detected)"
    torch.cuda.empty_cache()
    if args.count_use:
        if os.path.exists(f"{args.model}-use_count.txt"):
            with open(f"{args.model}-use_count.txt", "r") as f:
                count = int(f.read())
        else:
            count = 0
        count += 1
        current_time = datetime.now()
        print(f"{current_time}: {count}")
        with open(f"{args.model}-use_count.txt", "w") as f:
            f.write(str(count))
        with open(f"{args.model}-use_record.txt", "a") as f:
            f.write(f"{current_time}: {count}\n")
    return result_image, latency_str


with gr.Blocks(css_paths="assets/style.css", title=f"SVDQuant Flux.1-{model_name} Demo") as demo:
    with open("assets/description.html", "r") as f:
        DESCRIPTION = f.read()
    gpus = GPUtil.getGPUs()
    if len(gpus) > 0:
        gpu = gpus[0]
        memory = gpu.memoryTotal / 1024
        device_info = f"Running on {gpu.name} with {memory:.0f} GiB memory."
    else:
        device_info = "Running on CPU 🥶 This demo does not work on CPU."
    notice = 'Notice: We will replace unsafe prompts with a default prompt: "A peaceful world."'

    def get_header_str():
        if args.count_use:
            if os.path.exists(f"{args.model}-use_count.txt"):
                with open(f"{args.model}-use_count.txt", "r") as f:
                    count = int(f.read())
            else:
                count = 0
            count_info = (
                f"