"""Batch image editing with Qwen-Image-Edit.

Reads inference tasks (an input image path plus an edit prompt) from
``./infer.json``, runs each task through ``QwenImageEditPipeline`` on CUDA
in bfloat16, and saves the edited images under ``./output_images_A800``.
Failures on individual tasks are logged and skipped so the batch continues.
"""

import json
import os
import sys

import torch
from PIL import Image
from diffusers import QwenImageEditPipeline

OUTPUT_DIR = "./output_images_A800"
JSON_FILE_PATH = "./infer.json"
MODEL_PATH = "/home/zwq/model/Qwen-Image-Edit"


def _build_pipeline():
    """Load the Qwen-Image-Edit pipeline in bfloat16 on CUDA."""
    pipeline = QwenImageEditPipeline.from_pretrained(MODEL_PATH)
    print("pipeline loaded")
    pipeline.to(torch.bfloat16)
    pipeline.to("cuda")
    pipeline.set_progress_bar_config(disable=None)
    return pipeline


def _load_tasks(json_file_path):
    """Return the list of task dicts from *json_file_path*.

    Exits the process with status 1 when the file is missing or is not
    valid JSON (matches the original script's fail-fast behavior).
    """
    try:
        with open(json_file_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except FileNotFoundError:
        print(f"Error: JSON file not found at {json_file_path}")
        sys.exit(1)
    except json.JSONDecodeError:
        print(f"Error: Could not decode JSON from {json_file_path}. Please check file format.")
        sys.exit(1)


def _process_task(pipeline, i, task, output_dir):
    """Run one edit task and save the result as ``<basename>_output_<i+1>.png``.

    A task missing ``input_image`` or ``prompt`` is skipped with a message;
    any per-task error is logged so the remaining tasks still run.
    """
    input_image_path = task.get("input_image")
    prompt = task.get("prompt")
    if not input_image_path or not prompt:
        print(f"Skipping task {i+1} due to missing 'input_image' or 'prompt'. Task data: {task}")
        return
    try:
        image = Image.open(input_image_path).convert("RGB")
        print(f"\nProcessing task {i+1}:")
        print(f" Input Image: {input_image_path}")
        print(f" Prompt: {prompt}")
        inputs = {
            "image": image,
            "prompt": prompt,
            # Fixed seed so a re-run reproduces the same edit.
            "generator": torch.manual_seed(0),
            "true_cfg_scale": 4.0,
            "negative_prompt": " ",
            "num_inference_steps": 50,
        }
        with torch.inference_mode():
            output = pipeline(**inputs)
            output_image = output.images[0]
        base_name = os.path.splitext(os.path.basename(input_image_path))[0]
        output_image_name = f"{base_name}_output_{i+1}.png"
        output_image_path = os.path.join(output_dir, output_image_name)
        output_image.save(output_image_path)
        print(f" Image saved at {os.path.abspath(output_image_path)}")
    except FileNotFoundError:
        print(f"Error: Input image not found at {input_image_path}. Skipping this task.")
    except Exception as e:
        # Deliberate best-effort batch: report the failure and move on.
        print(f"An error occurred while processing task {i+1} (Image: {input_image_path}, Prompt: {prompt}): {e}")


def main():
    """Build the pipeline, load the task list, and process every task."""
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    pipeline = _build_pipeline()
    inference_data = _load_tasks(JSON_FILE_PATH)
    print(f"Loaded {len(inference_data)} inference tasks from {JSON_FILE_PATH}")
    for i, task in enumerate(inference_data):
        _process_task(pipeline, i, task, OUTPUT_DIR)
    print("\nAll inference tasks completed.")


if __name__ == "__main__":
    main()