Commit d6b1d859 authored by chenpangpang's avatar chenpangpang
Browse files

解决gradio在高版本上通过端口映射方式找不到theme.css问题;添加建立公开链接需要的frpc_linux_amd64_v0.2到镜像中;删除不必要的依赖

parent 28412ec4
......@@ -3,10 +3,13 @@ RUN cd /root && git clone -b gpu https://developer.hpccube.com/codes/chenpangpan
WORKDIR /root/tile-upscaler/Tile-Upscaler
RUN pip install -r requirements.txt
# Bake pre-downloaded model weights and the local safety-checker copy into the build stage.
COPY chenyh/tile-upscaler/models /root/tile-upscaler/Tile-Upscaler/models
COPY chenyh/tile-upscaler/CompVis /root/tile-upscaler/Tile-Upscaler/CompVis
#########
# Prod #
#########
FROM image.sourcefind.cn:5000/gpu/admin/base/jupyterlab-pytorch:2.2.0-python3.10-cuda12.1-ubuntu22.04
# OpenCV runtime dependencies (glib + libGL).
RUN apt-get update && apt-get install -y libglib2.0-0 libgl1-mesa-glx
# Reuse the builder stage's installed site-packages and the application tree.
COPY --from=base /opt/conda/lib/python3.10/site-packages /opt/conda/lib/python3.10/site-packages
COPY --from=base /root/tile-upscaler/Tile-Upscaler /root/Tile-Upscaler
# frpc binary lets gradio's share=True build a public tunnel without downloading it at runtime.
COPY chenyh/tile-upscaler/frpc_linux_amd64_v0.2 /opt/conda/lib/python3.10/site-packages/gradio
......@@ -15,3 +15,16 @@
mv lllyasviel/ControlNet-v1-1/control_v11f1e_sd15_tile.pth models/ControlNet
mv stabilityai/sd-vae-ft-mse-original/vae-ft-mse-840000-ema-pruned.safetensors models/VAE
```
模型文件tree:
```
.
├── CompVis
│ └── stable-diffusion-safety-checker
└── models
├── ControlNet
├── embeddings
├── Lora
├── models
├── upscalers
└── VAE
```
import spaces
import functools
import os
import requests
......@@ -58,191 +57,191 @@ def download_models():
for model, (url, folder, filename) in models.items():
download_file(url, folder, filename)
# Fetch any missing model weights before the pipeline/UI is built.
download_models()
# def timer_func(func):
# def wrapper(*args, **kwargs):
# start_time = time.time()
# result = func(*args, **kwargs)
# end_time = time.time()
# print(f"{func.__name__} took {end_time - start_time:.2f} seconds")
# return result
# return wrapper
#
# class LazyLoadPipeline:
# def __init__(self):
# self.pipe = None
#
# @timer_func
# def load(self):
# if self.pipe is None:
# print("Starting to load the pipeline...")
# self.pipe = self.setup_pipeline()
# print(f"Moving pipeline to device: {device}")
# self.pipe.to(device)
# if USE_TORCH_COMPILE:
# print("Compiling the model...")
# self.pipe.unet = torch.compile(self.pipe.unet, mode="reduce-overhead", fullgraph=True)
#
# @timer_func
# def setup_pipeline(self):
# print("Setting up the pipeline...")
# controlnet = ControlNetModel.from_single_file(
# "models/ControlNet/control_v11f1e_sd15_tile.pth", torch_dtype=torch.float16
# )
# safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
# model_path = "models/models/Stable-diffusion/juggernaut_reborn.safetensors"
# pipe = StableDiffusionControlNetImg2ImgPipeline.from_single_file(
# model_path,
# controlnet=controlnet,
# torch_dtype=torch.float16,
# use_safetensors=True,
# safety_checker=safety_checker
# )
# vae = AutoencoderKL.from_single_file(
# "models/VAE/vae-ft-mse-840000-ema-pruned.safetensors",
# torch_dtype=torch.float16
# )
# pipe.vae = vae
# pipe.load_textual_inversion("models/embeddings/verybadimagenegative_v1.3.pt")
# pipe.load_textual_inversion("models/embeddings/JuggernautNegative-neg.pt")
# pipe.load_lora_weights("models/Lora/SDXLrender_v2.0.safetensors")
# pipe.fuse_lora(lora_scale=0.5)
# pipe.load_lora_weights("models/Lora/more_details.safetensors")
# pipe.fuse_lora(lora_scale=1.)
# pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
# pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)
# return pipe
#
# def __call__(self, *args, **kwargs):
# return self.pipe(*args, **kwargs)
#
# class LazyRealESRGAN:
# def __init__(self, device, scale):
# self.device = device
# self.scale = scale
# self.model = None
#
# def load_model(self):
# if self.model is None:
# self.model = RealESRGAN(self.device, scale=self.scale)
# self.model.load_weights(f'models/upscalers/RealESRGAN_x{self.scale}.pth', download=False)
# def predict(self, img):
# self.load_model()
# return self.model.predict(img)
#
# lazy_realesrgan_x2 = LazyRealESRGAN(device, scale=2)
# lazy_realesrgan_x4 = LazyRealESRGAN(device, scale=4)
#
# @timer_func
# def resize_and_upscale(input_image, resolution):
# scale = 2 if resolution <= 2048 else 4
# input_image = input_image.convert("RGB")
# W, H = input_image.size
# k = float(resolution) / min(H, W)
# H = int(round(H * k / 64.0)) * 64
# W = int(round(W * k / 64.0)) * 64
# img = input_image.resize((W, H), resample=Image.LANCZOS)
# if scale == 2:
# img = lazy_realesrgan_x2.predict(img)
# else:
# img = lazy_realesrgan_x4.predict(img)
# return img
#
# @timer_func
# def create_hdr_effect(original_image, hdr):
# if hdr == 0:
# return original_image
# cv_original = cv2.cvtColor(np.array(original_image), cv2.COLOR_RGB2BGR)
# factors = [1.0 - 0.9 * hdr, 1.0 - 0.7 * hdr, 1.0 - 0.45 * hdr,
# 1.0 - 0.25 * hdr, 1.0, 1.0 + 0.2 * hdr,
# 1.0 + 0.4 * hdr, 1.0 + 0.6 * hdr, 1.0 + 0.8 * hdr]
# images = [cv2.convertScaleAbs(cv_original, alpha=factor) for factor in factors]
# merge_mertens = cv2.createMergeMertens()
# hdr_image = merge_mertens.process(images)
# hdr_image_8bit = np.clip(hdr_image * 255, 0, 255).astype('uint8')
# return Image.fromarray(cv2.cvtColor(hdr_image_8bit, cv2.COLOR_BGR2RGB))
#
# lazy_pipe = LazyLoadPipeline()
# lazy_pipe.load()
#
# def prepare_image(input_image, resolution, hdr):
# condition_image = resize_and_upscale(input_image, resolution)
# condition_image = create_hdr_effect(condition_image, hdr)
# return condition_image
#
# # @spaces.GPU
# @timer_func
# def gradio_process_image(input_image, resolution, num_inference_steps, strength, hdr, guidance_scale):
# print("Starting image processing...")
# torch.cuda.empty_cache()
#
# condition_image = prepare_image(input_image, resolution, hdr)
#
# prompt = "masterpiece, best quality, highres"
# negative_prompt = "low quality, normal quality, ugly, blurry, blur, lowres, bad anatomy, bad hands, cropped, worst quality, verybadimagenegative_v1.3, JuggernautNegative-neg"
#
# options = {
# "prompt": prompt,
# "negative_prompt": negative_prompt,
# "image": condition_image,
# "control_image": condition_image,
# "width": condition_image.size[0],
# "height": condition_image.size[1],
# "strength": strength,
# "num_inference_steps": num_inference_steps,
# "guidance_scale": guidance_scale,
# "generator": torch.Generator(device=device).manual_seed(0),
# }
#
# print("Running inference...")
# result = lazy_pipe(**options).images[0]
# print("Image processing completed successfully")
#
# # Convert input_image and result to numpy arrays
# input_array = np.array(input_image)
# result_array = np.array(result)
#
# return [input_array, result_array]
#
# title = """<h1 align="center">Image Upscaler with Tile Controlnet</h1>
# <p align="center">The main ideas come from</p>
# <p><center>
# <a href="https://github.com/philz1337x/clarity-upscaler" target="_blank">[philz1337x]</a>
# <a href="https://github.com/BatouResearch/controlnet-tile-upscale" target="_blank">[Pau-Lozano]</a>
# </center></p>
# """
#
# with gr.Blocks() as demo:
# gr.HTML(title)
# with gr.Row():
# with gr.Column():
# input_image = gr.Image(type="pil", label="Input Image")
# run_button = gr.Button("Enhance Image")
# with gr.Column():
# output_slider = ImageSlider(label="Before / After", type="numpy")
# with gr.Accordion("Advanced Options", open=False):
# resolution = gr.Slider(minimum=256, maximum=2048, value=512, step=256, label="Resolution")
# num_inference_steps = gr.Slider(minimum=1, maximum=50, value=20, step=1, label="Number of Inference Steps")
# strength = gr.Slider(minimum=0, maximum=1, value=0.4, step=0.01, label="Strength")
# hdr = gr.Slider(minimum=0, maximum=1, value=0, step=0.1, label="HDR Effect")
# guidance_scale = gr.Slider(minimum=0, maximum=20, value=3, step=0.5, label="Guidance Scale")
#
# run_button.click(fn=gradio_process_image,
# inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale],
# outputs=output_slider)
#
# # Add examples with all required inputs
# gr.Examples(
# examples=[
# ["image1.jpg", 512, 20, 0.4, 0, 3],
# ["image2.png", 512, 20, 0.4, 0, 3],
# ["image3.png", 512, 20, 0.4, 0, 3],
# ],
# inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale],
# outputs=output_slider,
# fn=gradio_process_image,
# cache_examples=True,
# )
#
# demo.launch(share=True)
\ No newline at end of file
# download_models()
def timer_func(func):
    """Decorator that prints how long each call to *func* took.

    Uses a monotonic clock so the measured duration cannot go negative if
    the system wall clock is adjusted mid-call.
    """
    @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):
        start_time = time.perf_counter()
        result = func(*args, **kwargs)
        end_time = time.perf_counter()
        print(f"{func.__name__} took {end_time - start_time:.2f} seconds")
        return result
    return wrapper
class LazyLoadPipeline:
    """Lazily constructs the SD1.5 tile-ControlNet img2img pipeline.

    The heavy model loading only happens inside load(); the single instance
    is then reused for every request via __call__.
    """
    def __init__(self):
        # Built on demand by load(); None until then.
        self.pipe = None
    @timer_func
    def load(self):
        """Build the pipeline once, move it to `device`, optionally compile the UNet."""
        if self.pipe is None:
            print("Starting to load the pipeline...")
            self.pipe = self.setup_pipeline()
            print(f"Moving pipeline to device: {device}")
            self.pipe.to(device)
            if USE_TORCH_COMPILE:
                print("Compiling the model...")
                self.pipe.unet = torch.compile(self.pipe.unet, mode="reduce-overhead", fullgraph=True)
    @timer_func
    def setup_pipeline(self):
        """Assemble the pipeline from local checkpoint files.

        Loads the tile ControlNet and the juggernaut_reborn base model,
        swaps in the fine-tuned VAE, registers two negative textual
        inversions, fuses two LoRAs in sequence (the call order below is
        deliberate), then switches to a DDIM scheduler and enables FreeU.
        """
        print("Setting up the pipeline...")
        controlnet = ControlNetModel.from_single_file(
            "models/ControlNet/control_v11f1e_sd15_tile.pth", torch_dtype=torch.float16
        )
        # NOTE(review): presumably resolved from the local CompVis/ directory
        # copied into the image rather than the HF hub -- confirm offline behavior.
        safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
        model_path = "models/models/Stable-diffusion/juggernaut_reborn.safetensors"
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_single_file(
            model_path,
            controlnet=controlnet,
            torch_dtype=torch.float16,
            use_safetensors=True,
            safety_checker=safety_checker
        )
        vae = AutoencoderKL.from_single_file(
            "models/VAE/vae-ft-mse-840000-ema-pruned.safetensors",
            torch_dtype=torch.float16
        )
        pipe.vae = vae
        pipe.load_textual_inversion("models/embeddings/verybadimagenegative_v1.3.pt")
        pipe.load_textual_inversion("models/embeddings/JuggernautNegative-neg.pt")
        pipe.load_lora_weights("models/Lora/SDXLrender_v2.0.safetensors")
        pipe.fuse_lora(lora_scale=0.5)
        pipe.load_lora_weights("models/Lora/more_details.safetensors")
        pipe.fuse_lora(lora_scale=1.)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)
        return pipe
    def __call__(self, *args, **kwargs):
        """Delegate directly to the underlying diffusers pipeline."""
        return self.pipe(*args, **kwargs)
class LazyRealESRGAN:
    """RealESRGAN wrapper that defers weight loading until the first predict()."""

    def __init__(self, device, scale):
        self.device = device
        self.scale = scale
        # Instantiated lazily by load_model().
        self.model = None

    def load_model(self):
        """Create the upscaler and load its local weights (no-op once loaded)."""
        if self.model is not None:
            return
        self.model = RealESRGAN(self.device, scale=self.scale)
        self.model.load_weights(f'models/upscalers/RealESRGAN_x{self.scale}.pth', download=False)

    def predict(self, img):
        """Upscale *img*, loading the model on first use."""
        self.load_model()
        return self.model.predict(img)
# Module-level singletons: one lazy upscaler per supported scale factor.
lazy_realesrgan_x2 = LazyRealESRGAN(device, scale=2)
lazy_realesrgan_x4 = LazyRealESRGAN(device, scale=4)
@timer_func
def resize_and_upscale(input_image, resolution):
    """Resize *input_image* so its short side targets *resolution* (snapped to
    multiples of 64), then run it through RealESRGAN (2x up to 2048, else 4x)."""
    input_image = input_image.convert("RGB")
    width, height = input_image.size
    ratio = float(resolution) / min(height, width)
    new_h = int(round(height * ratio / 64.0)) * 64
    new_w = int(round(width * ratio / 64.0)) * 64
    resized = input_image.resize((new_w, new_h), resample=Image.LANCZOS)
    upscaler = lazy_realesrgan_x2 if resolution <= 2048 else lazy_realesrgan_x4
    return upscaler.predict(resized)
@timer_func
def create_hdr_effect(original_image, hdr):
    """Simulate an HDR look by Mertens-fusing brightness-scaled exposures.

    *hdr* in [0, 1]; 0 returns the input untouched.
    """
    if hdr == 0:
        return original_image
    bgr = cv2.cvtColor(np.array(original_image), cv2.COLOR_RGB2BGR)
    # Exposure multipliers spread around 1.0; the spread grows with hdr.
    offsets = (-0.9, -0.7, -0.45, -0.25, 0.0, 0.2, 0.4, 0.6, 0.8)
    exposures = [cv2.convertScaleAbs(bgr, alpha=1.0 + off * hdr) for off in offsets]
    fused = cv2.createMergeMertens().process(exposures)
    fused_8bit = np.clip(fused * 255, 0, 255).astype('uint8')
    return Image.fromarray(cv2.cvtColor(fused_8bit, cv2.COLOR_BGR2RGB))
# Build and warm the diffusion pipeline at import time so the first request is fast.
lazy_pipe = LazyLoadPipeline()
lazy_pipe.load()
def prepare_image(input_image, resolution, hdr):
    """Produce the ControlNet conditioning image: resize/upscale, then HDR tone."""
    upscaled = resize_and_upscale(input_image, resolution)
    return create_hdr_effect(upscaled, hdr)
@timer_func
def gradio_process_image(input_image, resolution, num_inference_steps, strength, hdr, guidance_scale):
    """Gradio click handler: run tile-ControlNet img2img enhancement.

    Returns [input, result] as numpy arrays for the before/after ImageSlider.
    """
    print("Starting image processing...")
    torch.cuda.empty_cache()
    condition_image = prepare_image(input_image, resolution, hdr)
    pipe_kwargs = dict(
        prompt="masterpiece, best quality, highres",
        negative_prompt="low quality, normal quality, ugly, blurry, blur, lowres, bad anatomy, bad hands, cropped, worst quality, verybadimagenegative_v1.3, JuggernautNegative-neg",
        image=condition_image,
        control_image=condition_image,
        width=condition_image.size[0],
        height=condition_image.size[1],
        strength=strength,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        # Fixed seed -> reproducible output for identical inputs.
        generator=torch.Generator(device=device).manual_seed(0),
    )
    print("Running inference...")
    result = lazy_pipe(**pipe_kwargs).images[0]
    print("Image processing completed successfully")
    # Convert both images to numpy arrays for the slider component.
    return [np.array(input_image), np.array(result)]
# HTML header shown at the top of the Gradio page, with credits for the upstream projects.
title = """<h1 align="center">Image Upscaler with Tile Controlnet</h1>
<p align="center">The main ideas come from</p>
<p><center>
<a href="https://github.com/philz1337x/clarity-upscaler" target="_blank">[philz1337x]</a>
<a href="https://github.com/BatouResearch/controlnet-tile-upscale" target="_blank">[Pau-Lozano]</a>
</center></p>
"""
# Build the Gradio UI. css="./theme.css" points at a stylesheet shipped next to
# the app so styles resolve when the server is reached via port mapping
# (the issue this commit fixes); TODO confirm this gradio version accepts a
# file path (vs. raw CSS string) for the `css` argument.
with gr.Blocks(css="./theme.css") as demo:
    gr.HTML(title)
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil", label="Input Image")
            run_button = gr.Button("Enhance Image")
        with gr.Column():
            output_slider = ImageSlider(label="Before / After", type="numpy")
    with gr.Accordion("Advanced Options", open=False):
        resolution = gr.Slider(minimum=256, maximum=2048, value=512, step=256, label="Resolution")
        num_inference_steps = gr.Slider(minimum=1, maximum=50, value=20, step=1, label="Number of Inference Steps")
        strength = gr.Slider(minimum=0, maximum=1, value=0.4, step=0.01, label="Strength")
        hdr = gr.Slider(minimum=0, maximum=1, value=0, step=0.1, label="HDR Effect")
        guidance_scale = gr.Slider(minimum=0, maximum=20, value=3, step=0.5, label="Guidance Scale")
    run_button.click(fn=gradio_process_image,
                     inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale],
                     outputs=output_slider)
    # Add examples with all required inputs.
    # NOTE: cache_examples=True runs gradio_process_image on every example at startup.
    gr.Examples(
        examples=[
            ["image1.jpg", 512, 20, 0.4, 0, 3],
            ["image2.png", 512, 20, 0.4, 0, 3],
            ["image3.png", 512, 20, 0.4, 0, 3],
        ],
        inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale],
        outputs=output_slider,
        fn=gradio_process_image,
        cache_examples=True,
    )
# share=True opens a public tunnel; requires the frpc_linux_amd64_v0.2 binary
# copied into the gradio package directory by the Dockerfile.
demo.launch(share=True)
\ No newline at end of file
git+https://github.com/doevent/Real-ESRGAN.git
opencv-python
spaces
diffusers
torch
torchvision
......
:root {
--name: default;
--primary-50: #fff7ed;
--primary-100: #ffedd5;
--primary-200: #fed7aa;
--primary-300: #fdba74;
--primary-400: #fb923c;
--primary-500: #f97316;
--primary-600: #ea580c;
--primary-700: #c2410c;
--primary-800: #9a3412;
--primary-900: #7c2d12;
--primary-950: #6c2e12;
--secondary-50: #eff6ff;
--secondary-100: #dbeafe;
--secondary-200: #bfdbfe;
--secondary-300: #93c5fd;
--secondary-400: #60a5fa;
--secondary-500: #3b82f6;
--secondary-600: #2563eb;
--secondary-700: #1d4ed8;
--secondary-800: #1e40af;
--secondary-900: #1e3a8a;
--secondary-950: #1d3660;
--neutral-50: #f9fafb;
--neutral-100: #f3f4f6;
--neutral-200: #e5e7eb;
--neutral-300: #d1d5db;
--neutral-400: #9ca3af;
--neutral-500: #6b7280;
--neutral-600: #4b5563;
--neutral-700: #374151;
--neutral-800: #1f2937;
--neutral-900: #111827;
--neutral-950: #0b0f19;
--spacing-xxs: 1px;
--spacing-xs: 2px;
--spacing-sm: 4px;
--spacing-md: 6px;
--spacing-lg: 8px;
--spacing-xl: 10px;
--spacing-xxl: 16px;
--radius-xxs: 1px;
--radius-xs: 2px;
--radius-sm: 4px;
--radius-md: 6px;
--radius-lg: 8px;
--radius-xl: 12px;
--radius-xxl: 22px;
--text-xxs: 9px;
--text-xs: 10px;
--text-sm: 12px;
--text-md: 14px;
--text-lg: 16px;
--text-xl: 22px;
--text-xxl: 26px;
--font: 'Source Sans Pro', 'ui-sans-serif', 'system-ui', sans-serif;
--font-mono: 'IBM Plex Mono', 'ui-monospace', 'Consolas', monospace;
--body-background-fill: var(--background-fill-primary);
--body-text-color: var(--neutral-800);
--body-text-size: var(--text-md);
--body-text-weight: 400;
--embed-radius: var(--radius-lg);
--color-accent: var(--primary-500);
--color-accent-soft: var(--primary-50);
--background-fill-primary: white;
--background-fill-secondary: var(--neutral-50);
--border-color-accent: var(--primary-300);
--border-color-primary: var(--neutral-200);
--link-text-color: var(--secondary-600);
--link-text-color-active: var(--secondary-600);
--link-text-color-hover: var(--secondary-700);
--link-text-color-visited: var(--secondary-500);
--body-text-color-subdued: var(--neutral-400);
--accordion-text-color: var(--body-text-color);
--table-text-color: var(--body-text-color);
--shadow-drop: rgba(0,0,0,0.05) 0px 1px 2px 0px;
--shadow-drop-lg: 0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1);
--shadow-inset: rgba(0,0,0,0.05) 0px 2px 4px 0px inset;
--shadow-spread: 3px;
--block-background-fill: var(--background-fill-primary);
--block-border-color: var(--border-color-primary);
--block-border-width: 1px;
--block-info-text-color: var(--body-text-color-subdued);
--block-info-text-size: var(--text-sm);
--block-info-text-weight: 400;
--block-label-background-fill: var(--background-fill-primary);
--block-label-border-color: var(--border-color-primary);
--block-label-border-width: 1px;
--block-label-shadow: var(--block-shadow);
--block-label-text-color: var(--neutral-500);
--block-label-margin: 0;
--block-label-padding: var(--spacing-sm) var(--spacing-lg);
--block-label-radius: calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px) 0;
--block-label-right-radius: 0 calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px);
--block-label-text-size: var(--text-sm);
--block-label-text-weight: 400;
--block-padding: var(--spacing-xl) calc(var(--spacing-xl) + 2px);
--block-radius: var(--radius-lg);
--block-shadow: var(--shadow-drop);
--block-title-background-fill: none;
--block-title-border-color: none;
--block-title-border-width: 0px;
--block-title-text-color: var(--neutral-500);
--block-title-padding: 0;
--block-title-radius: none;
--block-title-text-size: var(--text-md);
--block-title-text-weight: 400;
--container-radius: var(--radius-lg);
--form-gap-width: 1px;
--layout-gap: var(--spacing-xxl);
--panel-background-fill: var(--background-fill-secondary);
--panel-border-color: var(--border-color-primary);
--panel-border-width: 0;
--section-header-text-size: var(--text-md);
--section-header-text-weight: 400;
--border-color-accent-subdued: var(--primary-200);
--code-background-fill: var(--neutral-100);
--checkbox-background-color: var(--background-fill-primary);
--checkbox-background-color-focus: var(--checkbox-background-color);
--checkbox-background-color-hover: var(--checkbox-background-color);
--checkbox-background-color-selected: var(--secondary-600);
--checkbox-border-color: var(--neutral-300);
--checkbox-border-color-focus: var(--secondary-500);
--checkbox-border-color-hover: var(--neutral-300);
--checkbox-border-color-selected: var(--secondary-600);
--checkbox-border-radius: var(--radius-sm);
--checkbox-border-width: var(--input-border-width);
--checkbox-label-background-fill: linear-gradient(to top, var(--neutral-50), white);
--checkbox-label-background-fill-hover: linear-gradient(to top, var(--neutral-100), white);
--checkbox-label-background-fill-selected: var(--checkbox-label-background-fill);
--checkbox-label-border-color: var(--border-color-primary);
--checkbox-label-border-color-hover: var(--checkbox-label-border-color);
--checkbox-label-border-width: var(--input-border-width);
--checkbox-label-gap: var(--spacing-lg);
--checkbox-label-padding: var(--spacing-md) calc(2 * var(--spacing-md));
--checkbox-label-shadow: var(--shadow-drop);
--checkbox-label-text-size: var(--text-md);
--checkbox-label-text-weight: 400;
--checkbox-check: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e");
--radio-circle: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e");
--checkbox-shadow: var(--input-shadow);
--checkbox-label-text-color: var(--body-text-color);
--checkbox-label-text-color-selected: var(--checkbox-label-text-color);
--error-background-fill: #fef2f2;
--error-border-color: #b91c1c;
--error-border-width: 1px;
--error-text-color: #b91c1c;
--error-icon-color: #b91c1c;
--input-background-fill: white;
--input-background-fill-focus: var(--secondary-500);
--input-background-fill-hover: var(--input-background-fill);
--input-border-color: var(--border-color-primary);
--input-border-color-focus: var(--secondary-300);
--input-border-color-hover: var(--input-border-color);
--input-border-width: 1px;
--input-padding: var(--spacing-xl);
--input-placeholder-color: var(--neutral-400);
--input-radius: var(--radius-lg);
--input-shadow: 0 0 0 var(--shadow-spread) transparent, var(--shadow-inset);
--input-shadow-focus: 0 0 0 var(--shadow-spread) var(--secondary-50), var(--shadow-inset);
--input-text-size: var(--text-md);
--input-text-weight: 400;
--loader-color: var(--color-accent);
--prose-text-size: var(--text-md);
--prose-text-weight: 400;
--prose-header-text-weight: 600;
--slider-color: #2563eb;
--stat-background-fill: linear-gradient(to right, var(--primary-400), var(--primary-200));
--table-border-color: var(--neutral-300);
--table-even-background-fill: white;
--table-odd-background-fill: var(--neutral-50);
--table-radius: var(--radius-lg);
--table-row-focus: var(--color-accent-soft);
--button-border-width: var(--input-border-width);
--button-cancel-background-fill: linear-gradient(to bottom right, #fee2e2, #fecaca);
--button-cancel-background-fill-hover: linear-gradient(to bottom right, #fee2e2, #fee2e2);
--button-cancel-border-color: #fecaca;
--button-cancel-border-color-hover: var(--button-cancel-border-color);
--button-cancel-text-color: #dc2626;
--button-cancel-text-color-hover: var(--button-cancel-text-color);
--button-large-padding: var(--spacing-lg) calc(2 * var(--spacing-lg));
--button-large-radius: var(--radius-lg);
--button-large-text-size: var(--text-lg);
--button-large-text-weight: 600;
--button-primary-background-fill: linear-gradient(to bottom right, var(--primary-100), var(--primary-300));
--button-primary-background-fill-hover: linear-gradient(to bottom right, var(--primary-100), var(--primary-200));
--button-primary-border-color: var(--primary-200);
--button-primary-border-color-hover: var(--button-primary-border-color);
--button-primary-text-color: var(--primary-600);
--button-primary-text-color-hover: var(--button-primary-text-color);
--button-secondary-background-fill: linear-gradient(to bottom right, var(--neutral-100), var(--neutral-200));
--button-secondary-background-fill-hover: linear-gradient(to bottom right, var(--neutral-100), var(--neutral-100));
--button-secondary-border-color: var(--neutral-200);
--button-secondary-border-color-hover: var(--button-secondary-border-color);
--button-secondary-text-color: var(--neutral-700);
--button-secondary-text-color-hover: var(--button-secondary-text-color);
--button-shadow: var(--shadow-drop);
--button-shadow-active: var(--shadow-inset);
--button-shadow-hover: var(--shadow-drop-lg);
--button-small-padding: var(--spacing-sm) calc(2 * var(--spacing-sm));
--button-small-radius: var(--radius-lg);
--button-small-text-size: var(--text-md);
--button-small-text-weight: 400;
--button-transition: none;
}
.dark {
--body-background-fill: var(--background-fill-primary);
--body-text-color: var(--neutral-100);
--color-accent-soft: var(--neutral-700);
--background-fill-primary: var(--neutral-950);
--background-fill-secondary: var(--neutral-900);
--border-color-accent: var(--neutral-600);
--border-color-primary: var(--neutral-700);
--link-text-color-active: var(--secondary-500);
--link-text-color: var(--secondary-500);
--link-text-color-hover: var(--secondary-400);
--link-text-color-visited: var(--secondary-600);
--body-text-color-subdued: var(--neutral-400);
--accordion-text-color: var(--body-text-color);
--table-text-color: var(--body-text-color);
--shadow-spread: 1px;
--block-background-fill: var(--neutral-800);
--block-border-color: var(--border-color-primary);
--block_border_width: None;
--block-info-text-color: var(--body-text-color-subdued);
--block-label-background-fill: var(--background-fill-secondary);
--block-label-border-color: var(--border-color-primary);
--block_label_border_width: None;
--block-label-text-color: var(--neutral-200);
--block_shadow: None;
--block_title_background_fill: None;
--block_title_border_color: None;
--block_title_border_width: None;
--block-title-text-color: var(--neutral-200);
--panel-background-fill: var(--background-fill-secondary);
--panel-border-color: var(--border-color-primary);
--panel_border_width: None;
--border-color-accent-subdued: var(--border-color-accent);
--code-background-fill: var(--neutral-800);
--checkbox-background-color: var(--neutral-800);
--checkbox-background-color-focus: var(--checkbox-background-color);
--checkbox-background-color-hover: var(--checkbox-background-color);
--checkbox-background-color-selected: var(--secondary-600);
--checkbox-border-color: var(--neutral-700);
--checkbox-border-color-focus: var(--secondary-500);
--checkbox-border-color-hover: var(--neutral-600);
--checkbox-border-color-selected: var(--secondary-600);
--checkbox-border-width: var(--input-border-width);
--checkbox-label-background-fill: linear-gradient(to top, var(--neutral-900), var(--neutral-800));
--checkbox-label-background-fill-hover: linear-gradient(to top, var(--neutral-900), var(--neutral-800));
--checkbox-label-background-fill-selected: var(--checkbox-label-background-fill);
--checkbox-label-border-color: var(--border-color-primary);
--checkbox-label-border-color-hover: var(--checkbox-label-border-color);
--checkbox-label-border-width: var(--input-border-width);
--checkbox-label-text-color: var(--body-text-color);
--checkbox-label-text-color-selected: var(--checkbox-label-text-color);
--error-background-fill: var(--neutral-900);
--error-border-color: #ef4444;
--error_border_width: None;
--error-text-color: #fef2f2;
--error-icon-color: #ef4444;
--input-background-fill: var(--neutral-800);
--input-background-fill-focus: var(--secondary-600);
--input-background-fill-hover: var(--input-background-fill);
--input-border-color: var(--border-color-primary);
--input-border-color-focus: var(--neutral-700);
--input-border-color-hover: var(--input-border-color);
--input_border_width: None;
--input-placeholder-color: var(--neutral-500);
--input_shadow: None;
--input-shadow-focus: 0 0 0 var(--shadow-spread) var(--neutral-700), var(--shadow-inset);
--loader_color: None;
--slider_color: None;
--stat-background-fill: linear-gradient(to right, var(--primary-400), var(--primary-600));
--table-border-color: var(--neutral-700);
--table-even-background-fill: var(--neutral-950);
--table-odd-background-fill: var(--neutral-900);
--table-row-focus: var(--color-accent-soft);
--button-border-width: var(--input-border-width);
--button-cancel-background-fill: linear-gradient(to bottom right, #dc2626, #b91c1c);
--button-cancel-background-fill-hover: linear-gradient(to bottom right, #dc2626, #dc2626);
--button-cancel-border-color: #dc2626;
--button-cancel-border-color-hover: var(--button-cancel-border-color);
--button-cancel-text-color: white;
--button-cancel-text-color-hover: var(--button-cancel-text-color);
--button-primary-background-fill: linear-gradient(to bottom right, var(--primary-500), var(--primary-600));
--button-primary-background-fill-hover: linear-gradient(to bottom right, var(--primary-500), var(--primary-500));
--button-primary-border-color: var(--primary-500);
--button-primary-border-color-hover: var(--button-primary-border-color);
--button-primary-text-color: white;
--button-primary-text-color-hover: var(--button-primary-text-color);
--button-secondary-background-fill: linear-gradient(to bottom right, var(--neutral-600), var(--neutral-700));
--button-secondary-background-fill-hover: linear-gradient(to bottom right, var(--neutral-600), var(--neutral-600));
--button-secondary-border-color: var(--neutral-600);
--button-secondary-border-color-hover: var(--button-secondary-border-color);
--button-secondary-text-color: white;
--button-secondary-text-color-hover: var(--button-secondary-text-color);
--name: default;
--primary-50: #fff7ed;
--primary-100: #ffedd5;
--primary-200: #fed7aa;
--primary-300: #fdba74;
--primary-400: #fb923c;
--primary-500: #f97316;
--primary-600: #ea580c;
--primary-700: #c2410c;
--primary-800: #9a3412;
--primary-900: #7c2d12;
--primary-950: #6c2e12;
--secondary-50: #eff6ff;
--secondary-100: #dbeafe;
--secondary-200: #bfdbfe;
--secondary-300: #93c5fd;
--secondary-400: #60a5fa;
--secondary-500: #3b82f6;
--secondary-600: #2563eb;
--secondary-700: #1d4ed8;
--secondary-800: #1e40af;
--secondary-900: #1e3a8a;
--secondary-950: #1d3660;
--neutral-50: #f9fafb;
--neutral-100: #f3f4f6;
--neutral-200: #e5e7eb;
--neutral-300: #d1d5db;
--neutral-400: #9ca3af;
--neutral-500: #6b7280;
--neutral-600: #4b5563;
--neutral-700: #374151;
--neutral-800: #1f2937;
--neutral-900: #111827;
--neutral-950: #0b0f19;
--spacing-xxs: 1px;
--spacing-xs: 2px;
--spacing-sm: 4px;
--spacing-md: 6px;
--spacing-lg: 8px;
--spacing-xl: 10px;
--spacing-xxl: 16px;
--radius-xxs: 1px;
--radius-xs: 2px;
--radius-sm: 4px;
--radius-md: 6px;
--radius-lg: 8px;
--radius-xl: 12px;
--radius-xxl: 22px;
--text-xxs: 9px;
--text-xs: 10px;
--text-sm: 12px;
--text-md: 14px;
--text-lg: 16px;
--text-xl: 22px;
--text-xxl: 26px;
--font: 'Source Sans Pro', 'ui-sans-serif', 'system-ui', sans-serif;
--font-mono: 'IBM Plex Mono', 'ui-monospace', 'Consolas', monospace;
--body-text-size: var(--text-md);
--body-text-weight: 400;
--embed-radius: var(--radius-lg);
--color-accent: var(--primary-500);
--shadow-drop: rgba(0,0,0,0.05) 0px 1px 2px 0px;
--shadow-drop-lg: 0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1);
--shadow-inset: rgba(0,0,0,0.05) 0px 2px 4px 0px inset;
--block-border-width: 1px;
--block-info-text-size: var(--text-sm);
--block-info-text-weight: 400;
--block-label-border-width: 1px;
--block-label-shadow: var(--block-shadow);
--block-label-margin: 0;
--block-label-padding: var(--spacing-sm) var(--spacing-lg);
--block-label-radius: calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px) 0;
--block-label-right-radius: 0 calc(var(--radius-lg) - 1px) 0 calc(var(--radius-lg) - 1px);
--block-label-text-size: var(--text-sm);
--block-label-text-weight: 400;
--block-padding: var(--spacing-xl) calc(var(--spacing-xl) + 2px);
--block-radius: var(--radius-lg);
--block-shadow: var(--shadow-drop);
--block-title-background-fill: none;
--block-title-border-color: none;
--block-title-border-width: 0px;
--block-title-padding: 0;
--block-title-radius: none;
--block-title-text-size: var(--text-md);
--block-title-text-weight: 400;
--container-radius: var(--radius-lg);
--form-gap-width: 1px;
--layout-gap: var(--spacing-xxl);
--panel-border-width: 0;
--section-header-text-size: var(--text-md);
--section-header-text-weight: 400;
--checkbox-border-radius: var(--radius-sm);
--checkbox-label-gap: var(--spacing-lg);
--checkbox-label-padding: var(--spacing-md) calc(2 * var(--spacing-md));
--checkbox-label-shadow: var(--shadow-drop);
--checkbox-label-text-size: var(--text-md);
--checkbox-label-text-weight: 400;
--checkbox-check: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e");
--radio-circle: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e");
--checkbox-shadow: var(--input-shadow);
--error-border-width: 1px;
--input-border-width: 1px;
--input-padding: var(--spacing-xl);
--input-radius: var(--radius-lg);
--input-shadow: 0 0 0 var(--shadow-spread) transparent, var(--shadow-inset);
--input-text-size: var(--text-md);
--input-text-weight: 400;
--loader-color: var(--color-accent);
--prose-text-size: var(--text-md);
--prose-text-weight: 400;
--prose-header-text-weight: 600;
--slider-color: #2563eb;
--table-radius: var(--radius-lg);
--button-large-padding: var(--spacing-lg) calc(2 * var(--spacing-lg));
--button-large-radius: var(--radius-lg);
--button-large-text-size: var(--text-lg);
--button-large-text-weight: 600;
--button-shadow: var(--shadow-drop);
--button-shadow-active: var(--shadow-inset);
--button-shadow-hover: var(--shadow-drop-lg);
--button-small-padding: var(--spacing-sm) calc(2 * var(--spacing-sm));
--button-small-radius: var(--radius-lg);
--button-small-text-size: var(--text-md);
--button-small-text-weight: 400;
--button-transition: none;
}
\ No newline at end of file
......@@ -12,6 +12,7 @@ model_list = [
"philz1337x/loras",
"lllyasviel/ControlNet-v1-1",
"stabilityai/sd-vae-ft-mse-original",
"CompVis/stable-diffusion-safety-checker"
]
for model_path in model_list:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment