Commit d8864e63 authored by dengjb's avatar dengjb
Browse files

update codes

parent ba4486d8
This diff is collapsed.
from datetime import datetime
import os
import shutil
from .ui import StableDiffusionUI
from .utils import save_image_info
from IPython.display import clear_output, display
import ipywidgets as widgets
from ipywidgets import Layout,HBox,VBox,Box
from . import views
class StableDiffusionUI_txt2img(StableDiffusionUI):
    """Text-to-image tab of the Stable Diffusion notebook GUI.

    Builds the ipywidgets form for txt2img parameters, wires the
    run / collect buttons, and displays generated images inline.
    """

    def __init__(self, **kwargs):
        super().__init__()  # pipeline setup is deliberately deferred for now

        # Per-instance CSS scope: class name plus the last 4 hex digits of
        # the instance hash.  The '_' separator is concatenated outside the
        # slice; the previous code sliced the whole formatted string
        # ('_{:X}'.format(...)[-4:]) which silently dropped the underscore.
        CLASS_NAME = self.__class__.__name__ \
                + '_' + '{:X}'.format(hash(self))[-4:]

        STYLE_SHEETS = '''
@media (max-width:576px) {
    {root} .standard_size,
    {root} .superres_model_name {
        order: -1;
    }
    {root} button.run_button,
    {root} button.collect_button {
        width: 45% !important;
    }
}
'''
        # Default parameters; the effective override order is:
        #   user_config.py > config.py > args below > views.py defaults
        args = {  # note: invalid keys raise errors downstream
            "prompt": '',
            "negative_prompt": '',
            "width": 512,
            "height": 512,
        }
        args.update(kwargs)
        widget_opt = self.widget_opt

        # --- prompt widgets ---
        view_prompts = views.createPromptsView(
            value = args['prompt'],
            negative_value = args['negative_prompt'],
        )
        widget_opt['prompt'] = view_prompts['prompt']
        widget_opt['negative_prompt'] = view_prompts['negative_prompt']

        # --- image size widgets ---
        view_width_height = views.createWidthHeightView(
            width_value = args['width'],
            height_value = args['height'],
            step64 = True,
        )
        widget_opt['width'] = view_width_height['width']
        widget_opt['height'] = view_width_height['height']

        # Remaining options come from the shared widget factory in views.py.
        for key in (
            'standard_size',
            'num_return_images',
            'enable_parsing',
            'num_inference_steps',
            'guidance_scale',
            'max_embeddings_multiples',
            'fp16',
            'seed',
            'superres_model_name',
            'output_dir',
            'sampler',
            'model_name',
            'concepts_library_dir'
        ):
            widget_opt[key] = views.createView(key)
            if key in args:
                widget_opt[key].value = args[key]

        # --- event bindings ---
        # 'standard_size' encodes a preset as width * 10000 + height.
        def on_standard_size_change(change):
            widget_opt['width'].value = change.new // 10000
            widget_opt['height'].value = change.new % 10000
        widget_opt['standard_size'].observe(
            on_standard_size_change,
            names = 'value'
        )

        # A fixed seed implies a single output image, and vice versa.
        def on_seed_change(change):
            if change.new != -1:
                widget_opt['num_return_images'].value = 1
        def on_num_return_images(change):
            if change.new != 1:
                widget_opt['seed'].value = -1
        widget_opt['seed'].observe(on_seed_change, names='value')
        widget_opt['num_return_images'].observe(on_num_return_images, names='value')

        # --- run / collect buttons ---
        self.run_button = views.createView('run_button')
        self.collect_button = views.createView('collect_button')
        # Paths of the images produced by the most recent run.
        self._output_collections = []
        self.run_button.on_click(self.on_run_button_click)
        self.collect_button.on_click(self.on_collect_button_click)

        # --- style sheets, scoped to this instance via the {root} token ---
        STYLE_SHEETS = ('<style>' \
            + views.SHARED_STYLE_SHEETS \
            + STYLE_SHEETS \
            + view_prompts.style_sheets \
            + view_width_height.style_sheets \
            + '</style>'
        ).replace('{root}', '.' + CLASS_NAME)

        # --- assemble the GUI tree ---
        self.gui = views.createView("box_gui",
            class_name = CLASS_NAME,
            children = [
                widgets.HTML(STYLE_SHEETS),
                view_prompts.container,
                views.createView("box_main",
                [
                    widget_opt['standard_size'],
                    view_width_height.container,
                    widget_opt['superres_model_name'],
                    widget_opt['num_inference_steps'],
                    widget_opt['guidance_scale'],
                    widget_opt['sampler'],
                    widget_opt['num_return_images'],
                    widget_opt['seed'],
                    widget_opt['enable_parsing'],
                    widget_opt['max_embeddings_multiples'],
                    widget_opt['fp16'],
                    widget_opt['model_name'],
                    widget_opt['output_dir'],
                    widget_opt['concepts_library_dir']
                ]),
                HBox(
                    (self.run_button,self.collect_button,),
                    layout = Layout(
                        justify_content = 'space-around',
                        max_width = '100%',
                    )
                ),
                self.run_button_out
            ],
        )

    def on_collect_button_click(self, b):
        """Move the last run's images (and .txt metadata sidecars) into a
        dated 'Favorates' folder, then disable the collect button."""
        with self.run_button_out:
            # e.g. 'Favorates/txt2img-0131/' — the 'Favorates' spelling is
            # kept as-is for compatibility with folders already on disk.
            # (renamed local from 'dir' to avoid shadowing the builtin)
            folder = datetime.now().strftime(f'Favorates/{self.task}-%m%d/')
            info = '收藏图片到 ' + folder
            folder = './' + folder
            os.makedirs(folder, exist_ok=True)
            for file in self._output_collections:
                if os.path.isfile(file):
                    shutil.move(file, folder)
                    print(info + os.path.basename(file))
                # Also move the metadata sidecar saved next to the image.
                file = file[:-4] + '.txt'
                if os.path.isfile(file):
                    shutil.move(file, folder)
            self._output_collections.clear()
            self.collect_button.disabled = True

    def on_run_button_click(self, b):
        """Run generation with both buttons disabled; re-enable afterwards.

        The collect button stays disabled unless at least one image was
        actually produced.
        """
        with self.run_button_out:
            self._output_collections.clear()
            self.collect_button.disabled = True
            self.run_button.disabled = True
            try:
                super().on_run_button_click(b)
            finally:
                self.run_button.disabled = False
                self.collect_button.disabled = len(self._output_collections) < 1

    def on_image_generated(self, image, options, count = 0, total = 1, image_info = None):
        """Save one generated image, remember it for collection, display it
        and print the seed / progress line."""
        image_path = save_image_info(image, options.output_dir)
        self._output_collections.append(image_path)

        # Periodically clear the output area to keep the notebook light.
        if count % 5 == 0:
            clear_output()

        try:
            # Display the saved file so the embedded metadata is preserved.
            display(widgets.Image.from_file(image_path))
        except Exception:
            # Narrowed from a bare except: fall back to the in-memory image.
            display(image)

        print('Seed = ', image.argument['seed'],
            ' (%d / %d ... %.2f%%)'%(count + 1, total, (count + 1.) / total * 100))
from .ui import StableDiffusionUI
import ipywidgets as widgets
from ipywidgets import Layout,HBox,VBox,Box
class SuperResolutionUI(StableDiffusionUI):
    """A minimal three-field form for upscaling a single image with a
    super-resolution model, plus a run button."""

    def __init__(self, pipeline, **kwargs):
        super().__init__(pipeline = pipeline)
        self.task = 'superres'

        # Default parameters; the effective override order is:
        #   user_config.py > config.py > args below > views.py defaults
        args = {  # note: invalid keys raise errors downstream
            "image_path": 'resources/image_Kurisu.png',
            "superres_model_name": 'falsr_a',
            "output_dir": 'outputs/highres',
        }
        args.update(kwargs)
        widget_opt = self.widget_opt

        # One shared layout and label style for all three controls.
        col_layout = Layout(
            flex = "12 12 90%",
            margin = "0.5em",
            align_items = "center"
        )
        label_style = {
            'description_width': "9rem"
        }

        # Path of the image to upscale.
        widget_opt['image_path'] = widgets.Text(
            layout=col_layout, style=label_style,
            description='需要超分的图片路径' ,
            value=args['image_path'],
            disabled=False
        )
        # Which super-resolution model to apply.
        widget_opt['superres_model_name'] = widgets.Dropdown(
            layout=col_layout, style=label_style,
            description='超分模型的名字',
            value=args['superres_model_name'],
            options=["falsr_a", "falsr_b", "falsr_c"],
            disabled=False
        )
        # Where the upscaled result is written.
        widget_opt['output_dir'] = widgets.Text(
            layout=col_layout, style=label_style,
            description='图片的保存路径',
            value=args['output_dir'],
            disabled=False
        )

        self.run_button = widgets.Button(
            description='点击超分图片!',
            disabled=False,
            button_style='success', # 'success', 'info', 'warning', 'danger' or ''
            tooltip='Click to run (settings will update automatically)',
            icon='check'
        )
        self.run_button.on_click(self.on_run_button_click)

        self.gui = widgets.Box(
            [
                widget_opt['image_path'],
                widget_opt['superres_model_name'],
                widget_opt['output_dir'],
                self.run_button,
                self.run_button_out
            ],
            layout = Layout(
                display = "flex",
                flex_flow = "row wrap", # HBox overrides this property
                align_items = "center",
                # max_width = '100%',
                margin="0 45px 0 0"
            )
        )
from .env import DEBUG_UI
from .config import config

if DEBUG_UI:
    # Debug mode: build only the txt2img / img2img GUIs so the package can
    # load without the heavier imports from .ui (pipelines, training UIs).
    print('==================================================')
    print('调试环境')
    print('==================================================')
    from .StableDiffusionUI_txt2img import StableDiffusionUI_txt2img
    from .StableDiffusionUI_img2img import StableDiffusionUI_img2img

    gui_txt2img = StableDiffusionUI_txt2img(**config['txt2img'])
    gui_img2img = StableDiffusionUI_img2img(**config['img2img'])
    # Inpainting reuses the img2img GUI object.
    gui_inpaint = gui_img2img
else:
    from .ui import (
        StableDiffusionUI_text_inversion,
        StableDiffusionUI_dreambooth,
        pipeline_superres,
        pipeline,
        StableDiffusionUI_convert,
    )
    from .StableDiffusionUI_txt2img import StableDiffusionUI_txt2img
    from .StableDiffusionUI_img2img import StableDiffusionUI_img2img
    from .SuperResolutionUI import SuperResolutionUI

    # One GUI object per task, each configured from its config.py section.
    gui_txt2img = StableDiffusionUI_txt2img(
        **config['txt2img']
    )
    gui_img2img = StableDiffusionUI_img2img(
        **config['img2img']
    )
    gui_superres = SuperResolutionUI(
        pipeline = pipeline_superres,
        **config['superres']
    )
    gui_train_text_inversion = StableDiffusionUI_text_inversion(
        **config['train_text_inversion']
    )
    gui_text_inversion = StableDiffusionUI_txt2img(
        **config['text_inversion']
    )
    gui_dreambooth = StableDiffusionUI_dreambooth(  # dreambooth UI
        **config['dreambooth']
    )
    gui_convert = StableDiffusionUI_convert(
        **config['convert']
    )
    # Inpainting reuses the img2img GUI object.
    gui_inpaint = gui_img2img
\ No newline at end of file
# Default settings for every GUI task.  Each top-level key matches one GUI
# (see the package __init__); commented-out entries document further
# supported keys together with their views.py defaults.
config = {
    # text-to-image generation
    "txt2img": {
        "prompt": 'extremely detailed CG unity 8k wallpaper,black long hair,cute face,1 adult girl,happy, green skirt dress, flower pattern in dress,solo,green gown,art of light novel,in field',
        "negative_prompt": 'lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry',
        "width": 512,
        "height": 512,
        # "seed": -1,
        # "num_return_images": 1,
        # "num_inference_steps": 50,
        # "guidance_scale": 7.5,
        # "fp16": 'float16',
        # "superres_model_name": '无',
        # "max_embeddings_multiples": '3',
        # "enable_parsing": '圆括号 () 加强权重',
        # "sampler": 'default',
        "model_name": 'MoososCap/NOVEL-MODEL',
        "output_dir": 'outputs/txt2img',
    },
    # image-to-image generation (width/height -1 = keep the source size)
    "img2img": {
        "prompt": 'red dress',
        "negative_prompt": 'lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry',
        "width": -1,
        "height": -1,
        "num_return_images": 1,
        "strength": 0.8,
        "model_name": 'MoososCap/NOVEL-MODEL',
        "image_path": 'resources/cat2.jpg',
        "mask_path": 'resources/mask8.jpg',
        "output_dir": 'outputs/img2img',
    },
    # single-image super-resolution
    "superres": {
        "image_path": 'resources/image_Kurisu.png',
        "superres_model_name": 'falsr_a',
        "output_dir": 'outputs/highres',
    },
    # textual-inversion training
    "train_text_inversion": {
        "learnable_property": 'object',
        "placeholder_token": '<Alice>',
        "initializer_token": 'girl',
        "repeats": '100',
        "train_data_dir": 'resources/Alices',
        "output_dir": 'outputs/textual_inversion',
        "height": 512,
        "width": 512,
        "learning_rate": 5e-4,
        "max_train_steps": 1000,
        "save_steps": 200,
        "model_name": "MoososCap/NOVEL-MODEL",
    },
    # txt2img run using a trained textual-inversion token
    "text_inversion": {
        "width": 512,
        "height": 512,
        "prompt": '<Alice> at the lake',
        "negative_prompt": '',
        "output_dir": 'outputs/text_inversion_txt2img',
    },
    # Dreambooth training section
    "dreambooth": {
        "pretrained_model_name_or_path": "MoososCap/NOVEL-MODEL",
        "instance_data_dir": 'resources/Alices',
        "instance_prompt": 'a photo of Alices',
        "class_data_dir": 'resources/Girls',
        "class_prompt": 'a photo of girls',
        "num_class_images": 100,
        "prior_loss_weight": 1.0,
        "with_prior_preservation": True,
        #"num_train_epochs": 1,
        "max_train_steps": 1000,
        "save_steps": 1000,
        "train_text_encoder": False,
        "height": 512,
        "width": 512,
        "learning_rate": 5e-6,
        "lr_scheduler": "constant",
        "lr_warmup_steps": 500,
        "center_crop": True,
        "output_dir": 'outputs/dreambooth',
    },
    # checkpoint conversion
    "convert": {
        "checkpoint_path": '',
        'dump_path': 'outputs/convert'
    },
}
try:
from user_config import config as _config
for k in _config:
if k in config:
config[k].update(_config[k])
else:
config[k] = _config[k]
except:
pass
\ No newline at end of file
This diff is collapsed.
This diff is collapsed.
# Debug flag: when True, the UI package builds only a reduced set of GUIs
# (txt2img / img2img) instead of the full pipeline-backed set.
DEBUG_UI = False
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
# Latent Diffusion (Stable Diffusion v1) inference configuration for ldm.
# Indentation reconstructed to the canonical v1-inference.yaml layout;
# all values kept exactly as in the original file.
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: True

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 512
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
import os
# check main envs
def check_install(verbose = True):
    """Check that the runtime dependencies are importable.

    paddle is a hard prerequisite (a specific DTK build is required and
    cannot be pip-installed generically), so a missing paddle aborts with a
    non-zero exit code.  Any other missing/broken package triggers a
    best-effort `pip install` of the whole known-good set.

    Parameters:
        verbose: when True, print a notice before installing missing libs.
    """
    try:
        import paddle
    except ImportError:
        # Narrowed from a bare `except:`.  `exit()` exited with status 0
        # despite the failure; raise SystemExit(1) to signal the error.
        print("please install paddle==2.4.2 with version dtk-23.04 before running")
        raise SystemExit(1)
    try:
        print('checking install.......')
        import safetensors
        from ppdiffusers.utils import image_grid
        from paddlenlp.transformers.clip.feature_extraction import CLIPFeatureExtractor
        from paddlenlp.transformers import FeatureExtractionMixin
        import ipywidgets
        import PIL
        import tqdm
        print('检测完成,库完整')
    except (ModuleNotFoundError, ImportError, AttributeError):
        # Something is missing or incompatible: (re)install pinned versions.
        if verbose: print('检测到库不完整, 正在安装库')
        os.system("pip install -U pip -i https://mirror.baidu.com/pypi/simple")
        os.system("pip install -U OmegaConf --user")
        os.system("pip install ppdiffusers==0.9.0 --user")
        os.system("pip install paddlenlp==2.4.9 --user")
        os.system("pip install -U safetensors --user")
        os.system("pip install ipython==8.14.0")
        os.system("pip install ipywidgets==8.0.7")
        os.system("pip install pillow==9.5.0")
        os.system("pip install tqdm==4.65.0")
def start_end(name, func):
    """Smoke-test one GUI object by simulating a run-button click.

    Prints a start banner, invokes the object's click handler with the
    placeholder argument 'test', then prints a finish banner.
    """
    head = f'------- test {name} start -------'
    tail = f'------- test {name} finished -------'
    print(head)
    func.on_run_button_click('test')
    print(tail)
if __name__=='__main__':
    # Smoke-test entry point: ensure dependencies are present, then
    # simulate one run-button click on each major GUI.
    check_install()
    from ui import gui_train_text_inversion,gui_txt2img,gui_img2img
    start_end('txt2img',gui_txt2img)
    start_end('img2img',gui_img2img)
    start_end('txt2img train',gui_train_text_inversion)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment