Commit 82bb2665 authored by chenpangpang

feat: initial commit

parent 28b6bf0f
weights/
*.pt
*.bin
*.safetensors
.*
!.gitignore
__pycache__
FROM image.sourcefind.cn:5000/gpu/admin/base/jupyterlab-pytorch:2.2.0-python3.10-cuda12.1-ubuntu22.04 as base
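# Build stage: clone the project repository and install its Python dependencies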
ARG IMAGE=flux-lora-the-explorer
ARG IMAGE_UPPER=flux-lora-the-explorer
ARG BRANCH=gpu
RUN cd /root && git clone -b $BRANCH http://developer.hpccube.com/codes/chenpangpang/$IMAGE.git
WORKDIR /root/$IMAGE/$IMAGE_UPPER
RUN pip install -r requirements.txt
#########
# Prod #
#########
FROM image.sourcefind.cn:5000/gpu/admin/base/jupyterlab-pytorch:2.2.0-python3.10-cuda12.1-ubuntu22.04
ARG IMAGE=flux-lora-the-explorer
ARG IMAGE_UPPER=flux-lora-the-explorer
COPY chenyh/$IMAGE/frpc_linux_amd64_v0.2 /opt/conda/lib/python3.10/site-packages/gradio/
RUN chmod +x /opt/conda/lib/python3.10/site-packages/gradio/frpc_linux_amd64_v0.2
COPY chenyh/$IMAGE/* /root/$IMAGE_UPPER/
COPY --from=base /opt/conda/lib/python3.10/site-packages /opt/conda/lib/python3.10/site-packages
COPY --from=base /root/$IMAGE/$IMAGE_UPPER /root/$IMAGE_UPPER
COPY --from=base /root/$IMAGE/启动器.ipynb /root/$IMAGE/start.sh /root/$IMAGE/assets /root/
---
title: FLUX LoRA the Explorer
emoji: 🏆
colorFrom: red
colorTo: pink
sdk: gradio
sdk_version: 4.41.0
app_file: app.py
pinned: false
license: mit
models:
- black-forest-labs/FLUX.1-dev
- alvdansen/frosting_lane_flux
- XLabs-AI/flux-RealismLora
- alvdansen/softserve_anime
- davisbro/half_illustration
- Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style
- linoyts/yarn_art_Flux_LoRA
- kudzueye/Boreal
- XLabs-AI/flux-lora-collection
- martintomov/retrofuturism-flux
- dataautogpt3/FLUX-SyntheticAnime
- veryVANYA/ps1-style-flux
- multimodalart/flux-tarot-v1
- alfredplpl/flux.1-dev-modern-anime-lora
---
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
import gradio as gr
import json
import logging
import torch
from PIL import Image
import spaces
from diffusers import DiffusionPipeline
import copy
import random
import time
# Load LoRAs from JSON file
with open('loras.json', 'r') as f:
loras = json.load(f)
# Initialize the base model
base_model = "black-forest-labs/FLUX.1-dev"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
MAX_SEED = 2**32-1
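# Context manager that times a block of work and prints the elapsed seconds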
class calculateDuration:
def __init__(self, activity_name=""):
self.activity_name = activity_name
def __enter__(self):
self.start_time = time.time()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.end_time = time.time()
self.elapsed_time = self.end_time - self.start_time
if self.activity_name:
print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
else:
print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
def update_selection(evt: gr.SelectData, width, height):
selected_lora = loras[evt.index]
new_placeholder = f"Type a prompt for {selected_lora['title']}"
lora_repo = selected_lora["repo"]
updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
if "aspect" in selected_lora:
if selected_lora["aspect"] == "portrait":
width = 768
height = 1024
elif selected_lora["aspect"] == "landscape":
width = 1024
height = 768
return (
gr.update(placeholder=new_placeholder),
updated_text,
evt.index,
width,
height,
)
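# Request a GPU allocation of up to 70 seconds per call (ZeroGPU)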
@spaces.GPU(duration=70)
def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
pipe.to("cuda")
generator = torch.Generator(device="cuda").manual_seed(seed)
with calculateDuration("Generating image"):
# Generate image
image = pipe(
prompt=f"{prompt} {trigger_word}",
num_inference_steps=steps,
guidance_scale=cfg_scale,
width=width,
height=height,
generator=generator,
joint_attention_kwargs={"scale": lora_scale},
).images[0]
return image
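# Load the selected LoRA, optionally randomize the seed, generate the image, then move the pipeline back to CPU and unload the LoRA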
def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
if selected_index is None:
raise gr.Error("You must select a LoRA before proceeding.")
selected_lora = loras[selected_index]
lora_path = selected_lora["repo"]
trigger_word = selected_lora["trigger_word"]
# Load LoRA weights
with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
if "weights" in selected_lora:
pipe.load_lora_weights(lora_path, weight_name=selected_lora["weights"])
else:
pipe.load_lora_weights(lora_path)
# Set random seed for reproducibility
with calculateDuration("Randomizing seed"):
if randomize_seed:
seed = random.randint(0, MAX_SEED)
image = generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress)
pipe.to("cpu")
pipe.unload_lora_weights()
return image, seed
run_lora.zerogpu = True
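# Page styling: title layout, generate-button height, and gallery grid height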
css = '''
#gen_btn{height: 100%}
#title{text-align: center}
#title h1{font-size: 3em; display:inline-flex; align-items:center}
#title img{width: 100px; margin-right: 0.5em}
#gallery .grid-wrap{height: 10vh}
'''
with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
title = gr.HTML(
"""<h1><img src="https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA"> FLUX LoRA the Explorer</h1>""",
elem_id="title",
)
selected_index = gr.State(None)
with gr.Row():
with gr.Column(scale=3):
prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")
with gr.Column(scale=1, elem_id="gen_column"):
generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
with gr.Row():
with gr.Column(scale=3):
selected_info = gr.Markdown("")
gallery = gr.Gallery(
[(item["image"], item["title"]) for item in loras],
label="LoRA Gallery",
allow_preview=False,
columns=3,
elem_id="gallery"
)
with gr.Column(scale=4):
result = gr.Image(label="Generated Image")
with gr.Row():
with gr.Accordion("Advanced Settings", open=False):
with gr.Column():
with gr.Row():
cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)
with gr.Row():
width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
with gr.Row():
randomize_seed = gr.Checkbox(True, label="Randomize seed")
seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, step=0.01, value=0.95)
gallery.select(
update_selection,
inputs=[width, height],
outputs=[prompt, selected_info, selected_index, width, height]
)
gr.on(
triggers=[generate_button.click, prompt.submit],
fn=run_lora,
inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
outputs=[result, seed]
)
app.queue()
app.launch()
[
{
"image": "https://huggingface.co/multimodalart/flux-tarot-v1/resolve/main/images/e5f2761e5d474e6ba492d20dca0fa26f_e78f1524074b42b6ac49643ffad50ac6.png",
"title": "Tarot v1",
"repo": "multimodalart/flux-tarot-v1",
"trigger_word": "in the style of TOK a trtcrd, tarot style",
"aspect": "portrait"
},
{
"image": "https://huggingface.co/alvdansen/frosting_lane_flux/resolve/main/images/content%20-%202024-08-11T005936.346.jpeg",
"title": "Frosting Lane Flux",
"repo": "alvdansen/frosting_lane_flux",
"trigger_word": ""
},
{
"image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/picture-6-rev1.png?raw=true",
"title": "flux-Realism",
"repo": "XLabs-AI/flux-RealismLora",
"trigger_word": ""
},
{
"image": "https://huggingface.co/nerijs/animation2k-flux/resolve/main/images/Q8-oVxNnXvZ9HNrgbNpGw_02762aaaba3b47859ee5fe9403a371e3.png",
"title": "animation2k",
"repo": "nerijs/animation2k-flux",
"trigger_word": ""
},
{
"image":"https://huggingface.co/alvdansen/softserve_anime/resolve/main/images/ComfyUI_00062_.png",
"title":"SoftServe Anime",
"repo": "alvdansen/softserve_anime",
"trigger_word": ""
},
{
"image": "https://huggingface.co/veryVANYA/ps1-style-flux/resolve/main/24439220.jpeg",
"title": "PS1 style",
"repo": "veryVANYA/ps1-style-flux",
"trigger_word": "ps1 game screenshot"
},
{
"image": "https://huggingface.co/alvdansen/flux-koda/resolve/main/images/ComfyUI_00566_%20(2).png",
"title": "flux koda",
"repo": "alvdansen/flux-koda",
"trigger_word": "flmft style"
},
{
"image": "https://pbs.twimg.com/media/GU7NsZPa8AA4Ddl?format=jpg&name=4096x4096",
"title": "Half Illustration",
"repo": "davisbro/half_illustration",
"trigger_word": "in the style of TOK"
},
{
"image":"https://pbs.twimg.com/media/GVRiSH7WgAAnI4P?format=jpg&name=medium",
"title":"wrong",
"repo": "fofr/flux-wrong",
"trigger_word": "WRNG"
},
{
"image":"https://huggingface.co/linoyts/yarn_art_Flux_LoRA/resolve/main/yarn_art_2.png",
"title":"Yarn Art",
"repo": "linoyts/yarn_art_Flux_LoRA",
"trigger_word": ", yarn art style"
},
{
"image": "https://huggingface.co/SebastianBodza/flux_lora_aquarel_watercolor/resolve/main/images/ascend.webp",
"title": "Aquarell Watercolor",
"repo": "SebastianBodza/Flux_Aquarell_Watercolor_v2",
"trigger_word": "in a watercolor style, AQUACOLTOK. White background."
},
{
"image": "https://huggingface.co/dataautogpt3/FLUX-SyntheticAnime/resolve/main/assets/angel.png",
"title": "SyntheticAnime",
"repo": "dataautogpt3/FLUX-SyntheticAnime",
"trigger_word": "1980s anime screengrab, VHS quality"
},
{
"image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_14.png?raw=true",
"title": "flux-anime",
"repo": "XLabs-AI/flux-lora-collection",
"weights": "anime_lora.safetensors",
"trigger_word": ", anime"
},
{
"image": "https://huggingface.co/kudzueye/Boreal/resolve/main/images/ComfyUI_00845_.png",
"title": "Boreal",
"repo": "kudzueye/Boreal",
"weights": "boreal-flux-dev-lora-v04_1000_steps.safetensors",
"trigger_word": "phone photo"
},
{
"image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_18.png?raw=true",
"title": "flux-disney",
"repo": "XLabs-AI/flux-lora-collection",
"weights": "disney_lora.safetensors",
"trigger_word": ", disney style"
},
{
"image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_23.png?raw=true",
"title": "flux-art",
"repo": "XLabs-AI/flux-lora-collection",
"weights": "art_lora.safetensors",
"trigger_word": ", art"
},
{
"image": "https://huggingface.co/martintomov/retrofuturism-flux/resolve/main/images/2e40deba-858e-454f-ae1c-d1ba2adb6a65.jpeg",
"title": "Retrofuturism Flux",
"repo": "martintomov/retrofuturism-flux",
"trigger_word": ", retrofuturism"
}
]
torch
git+https://github.com/huggingface/diffusers
spaces
transformers
peft
sentencepiece
# Requires huggingface-cli (provided by the huggingface-hub package installed below)
import os
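# Route Hugging Face downloads through the hf-mirror.com mirror endpoint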
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
model_list = [
"openbmb/MiniCPM-V-2_6",
]
os.system("pip install -U huggingface-hub")
for model_path in model_list:
os.system(
f"huggingface-cli download --resume-download {model_path} --local-dir ./{model_path} --local-dir-use-symlinks False")
#!/bin/bash
cd /root/MiniCPM-V-2_6
python app.py
{
"cells": [
{
"cell_type": "markdown",
"id": "e5c5a211-2ccd-4341-af10-ac546484b91f",
"metadata": {
"tags": []
},
"source": [
"## 项目介绍\n",
"- 原项目地址:https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer\n",
"- flux-lora-the-explorer:基于FLUX.1-dev训练的lora模型的图片生成器\n",
"## 使用说明\n",
"- 启动和重启 Notebook 点上方工具栏中的「重启并运行所有单元格」。出现如下内容就算成功了:\n",
" - `Running on local URL: http://0.0.0.0:7860`\n",
" - `Running on public URL: https://xxxxxxxxxxxxxxx.gradio.live`\n",
"- 通过以下方式开启页面:\n",
" - 控制台打开「自定义服务」了,访问自定义服务端口号设置为7860\n",
" - 直接打开显示的公开链接`public URL`\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "53a96614-e2d2-4710-a82b-0d5ca9cb9872",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# 启动\n",
"!sh start.sh"
]
},
{
"cell_type": "markdown",
"source": [
"---\n",
"**扫码关注公众号,获取更多资讯**<br>\n",
"<div align=center>\n",
"<img src=\"assets/二维码.jpeg\" width = 20% />\n",
"</div>\n"
],
"metadata": {
"collapsed": false
},
"id": "2f54158c2967bc25"
},
{
"cell_type": "code",
"outputs": [],
"source": [],
"metadata": {
"collapsed": false
},
"id": "6dc59fbbcf222b6b"
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}