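"""Gradio demo for SVDQuant FLUX.1: builds one diffusion pipeline per requested
precision (int4 / fp4 / bf16) and shows the generated images and latencies side by side."""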
# Changed from https://huggingface.co/spaces/playgroundai/playground-v2.5/blob/main/app.py
import argparse
import os
import random
import time
from datetime import datetime

import spaces
import torch
from peft.tuners import lora
from utils import get_pipeline
from vars import DEFAULT_HEIGHT, DEFAULT_WIDTH, EXAMPLES, LORA_PATHS, MAX_SEED, PROMPT_TEMPLATES

from nunchaku.models.safety_checker import SafetyChecker

# import gradio last to avoid conflicts with other imports
import gradio as gr  # noqa: isort: skip


def get_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--model", type=str, default="schnell", choices=["schnell", "dev"], help="Which FLUX.1 model to use"
    )
    parser.add_argument(
        "-p",
        "--precisions",
        type=str,
        default=["int4"],
        nargs="*",
        choices=["int4", "fp4", "bf16"],
        help="Which precisions to use",
    )
    parser.add_argument("--use-qencoder", action="store_true", help="Whether to use 4-bit text encoder")
    parser.add_argument("--no-safety-checker", action="store_true", help="Disable safety checker")
    parser.add_argument("--count-use", action="store_true", help="Whether to count the number of uses")
    parser.add_argument("--gradio-root-path", type=str, default="")
    return parser.parse_args()


args = get_args()


pipeline_init_kwargs = {}
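# Build one pipeline per requested precision so the same prompt can be rendered and
# timed side by side across precisions.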
pipelines = []
for i, precision in enumerate(args.precisions):
    pipeline = get_pipeline(
        model_name=args.model,
        precision=precision,
        use_qencoder=args.use_qencoder,
        device="cuda",
        lora_name="All",
        pipeline_init_kwargs={**pipeline_init_kwargs},
    )
    pipeline.cur_lora_name = "None"
    pipeline.cur_lora_weight = 0
    pipelines.append(pipeline)
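    # Reuse the first pipeline's VAE and text encoders in the remaining pipelines so
    # each additional precision only loads its own transformer weights.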
    if i == 0:
        pipeline_init_kwargs["vae"] = pipeline.vae
        pipeline_init_kwargs["text_encoder"] = pipeline.text_encoder
        pipeline_init_kwargs["text_encoder_2"] = pipeline.text_encoder_2

safety_checker = SafetyChecker("cuda", disabled=args.no_safety_checker)


@spaces.GPU(enable_queue=True)
def generate(
    prompt: str = None,
    height: int = 1024,
    width: int = 1024,
    num_inference_steps: int = 4,
    guidance_scale: float = 0,
    lora_name: str = "None",
    lora_weight: float = 1,
    seed: int = 0,
):
    print(f"Generating image with prompt: {prompt}")
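    # Prompts flagged by the safety checker are swapped for a harmless default rather
    # than rejected; the latency strings are annotated below so the user is informed.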
    is_unsafe_prompt = False
    if not safety_checker(prompt):
        is_unsafe_prompt = True
        prompt = "A peaceful world."
    prompt = PROMPT_TEMPLATES[lora_name].format(prompt=prompt)
    images, latency_strs = [], []
    for i, pipeline in enumerate(pipelines):
        precision = args.precisions[i]
        gr.Progress(track_tqdm=True)
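        # Switching LoRAs takes two paths: bf16 pipelines toggle the PEFT LoRA scalings
        # in place, while int4 pipelines reload the LoRA weights via update_lora_params
        # and scale them via set_lora_strength on the quantized transformer.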
        if pipeline.cur_lora_name != lora_name:
            if precision == "bf16":
                for m in pipeline.transformer.modules():
                    if isinstance(m, lora.LoraLayer):
                        if pipeline.cur_lora_name != "None":
                            if pipeline.cur_lora_name in m.scaling:
                                m.scaling[pipeline.cur_lora_name] = 0
                        if lora_name != "None":
                            if lora_name in m.scaling:
                                m.scaling[lora_name] = lora_weight
            else:
                assert precision == "int4"
                if lora_name != "None":
                    lora_path = LORA_PATHS[lora_name]
                    lora_path = os.path.join(lora_path["name_or_path"], lora_path["weight_name"])
                    pipeline.transformer.update_lora_params(lora_path)
                    pipeline.transformer.set_lora_strength(lora_weight)
                else:
                    pipeline.transformer.set_lora_strength(0)
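        # Same LoRA as the previous call: only the weight may have changed, so update
        # the scaling (bf16) or strength (int4) without reloading anything.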
        elif lora_name != "None":
            if precision == "bf16":
                if pipeline.cur_lora_weight != lora_weight:
                    for m in pipeline.transformer.modules():
                        if isinstance(m, lora.LoraLayer):
                            if lora_name in m.scaling:
                                m.scaling[lora_name] = lora_weight
            else:
                assert precision == "int4"
                pipeline.transformer.set_lora_strength(lora_weight)
        pipeline.cur_lora_name = lora_name
        pipeline.cur_lora_weight = lora_weight

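        # Time the full generation; a freshly seeded generator keeps the output
        # reproducible and identical across the precision variants.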
        start_time = time.time()
        image = pipeline(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            generator=torch.Generator().manual_seed(seed),
        ).images[0]
        end_time = time.time()
        latency = end_time - start_time
        if latency < 1:
            latency = latency * 1000
            latency_str = f"{latency:.2f}ms"
        else:
            latency_str = f"{latency:.2f}s"
        images.append(image)
        latency_strs.append(latency_str)
    if is_unsafe_prompt:
        for i in range(len(latency_strs)):
            latency_strs[i] += " (Unsafe prompt detected)"
    torch.cuda.empty_cache()

    if args.count_use:
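        # Persist a simple usage counter plus an append-only timestamped record,
        # both keyed by model name.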
        if os.path.exists(f"{args.model}-use_count.txt"):
            with open(f"{args.model}-use_count.txt", "r") as f:
                count = int(f.read())
        else:
            count = 0
        count += 1
        current_time = datetime.now()
        print(f"{current_time}: {count}")
        with open(f"{args.model}-use_count.txt", "w") as f:
            f.write(str(count))
        with open(f"{args.model}-use_record.txt", "a") as f:
            f.write(f"{current_time}: {count}\n")

    return *images, *latency_strs


with open("./assets/description.html", "r") as f:
    DESCRIPTION = f.read()

# Get the GPU properties
if torch.cuda.device_count() > 0:
    gpu_properties = torch.cuda.get_device_properties(0)
    gpu_memory = gpu_properties.total_memory / (1024**3)  # Convert to GiB
    gpu_name = torch.cuda.get_device_name(0)
    device_info = f"Running on {gpu_name} with {gpu_memory:.0f} GiB memory."
else:
    device_info = "Running on CPU 🥶 This demo does not work on CPU."
notice = '<strong>Notice:</strong>&nbsp;We will replace unsafe prompts with a default prompt: "A peaceful world."'

with gr.Blocks(
    css_paths=[f"assets/frame{len(args.precisions)}.css", "assets/common.css"],
    title=f"SVDQuant FLUX.1-{args.model} Demo",
) as demo:

    def get_header_str():
        if args.count_use:
            if os.path.exists(f"{args.model}-use_count.txt"):
                with open(f"{args.model}-use_count.txt", "r") as f:
                    count = int(f.read())
            else:
                count = 0
            count_info = (
                f"<div style='display: flex; justify-content: center; align-items: center; text-align: center;'>"
                f"<span style='font-size: 18px; font-weight: bold;'>Total inference runs: </span>"
                f"<span style='font-size: 18px; color:red; font-weight: bold;'>&nbsp;{count}</span></div>"
            )
        else:
            count_info = ""
        header_str = DESCRIPTION.format(model=args.model, device_info=device_info, notice=notice, count_info=count_info)
        return header_str

    header = gr.HTML(get_header_str())
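    # Rebuild the header on every page load so the usage counter stays current.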
    demo.load(fn=get_header_str, outputs=header)
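    # One result column per precision: the generated image plus its measured latency.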
    with gr.Row():
        image_results, latency_results = [], []
        for i, precision in enumerate(args.precisions):
            with gr.Column():
                gr.Markdown(f"# {precision.upper()}", elem_id="image_header")
                with gr.Group():
                    image_result = gr.Image(
                        format="png",
                        image_mode="RGB",
                        label="Result",
                        show_label=False,
                        show_download_button=True,
                        interactive=False,
                    )
                    latency_result = gr.Text(label="Inference Latency", show_label=True)
                    image_results.append(image_result)
                    latency_results.append(latency_result)
    with gr.Row():
        prompt = gr.Text(
            label="Prompt", show_label=False, max_lines=1, placeholder="Enter your prompt", container=False, scale=4
        )
        run_button = gr.Button("Run", scale=1)
    if args.model == "dev":
        with gr.Row():
            lora_name = gr.Dropdown(label="LoRA Name", choices=list(PROMPT_TEMPLATES.keys()), value="None", scale=1)
            prompt_template = gr.Textbox(
                label="LoRA Prompt Template", value=PROMPT_TEMPLATES["None"], scale=1, max_lines=1
            )
    else:
        lora_name = "None"

    with gr.Row():
        seed = gr.Slider(label="Seed", show_label=True, minimum=0, maximum=MAX_SEED, value=233, step=1, scale=4)
        randomize_seed = gr.Button("Random Seed", scale=1, min_width=50, elem_id="random_seed")
    with gr.Accordion("Advanced options", open=False):
        with gr.Group():
            if args.model == "schnell":
                num_inference_steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=8, step=1, value=4)
                guidance_scale = 0
                lora_weight = 0
            elif args.model == "dev":
                num_inference_steps = gr.Slider(label="Sampling Steps", minimum=10, maximum=50, step=1, value=25)
                guidance_scale = gr.Slider(label="Guidance Scale", minimum=1, maximum=10, step=0.1, value=3.5)
                lora_weight = gr.Slider(label="LoRA Weight", minimum=0, maximum=2, step=0.1, value=1)
            else:
                raise NotImplementedError(f"Model {args.model} not implemented")
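    # schnell and dev expose different controls, so each model gets a wrapper whose
    # signature matches its Gradio inputs; the remaining generate() arguments are
    # filled in from the enclosing scope.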
    if args.model == "schnell":

        def generate_func(prompt, num_inference_steps, seed):
            return generate(
                prompt, DEFAULT_HEIGHT, DEFAULT_WIDTH, num_inference_steps, guidance_scale, lora_name, lora_weight, seed
            )

        input_args = [prompt, num_inference_steps, seed]
    elif args.model == "dev":

        def generate_func(prompt, num_inference_steps, guidance_scale, lora_name, lora_weight, seed):
            return generate(
                prompt, DEFAULT_HEIGHT, DEFAULT_WIDTH, num_inference_steps, guidance_scale, lora_name, lora_weight, seed
            )

        input_args = [prompt, num_inference_steps, guidance_scale, lora_name, lora_weight, seed]

    gr.Examples(
        examples=EXAMPLES[args.model], inputs=input_args, outputs=[*image_results, *latency_results], fn=generate_func
    )

    gr.on(
        triggers=[prompt.submit, run_button.click],
        fn=generate_func,
        inputs=input_args,
        outputs=[*image_results, *latency_results],
        api_name=False,
    )
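    # Draw a fresh random seed, then immediately regenerate with it.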
    randomize_seed.click(
        lambda: random.randint(0, MAX_SEED), inputs=[], outputs=seed, api_name=False, queue=False
    ).then(fn=generate_func, inputs=input_args, outputs=[*image_results, *latency_results], api_name=False, queue=False)

    if args.model == "dev":
        lora_name.change(
            lambda x: PROMPT_TEMPLATES[x],
            inputs=[lora_name],
            outputs=[prompt_template],
            api_name=False,
            queue=False,
        )
    gr.Markdown("MIT Accessibility: https://accessibility.mit.edu/", elem_id="accessibility")


if __name__ == "__main__":
    demo.queue(max_size=20).launch(server_name="0.0.0.0", debug=True, share=True, root_path=args.gradio_root_path)