Commit e9ad0535 authored by muyangli

[major] support SANA

parent 9eb2cee0
@@ -3,6 +3,7 @@ import argparse
 import os
 import random
 import time
+from datetime import datetime

 import GPUtil
 import spaces
@@ -40,18 +41,24 @@ def get_args() -> argparse.Namespace:

 args = get_args()
+pipeline_init_kwargs = {}
 pipelines = []
 for i, precision in enumerate(args.precisions):
     pipeline = get_pipeline(
         model_name=args.model,
         precision=precision,
         use_qencoder=args.use_qencoder,
-        device=f"cuda:{i}",
+        device="cuda",
         lora_name="All",
+        pipeline_init_kwargs={**pipeline_init_kwargs},
     )
     pipeline.cur_lora_name = "None"
     pipeline.cur_lora_weight = 0
     pipelines.append(pipeline)
+    if i == 0:
+        pipeline_init_kwargs["vae"] = pipeline.vae
+        pipeline_init_kwargs["text_encoder"] = pipeline.text_encoder
+        pipeline_init_kwargs["text_encoder_2"] = pipeline.text_encoder_2

 safety_checker = SafetyChecker("cuda", disabled=args.no_safety_checker)
@@ -67,6 +74,7 @@ def generate(
     lora_weight: float = 1,
     seed: int = 0,
 ):
+    print(f"Generating image with prompt: {prompt}")
     is_unsafe_prompt = False
     if not safety_checker(prompt):
         is_unsafe_prompt = True
@@ -130,15 +138,18 @@ def generate(
     torch.cuda.empty_cache()

     if args.count_use:
-        if os.path.exists("use_count.txt"):
-            with open("use_count.txt", "r") as f:
+        if os.path.exists(f"{args.model}-use_count.txt"):
+            with open(f"{args.model}-use_count.txt", "r") as f:
                 count = int(f.read())
         else:
             count = 0
         count += 1
-        print(f"Use count: {count}")
-        with open("use_count.txt", "w") as f:
+        current_time = datetime.now()
+        print(f"{current_time}: {count}")
+        with open(f"{args.model}-use_count.txt", "w") as f:
             f.write(str(count))
+        with open(f"{args.model}-use_record.txt", "a") as f:
+            f.write(f"{current_time}: {count}\n")

     return *images, *latency_strs
@@ -158,7 +169,27 @@ with gr.Blocks(
     css_paths=[f"assets/frame{len(args.precisions)}.css", "assets/common.css"],
     title=f"SVDQuant FLUX.1-{args.model} Demo",
 ) as demo:
-    gr.HTML(DESCRIPTION.format(model=args.model, device_info=device_info, notice=notice))
+    def get_header_str():
+        if args.count_use:
+            if os.path.exists(f"{args.model}-use_count.txt"):
+                with open(f"{args.model}-use_count.txt", "r") as f:
+                    count = int(f.read())
+            else:
+                count = 0
+            count_info = (
+                f"<div style='display: flex; justify-content: center; align-items: center; text-align: center;'>"
+                f"<span style='font-size: 18px; font-weight: bold;'>Total inference runs: </span>"
+                f"<span style='font-size: 18px; color:red; font-weight: bold;'>&nbsp;{count}</span></div>"
+            )
+        else:
+            count_info = ""
+        header_str = DESCRIPTION.format(model=args.model, device_info=device_info, notice=notice, count_info=count_info)
+        return header_str
+
+    header = gr.HTML(get_header_str())
+    demo.load(fn=get_header_str, outputs=header)

     with gr.Row():
         image_results, latency_results = [], []
         for i, precision in enumerate(args.precisions):
 ...
@@ -22,8 +22,8 @@ def get_pipeline(
     lora_name: str = "None",
     lora_weight: float = 1,
     device: str | torch.device = "cuda",
+    pipeline_init_kwargs: dict = {},
 ) -> FluxPipeline:
-    pipeline_init_kwargs = {}
     if model_name == "schnell":
         if precision == "int4":
             assert torch.device(device).type == "cuda", "int4 only supported on CUDA devices"
@@ -56,7 +56,9 @@ def get_pipeline(
         )
     else:
         assert precision == "bf16"
-        pipeline = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
+        pipeline = FluxPipeline.from_pretrained(
+            "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16, **pipeline_init_kwargs
+        )
     if lora_name == "All":
         # Pre-load all the LoRA weights for demo use
         for name, path in LORA_PATHS.items():
 ...
# Nunchaku [SANA](https://nvlabs.github.io/Sana/) Models
## Text-to-Image Gradio Demo
```shell
python run_gradio.py
```
* By default, the Gemma-2B model is loaded as a safety checker. To disable this feature and save GPU memory, use `--no-safety-checker`.
* By default, only the INT4 DiT is loaded. Use `-p int4 bf16` to add a BF16 DiT for side-by-side comparison, or `-p bf16` to load only the BF16 model.
## Command Line Inference
We provide a script, [generate.py](generate.py), that generates an image from a text prompt directly from the command line, similar to the demo. Simply run:
```shell
python generate.py --prompt "Your Text Prompt"
```
* The generated image will be saved as `output.png` by default. You can specify a different path using the `-o` or `--output-path` options.
* By default, the script uses our INT4 model. To use the BF16 model instead, specify `-p bf16`.
* You can adjust the number of inference steps and classifier-free guidance scale with `-t` and `-g`, respectively. The defaults are 20 steps and a guidance scale of 5.
* In addition to classifier-free guidance, you can also adjust the [PAG guidance](https://arxiv.org/abs/2403.17377) scale with `--pag-scale`. The default is 2.
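
If you prefer to call the pipeline from Python rather than through `generate.py`, the following minimal sketch mirrors what the script does. It assumes you run it from this demo directory so that the local `utils.get_pipeline` helper (shown below) is importable; all parameter names follow the scripts in this folder.

```python
import torch

from utils import get_pipeline  # helper defined in this demo's utils.py

# Load the SVDQuant INT4 SANA pipeline on the GPU (use precision="bf16" for the BF16 baseline).
pipeline = get_pipeline(precision="int4", device="cuda")

image = pipeline(
    prompt="A cat holding a sign that says hello world",
    num_inference_steps=20,  # -t
    guidance_scale=5.0,      # -g
    pag_scale=2.0,           # --pag-scale
    generator=torch.Generator().manual_seed(2333),
).images[0]
image.save("output.png")
```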
## Latency Benchmark
To measure the latency of our INT4 models, use the following command:
```shell
python latency.py
```
* Adjust the number of inference steps and the guidance scale using `-t` and `-g`, respectively. The defaults are 20 steps and a guidance scale of 5.
* You can also adjust the [PAG guidance](https://arxiv.org/abs/2403.17377) scale with `--pag-scale`. The default is 2.
* By default, the script measures the end-to-end latency for generating a single image. To measure the latency of a single DiT forward step instead, pass `--mode step`.
* Specify the number of warmup and test runs using `--warmup-times` and `--test-times`. The defaults are 2 warmup runs and 10 test runs.
h1{text-align:center}
h2{text-align:center}
#random_seed {height: 72px;}
#accessibility {
text-align: center; /* Center-aligns the text */
margin: auto; /* Centers the element horizontally */
}
<div style="display: flex; justify-content: center; align-items: center; text-align: center;">
<div>
<h1>
<img src="https://github.com/mit-han-lab/nunchaku/raw/refs/heads/main/assets/logo.svg"
alt="logo"
style="height: 40px; width: auto; display: block; margin: auto;"/>
<a href='https://nvlabs.github.io/Sana/'>SANA-1600M</a> Demo
</h1>
<h2>
SVDQuant: Absorbing Outliers by Low-Rank Components for 4-Bit Diffusion Models
</h2>
<h3>
<a href='https://lmxyy.me'>Muyang Li*</a>,
<a href='https://yujunlin.com'>Yujun Lin*</a>,
<a href='https://hanlab.mit.edu/team/zhekai-zhang'>Zhekai Zhang*</a>,
<a href='https://www.tianle.website/#/'>Tianle Cai</a>,
<a href='https://xiuyuli.com'>Xiuyu Li</a>,
<br>
<a href='https://github.com/JerryGJX'>Junxian Guo</a>,
<a href='https://xieenze.github.io'>Enze Xie</a>,
<a href='https://cs.stanford.edu/~chenlin/'>Chenlin Meng</a>,
<a href='https://www.cs.cmu.edu/~junyanz/'>Jun-Yan Zhu</a>,
and <a href='https://hanlab.mit.edu/songhan'>Song Han</a>
</h3>
<div style="display: flex; justify-content: center; align-items: center; text-align: center;">
<a href="https://arxiv.org/abs/2411.05007">[Paper]</a>
&nbsp;
<a href='https://github.com/mit-han-lab/nunchaku'>
[Code]
</a>
&nbsp;
<a href='https://hanlab.mit.edu/projects/svdquant'>
[Website]
</a>
&nbsp;
<a href='https://hanlab.mit.edu/blog/svdquant'>
[Blog]
</a>
</div>
<h4>Quantization Library:
<a href='https://github.com/mit-han-lab/deepcompressor'>DeepCompressor</a>
&nbsp;
Inference Engine: <a href='https://github.com/mit-han-lab/nunchaku'>Nunchaku</a>
</h4>
<div style="display: flex; justify-content: center; align-items: center; text-align: center;">
{device_info}
</div>
<div style="display: flex; justify-content: center; align-items: center; text-align: center;">
{notice}
</div>
{count_info}
</div>
</div>
.gradio-container{max-width: 560px !important}
.gradio-container{max-width: 1200px !important}
import argparse
import os
import torch
from utils import get_pipeline
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
"-p", "--precision", type=str, default="int4", choices=["int4", "bf16"], help="Which precision to use"
)
parser.add_argument(
"--prompt", type=str, default="A cat holding a sign that says hello world", help="Prompt for the image"
)
parser.add_argument("--seed", type=int, default=2333, help="Random seed (-1 for random)")
parser.add_argument("-t", "--num-inference-steps", type=int, default=20, help="Number of inference steps")
parser.add_argument("-o", "--output-path", type=str, default="output.png", help="Image output path")
parser.add_argument("-g", "--guidance-scale", type=float, default=5, help="Guidance scale.")
parser.add_argument("--pag-scale", type=float, default=2.0, help="PAG scale")
parser.add_argument("--height", type=int, default=1024, help="Height of the image")
parser.add_argument("--width", type=int, default=1024, help="Width of the image")
parser.add_argument("--use-qencoder", action="store_true", help="Whether to use 4-bit text encoder")
    args = parser.parse_args()
return args
def main():
args = get_args()
pipeline = get_pipeline(precision=args.precision, use_qencoder=args.use_qencoder, device="cuda")
prompt = args.prompt
    image = pipeline(
        prompt=prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.num_inference_steps,
        guidance_scale=args.guidance_scale,
        pag_scale=args.pag_scale,
        generator=torch.Generator().manual_seed(args.seed) if args.seed >= 0 else None,
    ).images[0]
output_dir = os.path.dirname(os.path.abspath(os.path.expanduser(args.output_path)))
os.makedirs(output_dir, exist_ok=True)
image.save(args.output_path)
if __name__ == "__main__":
main()
import argparse
import time
import torch
from torch import nn
from tqdm import trange
from utils import get_pipeline
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
"-p", "--precision", type=str, default="int4", choices=["int4", "bf16"], help="Which precision to use"
)
parser.add_argument("-t", "--num-inference-steps", type=int, default=20, help="Number of inference steps")
parser.add_argument("-g", "--guidance-scale", type=float, default=5, help="Guidance scale")
parser.add_argument("--pag-scale", type=float, default=2.0, help="PAG scale")
parser.add_argument("--height", type=int, default=1024, help="Height of the image")
parser.add_argument("--width", type=int, default=1024, help="Width of the image")
# Test related
    parser.add_argument("--warmup-times", type=int, default=2, help="Number of warmup runs")
    parser.add_argument("--test-times", type=int, default=10, help="Number of test runs")
parser.add_argument(
"--mode",
type=str,
default="end2end",
choices=["end2end", "step"],
help="Measure mode: end-to-end latency or per-step latency",
)
    parser.add_argument(
        "--ignore_ratio", type=float, default=0.2, help="Fraction of the slowest and fastest runs to drop before averaging"
    )
args = parser.parse_args()
return args
def main():
args = get_args()
pipeline = get_pipeline(precision=args.precision, device="cuda")
dummy_prompt = "A cat holding a sign that says hello world"
latency_list = []
if args.mode == "end2end":
pipeline.set_progress_bar_config(position=1, desc="Step", leave=False)
for _ in trange(args.warmup_times, desc="Warmup", position=0, leave=False):
pipeline(
prompt=dummy_prompt,
height=args.height,
width=args.width,
num_inference_steps=args.num_inference_steps,
guidance_scale=args.guidance_scale,
)
torch.cuda.synchronize()
        for _ in trange(args.test_times, desc="Test", position=0, leave=False):
start_time = time.time()
pipeline(
prompt=dummy_prompt,
height=args.height,
width=args.width,
num_inference_steps=args.num_inference_steps,
guidance_scale=args.guidance_scale,
)
torch.cuda.synchronize()
end_time = time.time()
latency_list.append(end_time - start_time)
elif args.mode == "step":
inputs = {}
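        # Run the pipeline once with a forward pre-hook on the transformer to capture its
        # call arguments, then time the transformer forward pass alone in the loops below.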
def get_input_hook(module: nn.Module, input_args, input_kwargs):
inputs["args"] = input_args
inputs["kwargs"] = input_kwargs
pipeline.transformer.register_forward_pre_hook(get_input_hook, with_kwargs=True)
pipeline(
prompt=dummy_prompt,
height=args.height,
width=args.width,
num_inference_steps=1,
guidance_scale=args.guidance_scale,
output_type="latent",
)
for _ in trange(args.warmup_times, desc="Warmup", position=0, leave=False):
pipeline.transformer(*inputs["args"], **inputs["kwargs"])
torch.cuda.synchronize()
        for _ in trange(args.test_times, desc="Test", position=0, leave=False):
start_time = time.time()
pipeline.transformer(*inputs["args"], **inputs["kwargs"])
torch.cuda.synchronize()
end_time = time.time()
latency_list.append(end_time - start_time)
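    # Report a trimmed mean: drop the slowest and fastest runs (ignore_ratio of the total,
    # split between both ends) before averaging.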
latency_list = sorted(latency_list)
ignored_count = int(args.ignore_ratio * len(latency_list) / 2)
if ignored_count > 0:
latency_list = latency_list[ignored_count:-ignored_count]
print(f"Latency: {sum(latency_list) / len(latency_list):.5f} s")
if __name__ == "__main__":
main()
# Changed from https://huggingface.co/spaces/playgroundai/playground-v2.5/blob/main/app.py
import argparse
import os
import random
import time
from datetime import datetime
import GPUtil
# import gradio last to avoid conflicts with other imports
import gradio as gr
import spaces
import torch
from nunchaku.models.safety_checker import SafetyChecker
from utils import get_pipeline
from vars import EXAMPLES, MAX_SEED
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
"-p",
"--precisions",
type=str,
default=["int4"],
nargs="*",
choices=["int4", "bf16"],
help="Which precisions to use",
)
parser.add_argument("--use-qencoder", action="store_true", help="Whether to use 4-bit text encoder")
parser.add_argument("--no-safety-checker", action="store_true", help="Disable safety checker")
parser.add_argument("--count-use", action="store_true", help="Whether to count the number of uses")
return parser.parse_args()
args = get_args()
pipelines = []
pipeline_init_kwargs = {}
for i, precision in enumerate(args.precisions):
pipeline = get_pipeline(
precision=precision,
use_qencoder=args.use_qencoder,
device="cuda",
pipeline_init_kwargs={**pipeline_init_kwargs},
)
pipelines.append(pipeline)
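    # Share the first pipeline's VAE and text encoder with later precisions so these
    # components are loaded only once and GPU memory is not duplicated.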
if i == 0:
pipeline_init_kwargs["vae"] = pipeline.vae
pipeline_init_kwargs["text_encoder"] = pipeline.text_encoder
safety_checker = SafetyChecker("cuda", disabled=args.no_safety_checker)
@spaces.GPU(enable_queue=True)
def generate(
prompt: str = None,
height: int = 1024,
width: int = 1024,
num_inference_steps: int = 4,
guidance_scale: float = 0,
pag_scale: float = 0,
seed: int = 0,
):
print(f"Prompt: {prompt}")
is_unsafe_prompt = False
if not safety_checker(prompt):
is_unsafe_prompt = True
prompt = "A peaceful world."
images, latency_strs = [], []
for i, pipeline in enumerate(pipelines):
progress = gr.Progress(track_tqdm=True)
start_time = time.time()
image = pipeline(
prompt=prompt,
height=height,
width=width,
guidance_scale=guidance_scale,
pag_scale=pag_scale,
num_inference_steps=num_inference_steps,
generator=torch.Generator().manual_seed(seed),
).images[0]
end_time = time.time()
latency = end_time - start_time
if latency < 1:
latency = latency * 1000
latency_str = f"{latency:.2f}ms"
else:
latency_str = f"{latency:.2f}s"
images.append(image)
latency_strs.append(latency_str)
if is_unsafe_prompt:
for i in range(len(latency_strs)):
latency_strs[i] += " (Unsafe prompt detected)"
torch.cuda.empty_cache()
if args.count_use:
if os.path.exists("use_count.txt"):
with open("use_count.txt", "r") as f:
count = int(f.read())
else:
count = 0
count += 1
current_time = datetime.now()
print(f"{current_time}: {count}")
with open("use_count.txt", "w") as f:
f.write(str(count))
with open("use_record.txt", "a") as f:
f.write(f"{current_time}: {count}\n")
return *images, *latency_strs
with open("./assets/description.html", "r") as f:
DESCRIPTION = f.read()
gpus = GPUtil.getGPUs()
if len(gpus) > 0:
gpu = gpus[0]
memory = gpu.memoryTotal / 1024
device_info = f"Running on {gpu.name} with {memory:.0f} GiB memory."
else:
device_info = "Running on CPU 🥶 This demo does not work on CPU."
notice = f'<strong>Notice:</strong>&nbsp;We will replace unsafe prompts with a default prompt: "A peaceful world."'
with gr.Blocks(
css_paths=[f"assets/frame{len(args.precisions)}.css", "assets/common.css"],
title=f"SVDQuant SANA-1600M Demo",
) as demo:
def get_header_str():
if args.count_use:
if os.path.exists("use_count.txt"):
with open("use_count.txt", "r") as f:
count = int(f.read())
else:
count = 0
count_info = (
f"<div style='display: flex; justify-content: center; align-items: center; text-align: center;'>"
f"<span style='font-size: 18px; font-weight: bold;'>Total inference runs: </span>"
f"<span style='font-size: 18px; color:red; font-weight: bold;'>&nbsp;{count}</span></div>"
)
else:
count_info = ""
header_str = DESCRIPTION.format(device_info=device_info, notice=notice, count_info=count_info)
return header_str
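    # Render the header once at startup and refresh it (with the latest run count) on every page load.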
header = gr.HTML(get_header_str())
demo.load(fn=get_header_str, outputs=header)
with gr.Row():
image_results, latency_results = [], []
for i, precision in enumerate(args.precisions):
with gr.Column():
gr.Markdown(f"# {precision.upper()}", elem_id="image_header")
with gr.Group():
image_result = gr.Image(
format="png",
image_mode="RGB",
label="Result",
show_label=False,
show_download_button=True,
interactive=False,
)
latency_result = gr.Text(label="Inference Latency", show_label=True)
image_results.append(image_result)
latency_results.append(latency_result)
with gr.Row():
prompt = gr.Text(
label="Prompt", show_label=False, max_lines=1, placeholder="Enter your prompt", container=False, scale=4
)
run_button = gr.Button("Run", scale=1)
with gr.Row():
seed = gr.Slider(label="Seed", show_label=True, minimum=0, maximum=MAX_SEED, value=233, step=1, scale=4)
randomize_seed = gr.Button("Random Seed", scale=1, min_width=50, elem_id="random_seed")
with gr.Accordion("Advanced options", open=False):
with gr.Group():
height = gr.Slider(label="Height", minimum=256, maximum=4096, step=32, value=1024)
width = gr.Slider(label="Width", minimum=256, maximum=4096, step=32, value=1024)
with gr.Group():
num_inference_steps = gr.Slider(label="Sampling Steps", minimum=10, maximum=50, step=1, value=20)
guidance_scale = gr.Slider(label="Guidance Scale", minimum=1, maximum=10, step=0.1, value=5)
pag_scale = gr.Slider(label="PAG Scale", minimum=0, maximum=10, step=0.1, value=2.0)
input_args = [prompt, height, width, num_inference_steps, guidance_scale, pag_scale, seed]
gr.Examples(examples=EXAMPLES, inputs=input_args, outputs=[*image_results, *latency_results], fn=generate)
gr.on(
triggers=[prompt.submit, run_button.click],
fn=generate,
inputs=input_args,
outputs=[*image_results, *latency_results],
api_name="run",
)
randomize_seed.click(
lambda: random.randint(0, MAX_SEED), inputs=[], outputs=seed, api_name=False, queue=False
).then(fn=generate, inputs=input_args, outputs=[*image_results, *latency_results], api_name=False, queue=False)
gr.Markdown("MIT Accessibility: https://accessibility.mit.edu/", elem_id="accessibility")
if __name__ == "__main__":
demo.queue(max_size=20).launch(server_name="0.0.0.0", debug=True, share=True)
import torch
from diffusers import SanaPAGPipeline
from nunchaku.models.transformer_sana import NunchakuSanaTransformer2DModel
def hash_str_to_int(s: str) -> int:
"""Hash a string to an integer."""
modulus = 10**9 + 7 # Large prime modulus
hash_int = 0
for char in s:
hash_int = (hash_int * 31 + ord(char)) % modulus
return hash_int
def get_pipeline(
precision: str, use_qencoder: bool = False, device: str | torch.device = "cuda", pipeline_init_kwargs: dict = {}
) -> SanaPAGPipeline:
if precision == "int4":
assert torch.device(device).type == "cuda", "int4 only supported on CUDA devices"
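        # Load the SVDQuant INT4 SANA transformer and pass it to SanaPAGPipeline.from_pretrained
        # below as a component override.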
transformer = NunchakuSanaTransformer2DModel.from_pretrained("mit-han-lab/svdq-int4-sana-1600m", pag_layers=8)
pipeline_init_kwargs["transformer"] = transformer
if use_qencoder:
raise NotImplementedError("Quantized encoder not supported for Sana for now")
else:
assert precision == "bf16"
pipeline = SanaPAGPipeline.from_pretrained(
"Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers",
variant="bf16",
torch_dtype=torch.bfloat16,
pag_applied_layers="transformer_blocks.8",
**pipeline_init_kwargs
)
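    # The INT4 transformer already handles PAG (pag_layers=8 above), so the pipeline's
    # PAG attention-processor setup is turned into a no-op.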
if precision == "int4":
pipeline._set_pag_attn_processor = lambda *args, **kwargs: None
pipeline = pipeline.to(device)
return pipeline
MAX_IMAGE_SIZE = 2048
MAX_SEED = 1000000000
DEFAULT_HEIGHT = 1024
DEFAULT_WIDTH = 1024
# prompt, height, width, num_inference_steps, guidance_scale, pag_scale, seed
EXAMPLES = [
[
"🐶 Wearing 🕶 flying on the 🌈",
1024,
1024,
20,
5,
2,
2,
],
[
        "大漠孤烟直, 长河落日圆",  # classical Chinese verse: "In the vast desert a lone plume of smoke rises straight; over the long river the setting sun is round"
1024,
1024,
20,
5,
2,
23,
],
[
"Pirate ship trapped in a cosmic maelstrom nebula, rendered in cosmic beach whirlpool engine, "
"volumetric lighting, spectacular, ambient lights, light pollution, cinematic atmosphere, "
"art nouveau style, illustration art artwork by SenseiJaye, intricate detail.",
1024,
1024,
20,
5,
2,
233,
],
[
"A photo of a Eurasian lynx in a sunlit forest, with tufted ears and a spotted coat. The lynx should be "
"sharply focused, gazing into the distance, while the background is softly blurred for depth. Use cinematic "
"lighting with soft rays filtering through the trees, and capture the scene with a shallow depth of field "
"for a natural, peaceful atmosphere. 8K resolution, highly detailed, photorealistic, "
"cinematic lighting, ultra-HD.",
1024,
1024,
20,
5,
2,
2333,
],
[
"A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. "
"She wears a black leather jacket, a long red dress, and black boots, and carries a black purse. "
"She wears sunglasses and red lipstick. She walks confidently and casually. "
"The street is damp and reflective, creating a mirror effect of the colorful lights. "
"Many pedestrians walk about.",
1024,
1024,
20,
5,
2,
23333,
],
[
"Cozy bedroom with vintage wooden furniture and a large circular window covered in lush green vines, "
"opening to a misty forest. Soft, ambient lighting highlights the bed with crumpled blankets, a bookshelf, "
"and a desk. The atmosphere is serene and natural. 8K resolution, highly detailed, photorealistic, "
"cinematic lighting, ultra-HD.",
1024,
1024,
20,
5,
2,
233333,
],
]