Commit 675cc057 authored by chenpangpang's avatar chenpangpang
Browse files

feat: 解决第一次生成视频时输出视频框显示不正常问题 (fix: output-video box rendered incorrectly on the first video generation)

parent 018689a0
...@@ -6,7 +6,7 @@ import time ...@@ -6,7 +6,7 @@ import time
import gradio as gr import gradio as gr
import torch import torch
from diffusers import CogVideoXPipeline, CogVideoXDDIMScheduler,CogVideoXDPMScheduler from diffusers import CogVideoXPipeline, CogVideoXDDIMScheduler, CogVideoXDPMScheduler
from datetime import datetime, timedelta from datetime import datetime, timedelta
from diffusers.image_processor import VaeImageProcessor from diffusers.image_processor import VaeImageProcessor
...@@ -17,7 +17,6 @@ from rife_model import load_rife_model, rife_inference_with_latents ...@@ -17,7 +17,6 @@ from rife_model import load_rife_model, rife_inference_with_latents
device = "cuda" if torch.cuda.is_available() else "cpu" device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16).to(device) pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16).to(device)
pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing") pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
...@@ -95,7 +94,7 @@ def infer( ...@@ -95,7 +94,7 @@ def infer(
num_inference_steps: int, num_inference_steps: int,
guidance_scale: float, guidance_scale: float,
seed: int = -1, seed: int = -1,
#progress=gr.Progress(track_tqdm=True), # progress=gr.Progress(track_tqdm=True),
): ):
if seed == -1: if seed == -1:
seed = random.randint(0, 2 ** 8 - 1) seed = random.randint(0, 2 ** 8 - 1)
...@@ -159,7 +158,7 @@ with gr.Blocks() as demo: ...@@ -159,7 +158,7 @@ with gr.Blocks() as demo:
<div style="text-align: center; font-size: 15px; font-weight: bold; color: red; margin-bottom: 20px;"> <div style="text-align: center; font-size: 15px; font-weight: bold; color: red; margin-bottom: 20px;">
⚠️ This demo is for academic research and experiential use only. ⚠️ This demo is for academic research and experiential use only.
</div> </div>
""") """)
with gr.Row(): with gr.Row():
with gr.Column(): with gr.Column():
...@@ -262,14 +261,13 @@ with gr.Blocks() as demo: ...@@ -262,14 +261,13 @@ with gr.Blocks() as demo:
scale_status, scale_status,
rife_status, rife_status,
progress=gr.Progress(track_tqdm=True) progress=gr.Progress(track_tqdm=True)
): ):
latents, seed = infer( latents, seed = infer(
prompt, prompt,
num_inference_steps=50, # NOT Changed num_inference_steps=50, # NOT Changed
guidance_scale=7.0, # NOT Changed guidance_scale=7.0, # NOT Changed
seed=seed_value, seed=seed_value,
#progress=progress, # progress=progress,
) )
if scale_status: if scale_status:
latents = utils.upscale_batch_and_concatenate(upscale_model, latents, device) latents = utils.upscale_batch_and_concatenate(upscale_model, latents, device)
...@@ -308,5 +306,8 @@ with gr.Blocks() as demo: ...@@ -308,5 +306,8 @@ with gr.Blocks() as demo:
enhance_button.click(enhance_prompt_func, inputs=[prompt], outputs=[prompt]) enhance_button.click(enhance_prompt_func, inputs=[prompt], outputs=[prompt])
if __name__ == "__main__": if __name__ == "__main__":
generate(
"A garden comes to life as a kaleidoscope of butterflies flutters amidst the blossoms, their delicate wings casting shadows on the petals below. In the background, a grand fountain cascades water with a gentle splendor, its rhythmic sound providing a soothing backdrop. Beneath the cool shade of a mature tree, a solitary wooden chair invites solitude and reflection, its smooth surface worn by the touch of countless visitors seeking a moment of tranquility in nature's embrace.",
-1, False, False)
demo.queue(max_size=15) demo.queue(max_size=15)
demo.launch(server_name='0.0.0.0', share=True) demo.launch(server_name='0.0.0.0', share=True)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment