# Copyright (c) Alibaba, Inc. and its affiliates.

import argparse
import datetime
import inspect
import os
import numpy as np
from PIL import Image
from omegaconf import OmegaConf
from collections import OrderedDict

import torch

from diffusers import AutoencoderKL, DDIMScheduler, UniPCMultistepScheduler

from transformers import CLIPTextModel, CLIPTokenizer

from facechain.utils import snapshot_download

from facechain_animate.magicanimate.models.unet_controlnet import UNet3DConditionModel
from facechain_animate.magicanimate.models.controlnet import ControlNetModel
from facechain_animate.magicanimate.models.appearance_encoder import AppearanceEncoderModel
from facechain_animate.magicanimate.models.mutual_self_attention import ReferenceAttentionControl
from facechain_animate.magicanimate.pipelines.pipeline_animation import AnimationPipeline
from facechain_animate.magicanimate.utils.util import save_videos_grid
from accelerate.utils import set_seed

from facechain_animate.magicanimate.utils.videoreader import VideoReader
from facechain.utils import join_worker_data_dir

from einops import rearrange


class MagicAnimate:
    def __init__(self, uuid, config="facechain_animate/magicanimate/configs/prompts/animation.yaml") -> None:
        if not uuid:
            if os.getenv("MODELSCOPE_ENVIRONMENT") == 'studio':
                # __init__ cannot return a value; raise instead of returning an error string.
                raise ValueError("Please login first")
            else:
                uuid = 'qw'
        self.save_dir = join_worker_data_dir(uuid, 'animate')

        print("Initializing MagicAnimate Pipeline...")

        config = OmegaConf.load(config)
        inference_config = OmegaConf.load(config.inference_config)

        self.config = config
        self.inference_config = inference_config

    def __call__(self, source_image, motion_sequence, random_seed, step, guidance_scale, size=512):
        config = self.config
        inference_config = self.inference_config
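        # Snapshot the call arguments; used below only to record the motion module's global_step.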
        *_, func_args = inspect.getargvalues(inspect.currentframe())
        func_args = dict(func_args)

        ### >>> create animation pipeline >>> ###
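        # Fetch pretrained weights from the ModelScope hub (cached locally after the first run).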
        sd15_model_dir = snapshot_download('AI-ModelScope/stable-diffusion-v1-5')
        sdvae_model_dir = snapshot_download('zhuzhukeji/sd-vae-ft-mse')
        magicanimate_model_dir = snapshot_download('AI-ModelScope/MagicAnimate')

        tokenizer = CLIPTokenizer.from_pretrained(sd15_model_dir, subfolder="tokenizer")
        text_encoder = CLIPTextModel.from_pretrained(sd15_model_dir, subfolder="text_encoder")
        unet = UNet3DConditionModel.from_pretrained_2d(sd15_model_dir, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(inference_config.unet_additional_kwargs))
        
        vae = AutoencoderKL.from_pretrained(sdvae_model_dir, subfolder="vae")

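        # The appearance encoder extracts appearance features from the source image; the
        # reference-attention writer/reader pair shares those features with the denoising
        # UNet through mutual self-attention.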
        self.appearance_encoder = AppearanceEncoderModel.from_pretrained(magicanimate_model_dir, subfolder="appearance_encoder").cuda()
        self.reference_control_writer = ReferenceAttentionControl(self.appearance_encoder, do_classifier_free_guidance=True, mode='write', fusion_blocks=config.fusion_blocks)
        self.reference_control_reader = ReferenceAttentionControl(unet, do_classifier_free_guidance=True, mode='read', fusion_blocks=config.fusion_blocks)

        ### Load the DensePose-conditioned ControlNet
        controlnet = ControlNetModel.from_pretrained(magicanimate_model_dir, subfolder="densepose_controlnet")

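        # Run all networks in half precision and enable xFormers attention to cut GPU memory usage.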
        vae.to(torch.float16)
        unet.to(torch.float16)
        text_encoder.to(torch.float16)
        controlnet.to(torch.float16)
        self.appearance_encoder.to(torch.float16)
        
        unet.enable_xformers_memory_efficient_attention()
        self.appearance_encoder.enable_xformers_memory_efficient_attention()
        controlnet.enable_xformers_memory_efficient_attention()

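        # Assemble the animation pipeline with a DDIM scheduler built from the inference config.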
        self.pipeline = AnimationPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet,
            scheduler=DDIMScheduler(**OmegaConf.to_container(inference_config.noise_scheduler_kwargs)),
            # NOTE: UniPCMultistepScheduler can be used here as an alternative scheduler.
        ).to("cuda")

        # 1. UNet checkpoint
        # 1.1 motion module (temporal attention)
        motion_module_state_dict = torch.load(os.path.join(magicanimate_model_dir, 'temporal_attention/temporal_attention.ckpt'), map_location="cpu")
        if "global_step" in motion_module_state_dict: func_args.update({"global_step": motion_module_state_dict["global_step"]})
        motion_module_state_dict = motion_module_state_dict['state_dict'] if 'state_dict' in motion_module_state_dict else motion_module_state_dict
        try:
            # Self-trained checkpoints may carry a DistributedDataParallel "module." prefix; strip it.
            state_dict = OrderedDict()
            for key in motion_module_state_dict.keys():
                if key.startswith("module."):
                    _key = key.split("module.")[-1]
                    state_dict[_key] = motion_module_state_dict[key]
                else:
                    state_dict[key] = motion_module_state_dict[key]
            motion_module_state_dict = state_dict
            del state_dict
            missing, unexpected = self.pipeline.unet.load_state_dict(motion_module_state_dict, strict=False)
            assert len(unexpected) == 0
        except Exception:
            # Fall back to loading only the motion-module weights, handling an optional "unet." prefix.
            _tmp_ = OrderedDict()
            for key in motion_module_state_dict.keys():
                if "motion_modules" in key:
                    if key.startswith("unet."):
                        _key = key.split('unet.')[-1]
                        _tmp_[_key] = motion_module_state_dict[key]
                    else:
                        _tmp_[key] = motion_module_state_dict[key]
            missing, unexpected = unet.load_state_dict(_tmp_, strict=False)
            assert len(unexpected) == 0
            del _tmp_
        del motion_module_state_dict

        self.pipeline.to("cuda")
        self.L = config.L
    
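        # Text prompts are unused in this pipeline; both stay empty.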
        prompt = n_prompt = ""
        random_seed = int(random_seed)
        step = int(step)
        guidance_scale = float(guidance_scale)
        samples_per_video = []
        # manually set random seed for reproduction
        if random_seed != -1: 
            torch.manual_seed(random_seed)
            set_seed(random_seed)
        else:
            torch.seed()
            
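        # Decode the driving (DensePose) motion video and resize every frame to (size, size).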
        if motion_sequence.endswith('.mp4'):
            control = VideoReader(motion_sequence).read()
            if control[0].shape[0] != size:
                control = [np.array(Image.fromarray(c).resize((size, size))) for c in control]
            control = np.array(control)
        else:
            # Fail fast: `control` is required below, so reject unsupported inputs.
            raise ValueError(f"Unsupported motion sequence: {motion_sequence} (expected an .mp4 file)")

        if source_image.shape[0] != size:
            source_image = np.array(Image.fromarray(source_image).resize((size, size)))
        H, W, C = source_image.shape
        
        init_latents = None
        original_length = control.shape[0]
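        # Pad the control frames to a multiple of the temporal window L (edge padding);
        # the padded tail is trimmed back to original_length after generation.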
        if control.shape[0] % self.L > 0:
            control = np.pad(control, ((0, self.L-control.shape[0] % self.L), (0, 0), (0, 0), (0, 0)), mode='edge')
        generator = torch.Generator(device=torch.device("cuda:0"))
        generator.manual_seed(torch.initial_seed())
        

        sample = self.pipeline(
            prompt,
            negative_prompt         = n_prompt,
            num_inference_steps     = step,
            guidance_scale          = guidance_scale,
            width                   = W,
            height                  = H,
            video_length            = len(control),
            controlnet_condition    = control,
            init_latents            = init_latents,
            generator               = generator,
            appearance_encoder       = self.appearance_encoder, 
            reference_control_writer = self.reference_control_writer,
            reference_control_reader = self.reference_control_reader,
            source_image             = source_image,
        ).videos

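        # Stack source image, control video, and generated animation into one grid,
        # trimming the temporal padding back to the original length.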
        source_images = np.array([source_image] * original_length)
        source_images = rearrange(torch.from_numpy(source_images), "t h w c -> 1 c t h w") / 255.0
        samples_per_video.append(source_images)
        
        control = control / 255.0
        control = rearrange(control, "t h w c -> 1 c t h w")
        control = torch.from_numpy(control)
        samples_per_video.append(control[:, :, :original_length])

        samples_per_video.append(sample[:, :, :original_length])

        samples_per_video = torch.cat(samples_per_video)

        time_str = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
        savedir = os.path.join(self.save_dir, 'outputs')
        os.makedirs(savedir, exist_ok=True)
        animation_path = f"{savedir}/{time_str}.mp4"
        save_videos_grid(samples_per_video, animation_path)
        
        return animation_path
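

if __name__ == "__main__":
    # Minimal CLI sketch for running the pipeline locally. This block is illustrative:
    # the flag names and defaults are assumptions, not part of the original entry points.
    parser = argparse.ArgumentParser(description="MagicAnimate inference")
    parser.add_argument("--source_image", required=True, help="Path to the source image")
    parser.add_argument("--motion_sequence", required=True, help="Path to the driving .mp4 motion video")
    parser.add_argument("--seed", type=int, default=-1, help="Random seed; -1 keeps seeding random")
    parser.add_argument("--steps", type=int, default=25, help="Number of DDIM denoising steps")
    parser.add_argument("--guidance_scale", type=float, default=7.5, help="Classifier-free guidance scale")
    parser.add_argument("--size", type=int, default=512, help="Square resolution for frames and source image")
    args = parser.parse_args()

    animator = MagicAnimate(uuid="local")
    image = np.array(Image.open(args.source_image).convert("RGB"))
    output_path = animator(image, args.motion_sequence, args.seed, args.steps, args.guidance_scale, size=args.size)
    print(f"Animation saved to {output_path}")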