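"""Latent preview helpers: turn in-progress latents into small preview images during
sampling, either by decoding with TAESD or by projecting latent channels straight to RGB."""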
import torch
from PIL import Image
import struct
import numpy as np
from comfy.cli_args import args, LatentPreviewMethod
from comfy.taesd.taesd import TAESD
import comfy.model_management
import folder_paths
import comfy.utils
import logging

MAX_PREVIEW_RESOLUTION = 512

def preview_to_image(latent_image):
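    """Rescale a decoded image tensor (H, W, C) valued roughly in [-1, 1] to a uint8 PIL Image."""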
    latents_ubyte = (((latent_image + 1.0) / 2.0).clamp(0, 1)  # change scale from -1..1 to 0..1
                     .mul(0xFF)  # to 0..255
                     ).to(device="cpu", dtype=torch.uint8, non_blocking=comfy.model_management.device_supports_non_blocking(latent_image.device))

    return Image.fromarray(latents_ubyte.numpy())

class LatentPreviewer:
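    """Base class; subclasses convert a latent batch into a single PIL preview image."""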
    def decode_latent_to_preview(self, x0):
        pass

    def decode_latent_to_preview_image(self, preview_format, x0):
        preview_image = self.decode_latent_to_preview(x0)
        return ("JPEG", preview_image, MAX_PREVIEW_RESOLUTION)

class TAESDPreviewerImpl(LatentPreviewer):
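    """Previewer that decodes the first latent in the batch with a TAESD model."""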
    def __init__(self, taesd):
        self.taesd = taesd

    def decode_latent_to_preview(self, x0):
        x_sample = self.taesd.decode(x0[:1])[0].movedim(0, 2)
        return preview_to_image(x_sample)


class Latent2RGBPreviewer(LatentPreviewer):
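    """Cheap previewer: maps latent channels straight to RGB with a fixed factor
    matrix, so no decoder model is required."""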
    def __init__(self, latent_rgb_factors):
        self.latent_rgb_factors = torch.tensor(latent_rgb_factors, device="cpu")

    def decode_latent_to_preview(self, x0):
        self.latent_rgb_factors = self.latent_rgb_factors.to(dtype=x0.dtype, device=x0.device)
        latent_image = x0[0].permute(1, 2, 0) @ self.latent_rgb_factors
        return preview_to_image(latent_image)


def get_previewer(device, latent_format):
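    """Pick a previewer based on ``args.preview_method`` and the model's latent format.

    Returns None when previews are disabled or no usable preview method is found.
    """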
    previewer = None
    method = args.preview_method
    if method != LatentPreviewMethod.NoPreviews:
        # TODO previewer methods
        taesd_decoder_path = None
        if latent_format.taesd_decoder_name is not None:
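            # Find a decoder checkpoint in the vae_approx folder whose filename
            # starts with the TAESD decoder name expected by this latent format.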
            taesd_decoder_path = next(
                (fn for fn in folder_paths.get_filename_list("vae_approx")
                    if fn.startswith(latent_format.taesd_decoder_name)),
                ""
            )
            taesd_decoder_path = folder_paths.get_full_path("vae_approx", taesd_decoder_path)

        if method == LatentPreviewMethod.Auto:
            method = LatentPreviewMethod.Latent2RGB

        if method == LatentPreviewMethod.TAESD:
            if taesd_decoder_path:
                taesd = TAESD(None, taesd_decoder_path, latent_channels=latent_format.latent_channels).to(device)
                previewer = TAESDPreviewerImpl(taesd)
            else:
                logging.warning("Warning: TAESD previews enabled, but could not find models/vae_approx/{}".format(latent_format.taesd_decoder_name))

        if previewer is None:
            if latent_format.latent_rgb_factors is not None:
                previewer = Latent2RGBPreviewer(latent_format.latent_rgb_factors)
    return previewer

def prepare_callback(model, steps, x0_output_dict=None):
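    """Build a per-step sampler callback that records x0 into x0_output_dict (if given),
    renders a preview image when a previewer is available, and advances the progress bar.
    """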
    preview_format = "JPEG"
    if preview_format not in ["JPEG", "PNG"]:
        preview_format = "JPEG"

    previewer = get_previewer(model.load_device, model.model.latent_format)

    pbar = comfy.utils.ProgressBar(steps)
    def callback(step, x0, x, total_steps):
        if x0_output_dict is not None:
            x0_output_dict["x0"] = x0

        preview_bytes = None
        if previewer:
            preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
        pbar.update_absolute(step + 1, total_steps, preview_bytes)
    return callback
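

# Usage sketch (illustrative, not part of this module): ComfyUI's sampling nodes build
# the callback once and pass it to the sampler, which invokes it each step with the
# current denoised latent (x0). The lines below assume comfy.sample.sample accepts a
# `callback` keyword argument, as the stock KSampler node uses it; adapt as needed.
#
#   callback = prepare_callback(model, steps)
#   samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler,
#                                 positive, negative, latent_image, callback=callback)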