import psutil
from enum import Enum
from cli_args import args
class VRAMState(Enum):
    """Memory regime the loader operates in; decides where models live."""
    CPU = 0          # no GPU used: models stay in system RAM
    NO_VRAM = 1      # barely any VRAM: accelerate offloads almost everything
    LOW_VRAM = 2     # small GPU: accelerate splits models between GPU and CPU
    NORMAL_VRAM = 3  # default: load one model onto the GPU at a time
    HIGH_VRAM = 4    # more VRAM than RAM: models are never moved off the GPU
    MPS = 5          # Apple MPS backend (torch.device("mps"))
# Determine VRAM State
vram_state = VRAMState.NORMAL_VRAM    # effective mode; adjusted by detection and CLI flags below
set_vram_to = VRAMState.NORMAL_VRAM   # requested low/no-vram mode; applied only if accelerate imports

total_vram = 0                # detected GPU memory in MB (stays 0 if probing fails)
total_vram_available_mb = -1  # GPU-side MB budget for accelerate device maps (low/no-vram only)

accelerate_enabled = False    # True once the accelerate package imports in low/no-vram mode
xpu_available = False         # True when intel_extension_for_pytorch reports a usable XPU
try:
    import torch

    # Probe Intel XPU first; if unavailable fall back to CUDA below.
    try:
        import intel_extension_for_pytorch as ipex
        if torch.xpu.is_available():
            xpu_available = True
            total_vram = torch.xpu.get_device_properties(torch.xpu.current_device()).total_memory / (1024 * 1024)
    except Exception:
        # intel_extension_for_pytorch not installed or failed to initialize.
        pass
    if not xpu_available:
        # Fix: previously this only ran when the ipex import itself failed, so a
        # machine with ipex installed but no XPU kept total_vram == 0 and was
        # wrongly pushed into lowvram mode.
        total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
    total_ram = psutil.virtual_memory().total / (1024 * 1024)
    if not args.normalvram and not args.cpu:
        if total_vram <= 4096:
            print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")
            set_vram_to = VRAMState.LOW_VRAM
        elif total_vram > total_ram * 1.1 and total_vram > 14336:
            print("Enabling highvram mode because your GPU has more vram than your computer has ram. If you don't want this use: --normalvram")
            vram_state = VRAMState.HIGH_VRAM
except Exception:
    # torch missing or no visible GPU: keep the NORMAL_VRAM defaults.
    pass

# Exception type raised on GPU out-of-memory; falls back to the generic
# Exception when torch (or torch.cuda.OutOfMemoryError) is unavailable.
try:
    OOM_EXCEPTION = torch.cuda.OutOfMemoryError
except Exception:  # was a bare except; narrow so Ctrl-C etc. still propagate
    OOM_EXCEPTION = Exception
# xformers availability and the VAE-specific opt-out for the broken 0.0.18 line.
XFORMERS_VERSION = ""
XFORMERS_ENABLED_VAE = True
if args.disable_xformers:
    XFORMERS_IS_AVAILABLE = False
else:
    try:
        import xformers
        import xformers.ops
        XFORMERS_IS_AVAILABLE = True
        try:
            XFORMERS_VERSION = xformers.version.__version__
            print("xformers version:", XFORMERS_VERSION)
            if XFORMERS_VERSION.startswith("0.0.18"):
                print()
                print("WARNING: This version of xformers has a major bug where you will get black images when generating high resolution images.")
                print("Please downgrade or upgrade xformers to a different version.")
                print()
                XFORMERS_ENABLED_VAE = False
        except Exception:  # was a bare except; some builds lack xformers.version
            pass
    except Exception:  # was a bare except; import failure means not available
        XFORMERS_IS_AVAILABLE = False
# Optional PyTorch scaled-dot-product attention path; takes priority over xformers.
ENABLE_PYTORCH_ATTENTION = args.use_pytorch_cross_attention
if ENABLE_PYTORCH_ATTENTION:
    # Allow all three SDP backends and disable xformers so it is not picked instead.
    for _enable_sdp_backend in (torch.backends.cuda.enable_math_sdp,
                                torch.backends.cuda.enable_flash_sdp,
                                torch.backends.cuda.enable_mem_efficient_sdp):
        _enable_sdp_backend(True)
    XFORMERS_IS_AVAILABLE = False

# Explicit CLI overrides; --lowvram wins over --novram wins over --highvram.
if args.lowvram:
    set_vram_to = VRAMState.LOW_VRAM
elif args.novram:
    set_vram_to = VRAMState.NO_VRAM
elif args.highvram:
    vram_state = VRAMState.HIGH_VRAM

# Optional override: keep everything in fp32 (consulted by should_use_fp16()).
FORCE_FP32 = False
if args.force_fp32:
    FORCE_FP32 = True
    print("Forcing FP32, if this improves things please report it.")
# Low/no-vram modes require accelerate for CPU offloading; only switch the
# effective state once the import succeeds.
if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
    try:
        import accelerate
        accelerate_enabled = True
        vram_state = set_vram_to
    except Exception:
        import traceback
        print(traceback.format_exc())
        print("ERROR: COULD NOT ENABLE LOW VRAM MODE.")

    # GPU budget for the accelerate device map: half of VRAM minus a 1GB
    # safety margin, but never below 256MB.
    total_vram_available_mb = (total_vram - 1024) // 2
    total_vram_available_mb = int(max(256, total_vram_available_mb))
# Prefer Apple MPS when the backend reports itself available.
try:
    if torch.backends.mps.is_available():
        vram_state = VRAMState.MPS
except Exception:  # was a bare except; torch missing or built without MPS
    pass
# --cpu overrides every other detected mode.
if args.cpu:
    vram_state = VRAMState.CPU

print(f"Set vram state to: {vram_state.name}")
# Module-level tracking of what currently occupies the compute device.
current_loaded_model = None      # the active (patched) model, or None
current_gpu_controlnets = []     # controlnets currently moved to the device
model_accelerated = False        # True while accelerate hooks are attached
def unload_model():
    """Detach and evict the active model (and GPU controlnets) from the GPU.

    In HIGH_VRAM mode nothing is moved off the device; the model is still
    unpatched and forgotten so a new one can become active.
    """
    global current_loaded_model
    global model_accelerated
    global current_gpu_controlnets
    global vram_state

    active = current_loaded_model
    if active is not None:
        if model_accelerated:
            accelerate.hooks.remove_hook_from_submodules(active.model)
            model_accelerated = False

        # never unload models from GPU on high vram
        if vram_state != VRAMState.HIGH_VRAM:
            active.model.cpu()
        active.unpatch_model()
        current_loaded_model = None

    if vram_state != VRAMState.HIGH_VRAM and len(current_gpu_controlnets) > 0:
        for cn in current_gpu_controlnets:
            cn.cpu()
        current_gpu_controlnets = []


def load_model_gpu(model):
    """Make *model* the active model: patch it and place it per vram_state.

    Returns the model once loaded; returns None (early) when *model* is
    already the active one. Re-raises any failure from patch_model() after
    unpatching.
    """
    global current_loaded_model
    global vram_state
    global model_accelerated

    if model is current_loaded_model:
        return
    unload_model()

    try:
        real_model = model.patch_model()
    except Exception as e:
        # Leave the model unpatched before propagating the failure.
        model.unpatch_model()
        raise e

    current_loaded_model = model
    if vram_state == VRAMState.CPU:
        pass
    elif vram_state == VRAMState.MPS:
        real_model.to(torch.device("mps"))
    elif vram_state in (VRAMState.NORMAL_VRAM, VRAMState.HIGH_VRAM):
        model_accelerated = False
        real_model.to(get_torch_device())
    else:
        # LOW_VRAM / NO_VRAM: let accelerate split the model across GPU and CPU.
        if vram_state == VRAMState.NO_VRAM:
            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"})
        elif vram_state == VRAMState.LOW_VRAM:
            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(total_vram_available_mb), "cpu": "16GiB"})

        accelerate.dispatch_model(real_model, device_map=device_map, main_device=get_torch_device())
        model_accelerated = True
    return current_loaded_model
def load_controlnet_gpu(models):
    """Move *models* (controlnets) to the compute device, evicting unwanted ones.

    No-op in CPU mode and in low/no-vram modes.
    """
    global current_gpu_controlnets
    global vram_state
    if vram_state == VRAMState.CPU:
        return

    if vram_state in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
        # don't load controlnets like this if low vram because they will be
        # loaded right before running and unloaded right after
        return

    # Send controlnets that are no longer wanted back to the CPU.
    for cn in current_gpu_controlnets:
        if cn not in models:
            cn.cpu()

    device = get_torch_device()
    current_gpu_controlnets = []
    for cn in models:
        current_gpu_controlnets.append(cn.to(device))
def load_if_low_vram(model):
    """In low/no-vram modes, move *model* to the compute device; else return it unchanged."""
    global vram_state
    if vram_state in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
        return model.to(get_torch_device())
    return model
def unload_if_low_vram(model):
    """In low/no-vram modes, move *model* back to the CPU; else return it unchanged."""
    global vram_state
    if vram_state in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
        return model.cpu()
    return model
def get_torch_device():
    """Return the device inference should target for the current vram_state.

    Note: the CUDA path returns torch.cuda.current_device() (a device index),
    while the other paths return torch.device objects.
    """
    global xpu_available
    if vram_state == VRAMState.MPS:
        return torch.device("mps")
    if vram_state == VRAMState.CPU:
        return torch.device("cpu")
    if xpu_available:
        return torch.device("xpu")
    return torch.cuda.current_device()
def get_autocast_device(dev):
    """Map *dev* to the device-type string autocast expects; default to "cuda".

    Accepts torch.device objects (which expose .type) as well as plain CUDA
    device indices (which do not).
    """
    return getattr(dev, 'type', "cuda")

def xformers_enabled():
    """Whether xformers attention may be used (never in pure-CPU mode)."""
    return vram_state != VRAMState.CPU and XFORMERS_IS_AVAILABLE
def xformers_enabled_vae():
    """Whether the VAE may use xformers (requires xformers_enabled() and a
    version without the 0.0.18 black-image bug)."""
    if not xformers_enabled():
        return False
    return XFORMERS_ENABLED_VAE
def pytorch_attention_enabled():
    """Whether the PyTorch scaled-dot-product attention path was requested."""
    return ENABLE_PYTORCH_ATTENTION
def get_free_memory(dev=None, torch_free_too=False):
    """Return free memory in bytes on *dev* (default: the inference device).

    With torch_free_too=True, returns (total_free, free_inside_torch); the
    second figure is memory torch has reserved but not allocated (on CUDA).
    """
    global xpu_available
    if dev is None:
        dev = get_torch_device()

    if hasattr(dev, 'type') and dev.type in ('cpu', 'mps'):
        # CPU/MPS share system RAM.
        mem_free_total = psutil.virtual_memory().available
        mem_free_torch = mem_free_total
    elif xpu_available:
        mem_free_total = torch.xpu.get_device_properties(dev).total_memory - torch.xpu.memory_allocated(dev)
        mem_free_torch = mem_free_total
    else:
        stats = torch.cuda.memory_stats(dev)
        mem_active = stats['active_bytes.all.current']
        mem_reserved = stats['reserved_bytes.all.current']
        mem_free_cuda, _ = torch.cuda.mem_get_info(dev)
        mem_free_torch = mem_reserved - mem_active
        mem_free_total = mem_free_cuda + mem_free_torch

    if torch_free_too:
        return (mem_free_total, mem_free_torch)
    return mem_free_total
def maximum_batch_area():
    """Estimate the maximum latent area that fits in free memory (0 in NO_VRAM mode)."""
    global vram_state
    if vram_state == VRAMState.NO_VRAM:
        return 0

    memory_free = get_free_memory() / (1024 * 1024)
    # 1GB headroom, then the 0.9 / 0.6 scaling factors; clamp at zero.
    area = ((memory_free - 1024) * 0.9) / (0.6)
    return int(max(area, 0))
def cpu_mode():
    """True when inference runs entirely on the CPU."""
    return vram_state == VRAMState.CPU
def mps_mode():
    """True when inference runs on the Apple MPS backend."""
    return vram_state == VRAMState.MPS
def should_use_fp16():
    """Decide whether fp16 weights/inference should be used on this device."""
    global xpu_available

    if FORCE_FP32:
        return False

    # CPU, MPS and XPU paths stay in fp32 for now.
    if cpu_mode() or mps_mode() or xpu_available:
        return False #TODO ?

    # bf16-capable CUDA GPUs get fp16 enabled unconditionally.
    if torch.cuda.is_bf16_supported():
        return True

    props = torch.cuda.get_device_properties("cuda")
    # Exclude GPUs with compute capability below 7.
    if props.major < 7:
        return False

    #FP32 is faster on those cards?
    nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600"]
    if any(tag in props.name for tag in nvidia_16_series):
        return False

    return True
#TODO: might be cleaner to put this somewhere else
import threading

class InterruptProcessingException(Exception):
    """Raised to abort the job currently being processed."""
    pass

# Guards interrupt_processing; the flag may be flipped from another thread.
interrupt_processing_mutex = threading.RLock()

interrupt_processing = False

def interrupt_current_processing(value=True):
    """Request an interrupt of the running job (or clear it with value=False)."""
    global interrupt_processing
    with interrupt_processing_mutex:
        interrupt_processing = value

def processing_interrupted():
    """Return whether an interrupt is pending, without clearing it."""
    with interrupt_processing_mutex:
        return interrupt_processing

def throw_exception_if_processing_interrupted():
    """Clear a pending interrupt and raise InterruptProcessingException; no-op otherwise."""
    global interrupt_processing
    with interrupt_processing_mutex:
        if interrupt_processing:
            interrupt_processing = False
            raise InterruptProcessingException()