# model_management.py
1
2
3
import psutil
from enum import Enum
from cli_args import args
4

5
6
7
8
9
10
11
class VRAMState(Enum):
    """Placement strategy for model weights during inference."""
    CPU = 0          # run everything on the CPU
    NO_VRAM = 1      # GPU present but almost unusable; offload aggressively via accelerate
    LOW_VRAM = 2     # small GPU; split model between GPU and CPU via accelerate
    NORMAL_VRAM = 3  # default: load the whole model onto the GPU
    HIGH_VRAM = 4    # plenty of VRAM; keep models resident on the GPU
    MPS = 5          # Apple silicon Metal Performance Shaders backend
12

13
14
15
# Determine VRAM State
vram_state = VRAMState.NORMAL_VRAM
set_vram_to = VRAMState.NORMAL_VRAM

# Total VRAM of the current CUDA device in MiB; stays 0 when CUDA is unavailable.
total_vram = 0
# MiB budget handed to accelerate in low/no-vram modes; -1 means "not computed".
total_vram_available_mb = -1

accelerate_enabled = False

try:
    import torch
    total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
    total_ram = psutil.virtual_memory().total / (1024 * 1024)
    if not args.normalvram and not args.cpu:
        if total_vram <= 4096:
            print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")
            set_vram_to = VRAMState.LOW_VRAM
        elif total_vram > total_ram * 1.1 and total_vram > 14336:
            # More VRAM than system RAM (and > 14GiB): keep models on the GPU.
            print("Enabling highvram mode because your GPU has more vram than your computer has ram. If you don't want this use: --normalvram")
            vram_state = VRAMState.HIGH_VRAM
except Exception:
    # Best effort: without torch/CUDA the NORMAL_VRAM defaults stand.
    pass

36
37
38
39
40
# torch.cuda.OutOfMemoryError only exists when torch imported successfully
# above; fall back to the generic Exception so `except OOM_EXCEPTION`
# clauses elsewhere keep working.
try:
    OOM_EXCEPTION = torch.cuda.OutOfMemoryError
except Exception:
    OOM_EXCEPTION = Exception

41
42
# xformers can be disabled explicitly with --disable-xformers; otherwise it is
# considered available only when the package imports cleanly.
if args.disable_xformers:
    XFORMERS_IS_AVAILABLE = False
else:
    try:
        import xformers
        import xformers.ops
        XFORMERS_IS_AVAILABLE = True
    except Exception:
        XFORMERS_IS_AVAILABLE = False
50

51
52
ENABLE_PYTORCH_ATTENTION = args.use_pytorch_cross_attention
if ENABLE_PYTORCH_ATTENTION:
    # Enable every scaled-dot-product-attention backend and let PyTorch choose;
    # xformers is turned off so the two attention paths cannot conflict.
    torch.backends.cuda.enable_math_sdp(True)
    torch.backends.cuda.enable_flash_sdp(True)
    torch.backends.cuda.enable_mem_efficient_sdp(True)
    XFORMERS_IS_AVAILABLE = False
57

58
59
60
61
62
63
# Explicit command-line flags override the auto-detected VRAM state.
if args.lowvram:
    set_vram_to = VRAMState.LOW_VRAM
elif args.novram:
    set_vram_to = VRAMState.NO_VRAM
elif args.highvram:
    vram_state = VRAMState.HIGH_VRAM
64

65

66
if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
    # Low/no-vram modes rely on accelerate to split the model between
    # GPU and CPU; without it we stay in the current (normal) state.
    try:
        import accelerate
        accelerate_enabled = True
        vram_state = set_vram_to
    except Exception as e:
        import traceback
        print(traceback.format_exc())
        print("ERROR: COULD NOT ENABLE LOW VRAM MODE.")

    # Reserve 1GiB of VRAM for other allocations, give half of the rest to
    # the model, and never let the budget drop below 256MiB.
    total_vram_available_mb = (total_vram - 1024) // 2
    total_vram_available_mb = int(max(256, total_vram_available_mb))
78

79
80
try:
    # Apple silicon: MPS takes priority over the CUDA-derived states.
    if torch.backends.mps.is_available():
        vram_state = VRAMState.MPS
except Exception:
    pass

# --cpu wins over everything else.
if args.cpu:
    vram_state = VRAMState.CPU

print(f"Set vram state to: {vram_state.name}")
89

90
91

# The main model currently patched and resident on the compute device
# (None when nothing is loaded), and the controlnet models on the GPU.
current_loaded_model = None
current_gpu_controlnets = []

# True while accelerate dispatch hooks are attached to the loaded model.
model_accelerated = False


97
98
def unload_model():
    """Move the currently loaded model (and GPU controlnets) back to the CPU.

    Removes accelerate hooks first when they were installed, then unpatches
    the model. In HIGH_VRAM mode models are intentionally left on the GPU.
    """
    global current_loaded_model
    global model_accelerated
    global current_gpu_controlnets
    global vram_state

    if current_loaded_model is not None:
        if model_accelerated:
            accelerate.hooks.remove_hook_from_submodules(current_loaded_model.model)
            model_accelerated = False

        #never unload models from GPU on high vram
        if vram_state != VRAMState.HIGH_VRAM:
            current_loaded_model.model.cpu()
        current_loaded_model.unpatch_model()
        current_loaded_model = None

    if vram_state != VRAMState.HIGH_VRAM:
        if len(current_gpu_controlnets) > 0:
            for n in current_gpu_controlnets:
                n.cpu()
            current_gpu_controlnets = []


def load_model_gpu(model):
    """Patch `model` and place it on the compute device for the current
    VRAM state.

    Unloads any previously loaded model first. Returns the loaded model
    wrapper, or None when `model` was already the loaded one. Re-raises
    any patching error after unpatching.
    """
    global current_loaded_model
    global vram_state
    global model_accelerated

    if model is current_loaded_model:
        return
    unload_model()
    try:
        real_model = model.patch_model()
    except Exception as e:
        model.unpatch_model()
        raise e
    current_loaded_model = model
    if vram_state == VRAMState.CPU:
        pass  # weights stay on the CPU
    elif vram_state == VRAMState.MPS:
        real_model.to(torch.device("mps"))
    elif vram_state == VRAMState.NORMAL_VRAM or vram_state == VRAMState.HIGH_VRAM:
        model_accelerated = False
        real_model.cuda()
    else:
        # LOW_VRAM / NO_VRAM: let accelerate split the model between the GPU
        # (with a capped budget) and CPU RAM.
        if vram_state == VRAMState.NO_VRAM:
            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"})
        elif vram_state == VRAMState.LOW_VRAM:
            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(total_vram_available_mb), "cpu": "16GiB"})

        accelerate.dispatch_model(real_model, device_map=device_map, main_device="cuda")
        model_accelerated = True
    return current_loaded_model
153

154
155
def load_controlnet_gpu(models):
    """Place the controlnets in `models` on the compute device.

    Controlnets from the previous call that are not in `models` are moved
    back to the CPU. No-op in CPU mode and in low/no-vram modes.
    """
    global current_gpu_controlnets
    global vram_state
    if vram_state == VRAMState.CPU:
        return

    if vram_state == VRAMState.LOW_VRAM or vram_state == VRAMState.NO_VRAM:
        #don't load controlnets like this if low vram because they will be loaded right before running and unloaded right after
        return

    # Offload controlnets that are no longer wanted.
    for m in current_gpu_controlnets:
        if m not in models:
            m.cpu()

    device = get_torch_device()
    current_gpu_controlnets = []
    for m in models:
        current_gpu_controlnets.append(m.to(device))
172

173

174
175
def load_if_low_vram(model):
    """In low/no-vram modes move `model` to the GPU just before it is used.

    Returns the (possibly moved) model; unchanged in other modes.
    """
    global vram_state
    if vram_state == VRAMState.LOW_VRAM or vram_state == VRAMState.NO_VRAM:
        return model.cuda()
    return model

def unload_if_low_vram(model):
    """Counterpart of load_if_low_vram(): move `model` back to the CPU right
    after use in low/no-vram modes. Returns the (possibly moved) model.
    """
    global vram_state
    if vram_state == VRAMState.LOW_VRAM or vram_state == VRAMState.NO_VRAM:
        return model.cpu()
    return model

186
def get_torch_device():
    """Return the compute device for the current VRAM state.

    MPS and CPU modes return a torch.device; otherwise the current CUDA
    device index is returned (as torch.cuda.current_device() yields).
    """
    if vram_state == VRAMState.MPS:
        return torch.device("mps")
    if vram_state == VRAMState.CPU:
        return torch.device("cpu")
    else:
        return torch.cuda.current_device()

def get_autocast_device(dev):
    """Return the device-type string for torch.autocast given `dev`.

    torch.device objects expose their kind via `.type`; anything without
    that attribute (e.g. a bare CUDA device index) is treated as "cuda".
    """
    return getattr(dev, 'type', "cuda")
198

199

200
def xformers_enabled():
    """True when xformers attention should be used (never in CPU mode)."""
    if vram_state == VRAMState.CPU:
        return False
    return XFORMERS_IS_AVAILABLE
204

205
206
207
208
209
210
211
212
213
214
215
216
217

def xformers_enabled_vae():
    """Whether xformers attention is usable for the VAE.

    The 0.0.18 release returns NaNs for large inputs (1152x1920 res images
    and above), so that exact version is excluded.
    """
    if not xformers_enabled():
        return False
    try:
        buggy_release = xformers.version.__version__ == "0.0.18"
    except:
        buggy_release = False
    return not buggy_release

218
219
220
def pytorch_attention_enabled():
    """True when PyTorch scaled-dot-product attention was requested via CLI."""
    return ENABLE_PYTORCH_ATTENTION

221
222
def get_free_memory(dev=None, torch_free_too=False):
    """Return free memory in bytes on `dev` (default: the current device).

    When `torch_free_too` is True, returns a tuple
    (total_free, free_inside_torch) where the second value is memory torch
    has reserved but not allocated; otherwise returns just the total.
    """
    if dev is None:
        dev = get_torch_device()

    if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
        # CPU and MPS share system RAM.
        mem_free_total = psutil.virtual_memory().available
        mem_free_torch = mem_free_total
    else:
        stats = torch.cuda.memory_stats(dev)
        mem_active = stats['active_bytes.all.current']
        mem_reserved = stats['reserved_bytes.all.current']
        mem_free_cuda, _ = torch.cuda.mem_get_info(dev)
        # Reserved-but-inactive memory is reusable by torch without asking CUDA.
        mem_free_torch = mem_reserved - mem_active
        mem_free_total = mem_free_cuda + mem_free_torch

    if torch_free_too:
        return (mem_free_total, mem_free_torch)
    else:
        return mem_free_total
240
241
242

def maximum_batch_area():
    """Estimate the maximum batch area that fits in currently free memory.

    Returns 0 in NO_VRAM mode. The formula keeps 1GiB of headroom and
    applies a 0.9/0.6 safety scaling to the remaining free MiB.
    """
    global vram_state
    if vram_state == VRAMState.NO_VRAM:
        return 0

    memory_free = get_free_memory() / (1024 * 1024)
    area = ((memory_free - 1024) * 0.9) / (0.6)
    return int(max(area, 0))
249
250
251

def cpu_mode():
    """True when everything runs on the CPU."""
    global vram_state
    return vram_state == VRAMState.CPU
253

254
255
def mps_mode():
    """True when running on Apple silicon via MPS."""
    global vram_state
    return vram_state == VRAMState.MPS
257

258
def should_use_fp16():
    """Heuristic: should models run in fp16 on the current device?

    False on CPU/MPS; True when the GPU supports bf16; otherwise requires
    compute capability >= 7.0 and excludes the GTX 16xx / Txxx series.
    """
    if cpu_mode() or mps_mode():
        return False #TODO ?

    if torch.cuda.is_bf16_supported():
        return True

    props = torch.cuda.get_device_properties("cuda")
    if props.major < 7:
        return False

    #FP32 is faster on those cards?
    nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600"]
    for x in nvidia_16_series:
        if x in props.name:
            return False

    return True

277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
#TODO: might be cleaner to put this somewhere else
import threading

class InterruptProcessingException(Exception):
    """Raised by throw_exception_if_processing_interrupted() to abort work."""
    pass

# Guards the interrupt flag; an RLock so a holder may safely re-enter.
interrupt_processing_mutex = threading.RLock()

interrupt_processing = False

def interrupt_current_processing(value=True):
    """Request (or with value=False, withdraw) an interrupt of current work."""
    global interrupt_processing
    with interrupt_processing_mutex:
        interrupt_processing = value

def processing_interrupted():
    """Report whether an interrupt is pending, without clearing it."""
    with interrupt_processing_mutex:
        return interrupt_processing

def throw_exception_if_processing_interrupted():
    """If an interrupt is pending, clear it and raise InterruptProcessingException."""
    global interrupt_processing
    with interrupt_processing_mutex:
        if interrupt_processing:
            interrupt_processing = False
            raise InterruptProcessingException()