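# VRAM/device states. One of these is picked at import time from the detected
# hardware and the command line flags, and the rest of the module keys its
# model loading behaviour off that choice.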
CPU = 0
NO_VRAM = 1
LOW_VRAM = 2
NORMAL_VRAM = 3
HIGH_VRAM = 4
MPS = 5

accelerate_enabled = False
vram_state = NORMAL_VRAM

total_vram = 0
total_vram_available_mb = -1

import sys
import psutil

set_vram_to = NORMAL_VRAM

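# Auto-detect total VRAM and system RAM to choose a default VRAM state.
# Any failure here (e.g. no CUDA device) is swallowed and the defaults stand.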
try:
    import torch
    total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
    total_ram = psutil.virtual_memory().total / (1024 * 1024)
    forced_normal_vram = "--normalvram" in sys.argv
    if not forced_normal_vram:
        if total_vram <= 4096:
            print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")
            set_vram_to = LOW_VRAM
        elif total_vram > total_ram * 1.1 and total_vram > 14336:
            print("Enabling highvram mode because your GPU has more vram than your computer has ram. If you don't want this use: --normalvram")
            vram_state = HIGH_VRAM
except:
    pass

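# Newer PyTorch exposes a dedicated CUDA out-of-memory exception; fall back to
# the generic Exception on versions that do not have it.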
try:
    OOM_EXCEPTION = torch.cuda.OutOfMemoryError
except:
    OOM_EXCEPTION = Exception

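# xformers is optional: it is used for memory efficient attention when it can
# be imported, unless explicitly disabled on the command line.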
if "--disable-xformers" in sys.argv:
    XFORMERS_IS_AVAILBLE = False
else:
    try:
        import xformers
        import xformers.ops
        XFORMERS_IS_AVAILBLE = True
    except:
        XFORMERS_IS_AVAILBLE = False

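# --use-pytorch-cross-attention switches to PyTorch's built-in scaled dot
# product attention backends and turns xformers off.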
ENABLE_PYTORCH_ATTENTION = False
if "--use-pytorch-cross-attention" in sys.argv:
    torch.backends.cuda.enable_math_sdp(True)
    torch.backends.cuda.enable_flash_sdp(True)
    torch.backends.cuda.enable_mem_efficient_sdp(True)
    ENABLE_PYTORCH_ATTENTION = True
    XFORMERS_IS_AVAILBLE = False

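# Explicit command line flags override the auto-detected VRAM state.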
if "--lowvram" in sys.argv:
    set_vram_to = LOW_VRAM
if "--novram" in sys.argv:
    set_vram_to = NO_VRAM
if "--highvram" in sys.argv:
    vram_state = HIGH_VRAM
if torch.backends.mps.is_available():
    vram_state = MPS

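# Low/no VRAM modes depend on the accelerate package to split a model between
# GPU and CPU, so the state is only switched if accelerate imports correctly.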
if set_vram_to == LOW_VRAM or set_vram_to == NO_VRAM:
    try:
        import accelerate
        accelerate_enabled = True
        vram_state = set_vram_to
    except Exception as e:
        import traceback
        print(traceback.format_exc())
        print("ERROR: COULD NOT ENABLE LOW VRAM MODE.")

    total_vram_available_mb = (total_vram - 1024) // 2
    total_vram_available_mb = int(max(256, total_vram_available_mb))

if "--cpu" in sys.argv:
    vram_state = CPU

print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM", "MPS"][vram_state])


current_loaded_model = None
current_gpu_controlnets = []

model_accelerated = False


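# Unload the currently loaded model: remove any accelerate hooks, undo the
# model patches and (outside of high vram mode) move the weights and any
# loaded controlnets back to the CPU.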
def unload_model():
    global current_loaded_model
    global model_accelerated
    global current_gpu_controlnets
    global vram_state

    if current_loaded_model is not None:
        if model_accelerated:
            accelerate.hooks.remove_hook_from_submodules(current_loaded_model.model)
            model_accelerated = False

        #never unload models from GPU on high vram
        if vram_state != HIGH_VRAM:
            current_loaded_model.model.cpu()
        current_loaded_model.unpatch_model()
        current_loaded_model = None

    if vram_state != HIGH_VRAM:
        if len(current_gpu_controlnets) > 0:
            for n in current_gpu_controlnets:
                n.cpu()
            current_gpu_controlnets = []


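# Load a model onto the active device. The model is patched first; in low/no
# vram mode accelerate dispatches its submodules across GPU and CPU instead of
# moving everything to the GPU at once.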
def load_model_gpu(model):
    global current_loaded_model
    global vram_state
    global model_accelerated

    if model is current_loaded_model:
        return
    unload_model()
    try:
        real_model = model.patch_model()
    except Exception as e:
        model.unpatch_model()
        raise e
    current_loaded_model = model
    if vram_state == CPU:
        pass
    elif vram_state == MPS:
        # print(inspect.getmro(real_model.__class__))
        # print(dir(real_model))
        mps_device = torch.device("mps")
        real_model.to(mps_device)
        pass
    elif vram_state == NORMAL_VRAM or vram_state == HIGH_VRAM:
        model_accelerated = False
        real_model.cuda()
    else:
        if vram_state == NO_VRAM:
            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"})
        elif vram_state == LOW_VRAM:
            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(total_vram_available_mb), "cpu": "16GiB"})

        accelerate.dispatch_model(real_model, device_map=device_map, main_device="cuda")
        model_accelerated = True
    return current_loaded_model

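# Move the given controlnets to the GPU and offload previously loaded ones
# that are no longer needed. Skipped on CPU, MPS and low/no vram modes.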
def load_controlnet_gpu(models):
    global current_gpu_controlnets
    global vram_state
    if vram_state == CPU:
        return
    if vram_state == MPS:
        return

    if vram_state == LOW_VRAM or vram_state == NO_VRAM:
        #don't load controlnets like this if low vram because they will be loaded right before running and unloaded right after
        return

    for m in current_gpu_controlnets:
        if m not in models:
            m.cpu()

    current_gpu_controlnets = []
    for m in models:
        current_gpu_controlnets.append(m.cuda())


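# In low/no vram mode, individual submodels are moved to the GPU right before
# they are used and back to the CPU right after.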
def load_if_low_vram(model):
    global vram_state
    if vram_state == LOW_VRAM or vram_state == NO_VRAM:
        return model.cuda()
    return model

def unload_if_low_vram(model):
    global vram_state
    if vram_state == LOW_VRAM or vram_state == NO_VRAM:
        return model.cpu()
    return model

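# Returns a torch.device for CPU/MPS; on CUDA this returns the current device
# index as reported by torch.cuda.current_device().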
def get_torch_device():
    if vram_state == MPS:
        return torch.device("mps")
    if vram_state == CPU:
        return torch.device("cpu")
    else:
        return torch.cuda.current_device()

def get_autocast_device(dev):
    if hasattr(dev, 'type'):
        return dev.type
    return "cuda"

def xformers_enabled():
    if vram_state == CPU:
        return False
    return XFORMERS_IS_AVAILBLE

def pytorch_attention_enabled():
    return ENABLE_PYTORCH_ATTENTION

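# Free memory in bytes for the given device. On CUDA this counts both the
# memory the driver reports as free and memory torch has reserved but is not
# actively using.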
def get_free_memory(dev=None, torch_free_too=False):
    if dev is None:
        dev = get_torch_device()

    if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
        mem_free_total = psutil.virtual_memory().available
        mem_free_torch = mem_free_total
    else:
        stats = torch.cuda.memory_stats(dev)
        mem_active = stats['active_bytes.all.current']
        mem_reserved = stats['reserved_bytes.all.current']
        mem_free_cuda, _ = torch.cuda.mem_get_info(dev)
        mem_free_torch = mem_reserved - mem_active
        mem_free_total = mem_free_cuda + mem_free_torch

    if torch_free_too:
        return (mem_free_total, mem_free_torch)
    else:
        return mem_free_total

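# Heuristic upper bound on how much image area can be processed in a single
# batch, derived from the currently free memory.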
def maximum_batch_area():
    global vram_state
    if vram_state == NO_VRAM:
        return 0

    memory_free = get_free_memory() / (1024 * 1024)
    area = ((memory_free - 1024) * 0.9) / (0.6)
    return int(max(area, 0))

def cpu_mode():
    global vram_state
    return vram_state == CPU

def mps_mode():
    global vram_state
    return vram_state == MPS

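# Whether models should run in fp16: CPU and MPS stay in fp32, CUDA devices
# need bf16 support or compute capability >= 7, and the GTX 16 series is
# excluded because fp32 tends to be faster there.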
def should_use_fp16():
    if cpu_mode() or mps_mode():
        return False #TODO ?

    if torch.cuda.is_bf16_supported():
        return True

    props = torch.cuda.get_device_properties("cuda")
    if props.major < 7:
        return False

    #FP32 is faster on those cards?
    nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600"]
    for x in nvidia_16_series:
        if x in props.name:
            return False

    return True

#TODO: might be cleaner to put this somewhere else
import threading

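# Cooperative interruption: another thread sets the flag via
# interrupt_current_processing() and the worker raises
# InterruptProcessingException the next time it checks.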
class InterruptProcessingException(Exception):
    pass

interrupt_processing_mutex = threading.RLock()

interrupt_processing = False
def interrupt_current_processing(value=True):
    global interrupt_processing
    global interrupt_processing_mutex
    with interrupt_processing_mutex:
        interrupt_processing = value

def processing_interrupted():
    global interrupt_processing
    global interrupt_processing_mutex
    with interrupt_processing_mutex:
        return interrupt_processing

def throw_exception_if_processing_interrupted():
    global interrupt_processing
    global interrupt_processing_mutex
    with interrupt_processing_mutex:
        if interrupt_processing:
            interrupt_processing = False
            raise InterruptProcessingException()