"profiler/profile_conv_fwd_bias_relu.cpp" did not exist on "e823d518cb46ad61ddb3c70eac8529e0a58af1f8"
model_management.py 7.92 KB
Newer Older
1

2
3
4
5
# --- VRAM strategy constants -------------------------------------------------
# vram_state holds the active strategy; the values double as indices into the
# name list printed once detection below has finished.
CPU = 0
NO_VRAM = 1
LOW_VRAM = 2
NORMAL_VRAM = 3
HIGH_VRAM = 4
MPS = 5

accelerate_enabled = False      # True once `accelerate` is imported for low/no vram modes
vram_state = NORMAL_VRAM        # active strategy, refined by the detection code below

total_vram = 0                  # total GPU memory in MiB (0 until detected)
total_vram_available_mb = -1    # MiB budget handed to accelerate in low/no vram modes

import sys
import psutil

# Command line flags are read straight from sys.argv, before any heavy
# imports, so the detection code below can honour them.
forced_cpu = "--cpu" in sys.argv
set_vram_to = NORMAL_VRAM
# Auto-detect a vram strategy from the hardware, unless the user overrode it.
try:
    import torch
    # mem_get_info returns (free, total) in bytes; keep the total in MiB.
    total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
    total_ram = psutil.virtual_memory().total / (1024 * 1024)
    forced_normal_vram = "--normalvram" in sys.argv
    if not forced_normal_vram and not forced_cpu:
        if total_vram <= 4096:
            print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")
            set_vram_to = LOW_VRAM
        elif total_vram > total_ram * 1.1 and total_vram > 14336:
            print("Enabling highvram mode because your GPU has more vram than your computer has ram. If you don't want this use: --normalvram")
            vram_state = HIGH_VRAM
except:
    # Best effort: no torch / no CUDA device simply leaves the defaults alone.
    pass
# Prefer the precise CUDA OOM error; a failed torch import above or an older
# torch build without the attribute falls back to catching any Exception.
try:
    OOM_EXCEPTION = torch.cuda.OutOfMemoryError
except:
    OOM_EXCEPTION = Exception
# xformers availability probe.  NOTE(review): the misspelling "AVAILBLE" is
# deliberate here — other modules look up this exact name, so it must stay.
if "--disable-xformers" in sys.argv:
    XFORMERS_IS_AVAILBLE = False
else:
    try:
        import xformers
        import xformers.ops
        XFORMERS_IS_AVAILBLE = True
    except:
        XFORMERS_IS_AVAILBLE = False
ENABLE_PYTORCH_ATTENTION = False
if "--use-pytorch-cross-attention" in sys.argv:
    # Enable every scaled-dot-product attention backend and prefer it over
    # xformers for cross attention.
    torch.backends.cuda.enable_math_sdp(True)
    torch.backends.cuda.enable_flash_sdp(True)
    torch.backends.cuda.enable_mem_efficient_sdp(True)
    ENABLE_PYTORCH_ATTENTION = True
    XFORMERS_IS_AVAILBLE = False
# Explicit command line overrides for the auto-detected vram strategy.
if "--lowvram" in sys.argv:
    set_vram_to = LOW_VRAM
if "--novram" in sys.argv:
    set_vram_to = NO_VRAM
if "--highvram" in sys.argv:
    vram_state = HIGH_VRAM
if set_vram_to == LOW_VRAM or set_vram_to == NO_VRAM:
    # Low/no vram modes rely on `accelerate` to page model weights between
    # CPU and GPU; if it is missing we report the failure and keep the
    # current (normal) vram_state.
    try:
        import accelerate
        accelerate_enabled = True
        vram_state = set_vram_to
    except Exception:
        import traceback
        print(traceback.format_exc())
        print("ERROR: COULD NOT ENABLE LOW VRAM MODE.")

    # Budget roughly half the vram (minus 1GiB of headroom) for accelerate,
    # but never less than 256MiB.
    total_vram_available_mb = (total_vram - 1024) // 2
    total_vram_available_mb = int(max(256, total_vram_available_mb))
# Apple silicon: prefer the MPS backend when torch reports it available.
try:
    if torch.backends.mps.is_available():
        vram_state = MPS
except:
    pass
# --cpu beats every other detection result.
if forced_cpu:
    vram_state = CPU

print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM", "MPS"][vram_state])
# State shared by the load/unload helpers below.
current_loaded_model = None      # model wrapper currently patched and resident
current_gpu_controlnets = []     # controlnet modules currently on the torch device

# True while the loaded model is dispatched through accelerate hooks
# (low/no vram modes only).
model_accelerated = False
def unload_model():
    """Move the currently loaded model (and controlnets) off the GPU.

    Removes accelerate hooks when they were installed, unpatches the model
    and forgets it.  In HIGH_VRAM mode weights deliberately stay on the
    GPU; the model is still unpatched so a different one can be loaded.
    """
    global current_loaded_model
    global model_accelerated
    global current_gpu_controlnets
    global vram_state
    if current_loaded_model is not None:
        if model_accelerated:
            accelerate.hooks.remove_hook_from_submodules(current_loaded_model.model)
            model_accelerated = False

        #never unload models from GPU on high vram
        if vram_state != HIGH_VRAM:
            current_loaded_model.model.cpu()
        current_loaded_model.unpatch_model()
        current_loaded_model = None

    if vram_state != HIGH_VRAM:
        if len(current_gpu_controlnets) > 0:
            for n in current_gpu_controlnets:
                n.cpu()
            current_gpu_controlnets = []
def load_model_gpu(model):
    """Patch *model* and place it on the device implied by vram_state.

    Unloads any previously loaded model first.  Returns the loaded model
    wrapper, or None when *model* was already the loaded one (early-out).
    Re-raises whatever patch_model() raised, after unpatching.
    """
    global current_loaded_model
    global vram_state
    global model_accelerated

    if model is current_loaded_model:
        return
    unload_model()
    try:
        real_model = model.patch_model()
    except Exception:
        model.unpatch_model()
        raise  # bare raise keeps the original traceback intact
    current_loaded_model = model
    if vram_state == CPU:
        # Weights stay wherever they are; everything runs on the CPU.
        pass
    elif vram_state == MPS:
        real_model.to(torch.device("mps"))
    elif vram_state == NORMAL_VRAM or vram_state == HIGH_VRAM:
        model_accelerated = False
        real_model.cuda()
    else:
        # LOW_VRAM / NO_VRAM: let accelerate split the model between the
        # GPU (with a capped budget) and CPU RAM.
        if vram_state == NO_VRAM:
            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"})
        elif vram_state == LOW_VRAM:
            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(total_vram_available_mb), "cpu": "16GiB"})

        accelerate.dispatch_model(real_model, device_map=device_map, main_device="cuda")
        model_accelerated = True
    return current_loaded_model
def load_controlnet_gpu(models):
    """Ensure exactly *models* are the controlnets resident on the device.

    No-op on CPU.  Skipped in low/no vram modes, where controlnets are
    loaded just-in-time around each run instead.
    """
    global current_gpu_controlnets
    global vram_state
    if vram_state == CPU:
        return

    if vram_state == LOW_VRAM or vram_state == NO_VRAM:
        #don't load controlnets like this if low vram because they will be loaded right before running and unloaded right after
        return

    # Evict controlnets that are no longer wanted before loading the new set.
    for m in current_gpu_controlnets:
        if m not in models:
            m.cpu()

    device = get_torch_device()
    current_gpu_controlnets = []
    for m in models:
        current_gpu_controlnets.append(m.to(device))
def load_if_low_vram(model):
    """Move *model* to CUDA when a low/no vram strategy is active."""
    global vram_state
    return model.cuda() if vram_state in (LOW_VRAM, NO_VRAM) else model

def unload_if_low_vram(model):
    """Move *model* back to the CPU when a low/no vram strategy is active."""
    global vram_state
    return model.cpu() if vram_state in (LOW_VRAM, NO_VRAM) else model

189
def get_torch_device():
    """Return the torch device matching the global vram_state.

    Note: for CUDA this returns the integer device index from
    torch.cuda.current_device(), not a torch.device object.
    """
    if vram_state == MPS:
        return torch.device("mps")
    if vram_state == CPU:
        return torch.device("cpu")
    else:
        return torch.cuda.current_device()

def get_autocast_device(dev):
    """Return the device-type string torch.autocast expects for *dev*.

    Falls back to "cuda" for objects (e.g. bare CUDA device indices) that
    do not expose a ``type`` attribute.
    """
    return getattr(dev, "type", "cuda")
201

202
203
204
205
206
def xformers_enabled():
    """True when xformers attention should be used (never in CPU mode)."""
    if vram_state == CPU:
        return False
    return XFORMERS_IS_AVAILBLE
def pytorch_attention_enabled():
    """True when pytorch scaled-dot-product attention was enabled via CLI."""
    return ENABLE_PYTORCH_ATTENTION
def get_free_memory(dev=None, torch_free_too=False):
    """Return free memory in bytes on *dev* (default: the active device).

    For cpu/mps devices this is free system RAM.  For CUDA devices it is
    free driver memory plus memory torch has reserved but not allocated.
    When torch_free_too is True, returns (total_free, torch_free) instead.
    """
    if dev is None:
        dev = get_torch_device()

    if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
        mem_free_total = psutil.virtual_memory().available
        mem_free_torch = mem_free_total
    else:
        stats = torch.cuda.memory_stats(dev)
        mem_active = stats['active_bytes.all.current']
        mem_reserved = stats['reserved_bytes.all.current']
        mem_free_cuda, _ = torch.cuda.mem_get_info(dev)
        # Reserved-but-inactive memory is reusable by torch without asking
        # the driver, so count it as free too.
        mem_free_torch = mem_reserved - mem_active
        mem_free_total = mem_free_cuda + mem_free_torch

    if torch_free_too:
        return (mem_free_total, mem_free_torch)
    else:
        return mem_free_total
def maximum_batch_area():
    """Rough upper bound on total latent area processable in one batch."""
    global vram_state
    if vram_state == NO_VRAM:
        return 0

    memory_free = get_free_memory() / (1024 * 1024)
    # 1GiB headroom, 90% of the rest usable, divided by an empirical
    # per-area memory cost factor (0.6) — TODO confirm the constant.
    area = ((memory_free - 1024) * 0.9) / (0.6)
    return int(max(area, 0))
def cpu_mode():
    """True when everything runs on the CPU."""
    global vram_state
    return vram_state == CPU
def mps_mode():
    """True when running on the Apple MPS backend."""
    global vram_state
    return vram_state == MPS
def should_use_fp16():
    """Decide whether fp16 weights are safe and fast on the current device.

    CPU/MPS: no.  bf16-capable GPUs: yes.  Pre-Volta (compute < 7.0): no.
    GTX 16-series / T-series cards are excluded because fp32 is reportedly
    faster there.
    """
    if cpu_mode() or mps_mode():
        return False #TODO ?

    if torch.cuda.is_bf16_supported():
        return True

    props = torch.cuda.get_device_properties("cuda")
    if props.major < 7:
        return False

    #FP32 is faster on those cards?
    nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600"]
    for x in nvidia_16_series:
        if x in props.name:
            return False

    return True
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
#TODO: might be cleaner to put this somewhere else
import threading

class InterruptProcessingException(Exception):
    """Raised from the processing loop when the user requested a stop."""
    pass

# Flag + lock pair protecting cross-thread interrupt requests.
interrupt_processing_mutex = threading.RLock()
interrupt_processing = False

def interrupt_current_processing(value=True):
    """Set (or clear, with value=False) the interrupt-requested flag."""
    global interrupt_processing, interrupt_processing_mutex
    with interrupt_processing_mutex:
        interrupt_processing = value

def processing_interrupted():
    """Return the current interrupt-requested flag, thread-safely."""
    with interrupt_processing_mutex:
        return interrupt_processing

def throw_exception_if_processing_interrupted():
    """Raise InterruptProcessingException once if an interrupt is pending.

    The flag is consumed (reset to False) when the exception is raised.
    """
    global interrupt_processing, interrupt_processing_mutex
    with interrupt_processing_mutex:
        if interrupt_processing:
            interrupt_processing = False
            raise InterruptProcessingException()