# model_management.py — model/VRAM placement management
# VRAM states: higher values keep more of the model resident on the GPU.
CPU = 0
NO_VRAM = 1
LOW_VRAM = 2
NORMAL_VRAM = 3
HIGH_VRAM = 4

accelerate_enabled = False
vram_state = NORMAL_VRAM

total_vram = 0                # total device memory in MiB (0 if no CUDA device)
total_vram_available_mb = -1  # per-model GPU budget in MiB for LOW/NO vram modes

import sys

set_vram_to = NORMAL_VRAM

try:
    import torch
    # mem_get_info returns (free, total) in bytes; keep the total, in MiB.
    total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
    if total_vram <= 4096 and "--normalvram" not in sys.argv:
        print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")
        set_vram_to = LOW_VRAM
except Exception:
    # Best effort: no torch / no CUDA device simply means we keep the defaults.
    pass

# Explicit command-line flags override the auto-detection above.
if "--lowvram" in sys.argv:
    set_vram_to = LOW_VRAM
if "--novram" in sys.argv:
    set_vram_to = NO_VRAM
if "--highvram" in sys.argv:
    vram_state = HIGH_VRAM

if set_vram_to == LOW_VRAM or set_vram_to == NO_VRAM:
    # LOW/NO vram modes need accelerate for CPU<->GPU model sharding.
    try:
        import accelerate
        accelerate_enabled = True
        vram_state = set_vram_to
    except Exception as e:
        import traceback
        print(traceback.format_exc())
        print("ERROR: COULD NOT ENABLE LOW VRAM MODE.")

    # Heuristic: leave ~1GiB of headroom, budget half the remainder,
    # but never less than 256MiB.
    total_vram_available_mb = (total_vram - 1024) // 2
    total_vram_available_mb = int(max(256, total_vram_available_mb))

print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM"][vram_state])

# The single model currently patched/loaded, and controlnets resident on GPU.
current_loaded_model = None
current_gpu_controlnets = []

# True while accelerate dispatch hooks are attached to the current model.
model_accelerated = False
def unload_model():
    """Undo patches/hooks on the active model and, unless running in
    HIGH_VRAM mode, move it and any loaded controlnets back to the CPU."""
    global current_loaded_model
    global model_accelerated
    global current_gpu_controlnets
    global vram_state

    # Never evict weights from the GPU in high vram mode.
    keep_on_gpu = vram_state == HIGH_VRAM

    if current_loaded_model is not None:
        if model_accelerated:
            accelerate.hooks.remove_hook_from_submodules(current_loaded_model.model)
            model_accelerated = False

        if not keep_on_gpu:
            current_loaded_model.model.cpu()
        current_loaded_model.unpatch_model()
        current_loaded_model = None

    if not keep_on_gpu and current_gpu_controlnets:
        for controlnet in current_gpu_controlnets:
            controlnet.cpu()
        current_gpu_controlnets = []
def load_model_gpu(model):
    """Make *model* the currently loaded model, placing its weights
    according to vram_state. Returns the now-current model (or None when
    *model* was already loaded)."""
    global current_loaded_model
    global vram_state
    global model_accelerated

    if model is current_loaded_model:
        return
    unload_model()

    try:
        real_model = model.patch_model()
    except Exception as e:
        # Roll back a half-applied patch before propagating the failure.
        model.unpatch_model()
        raise e
    current_loaded_model = model

    if vram_state == CPU:
        pass  # weights stay on the CPU
    elif vram_state in (NORMAL_VRAM, HIGH_VRAM):
        model_accelerated = False
        real_model.cuda()
    else:
        # LOW/NO vram: let accelerate shard the model between GPU and CPU.
        if vram_state == NO_VRAM:
            max_memory = {0: "256MiB", "cpu": "16GiB"}
        elif vram_state == LOW_VRAM:
            max_memory = {0: "{}MiB".format(total_vram_available_mb), "cpu": "16GiB"}

        device_map = accelerate.infer_auto_device_map(real_model, max_memory=max_memory)
        accelerate.dispatch_model(real_model, device_map=device_map, main_device="cuda")
        model_accelerated = True
    return current_loaded_model
def load_controlnet_gpu(models):
    """Replace the set of GPU-resident controlnets with *models*."""
    global current_gpu_controlnets
    global vram_state

    if vram_state == LOW_VRAM or vram_state == NO_VRAM:
        # don't load controlnets like this if low vram because they will be
        # loaded right before running and unloaded right after
        return

    # Evict controlnets that are no longer wanted, then load the new set.
    for loaded in current_gpu_controlnets:
        if loaded not in models:
            loaded.cpu()

    current_gpu_controlnets = [m.cuda() for m in models]
def load_if_low_vram(model):
    """Move *model* to the GPU in LOW/NO vram modes; otherwise return it unchanged."""
    global vram_state
    if vram_state in (LOW_VRAM, NO_VRAM):
        return model.cuda()
    return model

def unload_if_low_vram(model):
    """Move *model* back to the CPU in LOW/NO vram modes; otherwise return it unchanged."""
    global vram_state
    if vram_state in (LOW_VRAM, NO_VRAM):
        return model.cpu()
    return model
def get_free_memory(dev=None):
    """Return an estimate of free GPU memory in bytes on *dev*.

    Counts both memory that is free on the device and memory torch has
    reserved but is not actively using (which torch can reuse).

    dev: CUDA device index; defaults to the current CUDA device.
    """
    if dev is None:
        dev = torch.cuda.current_device()
    stats = torch.cuda.memory_stats(dev)
    mem_active = stats['active_bytes.all.current']
    mem_reserved = stats['reserved_bytes.all.current']
    mem_free_cuda, _ = torch.cuda.mem_get_info(dev)
    # Reserved-but-inactive memory is reclaimable by torch allocations.
    mem_free_torch = mem_reserved - mem_active
    return mem_free_cuda + mem_free_torch
def maximum_batch_area():
    """Heuristic upper bound on the image area (in pixels) a batch may use,
    based on currently free GPU memory. Returns 0 in NO_VRAM mode."""
    global vram_state
    if vram_state == NO_VRAM:
        return 0

    free_mb = get_free_memory() / (1024 * 1024)
    # Leave ~1GiB headroom, then scale by empirical factors (0.9 / 0.6).
    area = ((free_mb - 1024) * 0.9) / (0.6)
    return int(max(area, 0))