package llm

import (
	"cmp"
	"fmt"
	"log/slog"
	"maps"
	"os"
	"slices"
	"strconv"
	"strings"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/discover"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/fs/ggml"
)

// This algorithm looks for a complete fit to determine if we need to unload other models
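// It returns whether the requested layers fit on a single GPU library's devices
// (all layers plus the output layer when opts.NumGPU < 0, otherwise at least
// opts.NumGPU layers), along with the estimated VRAM required.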
func PredictServerFit(allGpus discover.GpuInfoList, f *ggml.GGML, adapters, projectors []string, opts api.Options, numParallel int) (bool, uint64) {
	// Split up the GPUs by type and try them
	var estimatedVRAM uint64
	for _, gpus := range allGpus.ByLibrary() {
		var layerCount int
		estimate := EstimateGPULayers(gpus, f, projectors, opts, numParallel)
		layerCount, estimatedVRAM = estimate.Layers, estimate.VRAMSize
		if opts.NumGPU < 0 {
			if layerCount > 0 && layerCount >= int(f.KV().BlockCount()+1) {
				return true, estimatedVRAM
			}
		} else {
			if layerCount > 0 && layerCount >= opts.NumGPU {
				return true, estimatedVRAM
			}
		}
	}
	return false, estimatedVRAM
}

type MemoryEstimate struct {
	// How many layers we predict we can load
	Layers int

	// The size of the graph which occupies the main GPU
	Graph uint64

	// How much VRAM will be allocated given the number of layers we predict
	VRAMSize uint64

	// The total size of the model if loaded into VRAM.  If all layers are loaded, VRAMSize == TotalSize
	TotalSize uint64

	// For multi-GPU scenarios, this provides the tensor split parameter
	TensorSplit string

	// For multi-GPU scenarios, this is the size in bytes per GPU
	GPUSizes []uint64

	// internal fields for logging purposes
	inferenceLibrary    string
	layersRequested     int
	layersModel         int
	availableList       []string
	kv                  uint64
	allocationsList     []string
	memoryWeights       uint64
	memoryLayerOutput   uint64
	graphFullOffload    uint64
	graphPartialOffload uint64

	projectorWeights, projectorGraph uint64
}

// Given a model and one or more GPU targets, predict how many layers and bytes we can load, and the total size
// The GPUs provided must all be the same Library
func EstimateGPULayers(gpus []discover.GpuInfo, f *ggml.GGML, projectors []string, opts api.Options, numParallel int) MemoryEstimate {
	// Graph size for a partial offload, applies to all GPUs
	var graphPartialOffload uint64

	// Graph size when all layers are offloaded, applies to all GPUs
	var graphFullOffload uint64

	// Final graph offload once we know full or partial
	var graphOffload uint64

	// Projectors loaded into GPU0 only
	var llamaEngineProjectorWeights uint64

	// Projectors loaded with output layer
	var ollamaEngineProjectorWeights uint64
	var ollamaEngineProjectorGraph uint64

	// Conditional output size on GPU 0
	var memoryLayerOutput uint64

	// The size of a layer
	var layerSize uint64

	// The sum of all the layer sizes (just for logging)
	var memoryWeights uint64

	// True if all the layers are loaded
	var fullyLoaded bool

	// Overflow that didn't fit into the GPU
	var overflow uint64

	overhead := envconfig.GpuOverhead()
	availableList := make([]string, len(gpus))
	for i, gpu := range gpus {
		availableList[i] = format.HumanBytes2(gpu.FreeMemory)
	}
	slog.Debug("evaluating", "library", gpus[0].Library, "gpu_count", len(gpus), "available", availableList)

	for _, projector := range projectors {
		llamaEngineProjectorWeights += projectorMemoryRequirements(projector)

		// multimodal models require at least 2048 context
		opts.NumCtx = max(opts.NumCtx, 2048)
	}
	if llamaEngineProjectorWeights == 0 {
		ollamaEngineProjectorWeights, ollamaEngineProjectorGraph = f.VisionGraphSize()
		opts.NumCtx = max(opts.NumCtx, 2048)
	}

	layers := f.Tensors().GroupLayers()
	// add one layer (choosing the max layer) worth of memory as a buffer
	layerSize = slices.MaxFunc(slices.Collect(maps.Values(layers)), func(a, b ggml.Layer) int {
		return cmp.Compare(a.Size(), b.Size())
	}).Size()

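	// Use the requested quantized KV cache type only when flash attention is
	// enabled and supported by both the GPUs and the model; otherwise kvct is
	// left empty.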
	var kvct string
	if envconfig.FlashAttention() &&
		discover.GetGPUInfo().FlashAttentionSupported() &&
		f.SupportsFlashAttention() {
		requested := strings.ToLower(envconfig.KvCacheType())
		if requested != "" && f.SupportsKVCacheType(requested) {
			kvct = requested
		}
	}

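	// Ask the model for its per-layer KV cache sizes and the compute graph
	// sizes for partial and full offload at the given context and batch size.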
	kv, graphPartialOffload, graphFullOffload := f.GraphSize(uint64(opts.NumCtx), uint64(min(opts.NumCtx, opts.NumBatch)), numParallel, kvct)

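	// Include one layer's worth of KV cache in the layer buffer and track the
	// total KV cache size across all layers.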
	if len(kv) > 0 {
		layerSize += kv[0]
	}

	var kvTotal uint64
	for _, kvLayer := range kv {
		kvTotal += kvLayer
	}

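	// If the model metadata didn't report graph sizes, fall back to a heuristic
	// based on the GQA factor and the total KV cache size.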
	if graphPartialOffload == 0 {
		graphPartialOffload = f.KV().GQA() * kvTotal / 6
	}
	if graphFullOffload == 0 {
		graphFullOffload = graphPartialOffload
	}

	// on metal there's no partial offload overhead
	if gpus[0].Library == "metal" {
		graphPartialOffload = graphFullOffload
	} else if len(gpus) > 1 {
		// multigpu should always use the partial graph size
		graphFullOffload = graphPartialOffload
	}

	// Output layer handled at the end if we have space
	if layer, ok := layers["output_norm"]; ok {
		memoryLayerOutput += layer.Size()
	}
	if layer, ok := layers["output"]; ok {
		memoryLayerOutput += layer.Size()
	} else if layer, ok := layers["token_embd"]; ok {
		memoryLayerOutput += layer.Size()
	}

	gpuZeroOverhead := llamaEngineProjectorWeights

	// Reduce set of GPUs to only those that have sufficient space to fit overhead and at least one layer
	var layerCount int
	layerCounts := make([]int, len(gpus))
	gpuAllocations := make([]uint64, len(gpus))
	type gs struct {
		i int
		g *discover.GpuInfo
	}
	gpusWithSpace := []gs{}
	for i := range gpus {
		var gzo uint64
		if len(gpusWithSpace) == 0 {
			gzo = gpuZeroOverhead
		}
		// Only include GPUs that can fit the graph, gpu minimum, the layer buffer and at least one more layer
		if gpus[i].FreeMemory < overhead+gzo+max(graphPartialOffload, graphFullOffload)+gpus[i].MinimumMemory+2*layerSize {
			slog.Debug("gpu has too little memory to allocate any layers",
				"id", gpus[i].ID,
				"library", gpus[i].Library,
				"variant", gpus[i].Variant,
				"compute", gpus[i].Compute,
				"driver", fmt.Sprintf("%d.%d", gpus[i].DriverMajor, gpus[i].DriverMinor),
				"name", gpus[i].Name,
				"total", format.HumanBytes2(gpus[i].TotalMemory),
				"available", format.HumanBytes2(gpus[i].FreeMemory),
				"minimum_memory", gpus[i].MinimumMemory,
				"layer_size", format.HumanBytes2(layerSize),
				"gpu_zer_overhead", format.HumanBytes2(gzo),
				"partial_offload", format.HumanBytes2(graphPartialOffload),
				"full_offload", format.HumanBytes2(graphFullOffload),
			)
			continue
		}
		gpusWithSpace = append(gpusWithSpace, gs{i, &gpus[i]})
		gpuAllocations[i] += gpus[i].MinimumMemory + layerSize // We hold off on graph until we know partial vs. full
	}

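	// Projector weights for the llama engine are placed on the first GPU that
	// has space; if no GPU qualifies they are counted as overflow.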
	var gpuZeroID int
	if len(gpusWithSpace) > 0 {
		gpuZeroID = gpusWithSpace[0].i
		gpuAllocations[gpuZeroID] += gpuZeroOverhead
	} else {
		overflow += gpuZeroOverhead
	}

	// For all the layers, find where they can fit on the GPU(s)
	for i := int(f.KV().BlockCount()) - 1; i >= 0; i-- {
		// Some models have inconsistent layer sizes
		if blk, ok := layers[fmt.Sprintf("blk.%d", i)]; ok {
			layerSize = blk.Size()
			layerSize += kv[i]
			memoryWeights += blk.Size()
		}

		if opts.NumGPU >= 0 && layerCount >= opts.NumGPU {
			// Stop allocating on GPU(s) once we hit the user's target NumGPU
			overflow += layerSize
			continue
		}

		// distribute the layers across the GPU(s) that have space
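		// Layers are placed round-robin by layer index; a GPU that can no longer
		// fit a layer is dropped from gpusWithSpace and the remaining GPUs are retried.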
		for j := len(gpusWithSpace); j > 0; j-- {
			g := gpusWithSpace[i%j]
			used := gpuAllocations[g.i] + max(graphPartialOffload, graphFullOffload)
			if g.g.FreeMemory > overhead+used+layerSize {
				gpuAllocations[g.i] += layerSize
				layerCounts[g.i]++
				layerCount++
				break
			} else {
				gpusWithSpace = append(gpusWithSpace[:i%j], gpusWithSpace[i%j+1:]...)
			}
		}

		if len(gpusWithSpace) == 0 {
			overflow += layerSize
		}
	}
	if layerCount >= int(f.KV().BlockCount()) {
		fullyLoaded = true
	}

	// Determine if we need to consider output, then find where it fits
	memoryLastLayer := memoryLayerOutput + ollamaEngineProjectorWeights + ollamaEngineProjectorGraph
	if memoryLastLayer > 0 {
		if opts.NumGPU < 0 || layerCount < opts.NumGPU {
			for j := len(gpusWithSpace); j > 0; j-- {
				g := gpusWithSpace[layerCount%j]
				used := gpuAllocations[g.i] + max(graphPartialOffload, graphFullOffload)
				if g.g.FreeMemory > overhead+used+memoryLastLayer {
					gpuAllocations[g.i] += memoryLastLayer
					layerCounts[g.i]++
					layerCount++
					break
				}
			}
		}

		if layerCount < int(f.KV().BlockCount())+1 {
			fullyLoaded = false
			overflow += memoryLastLayer
		}
	}

	// Add the applicable (full or partial) graph allocations
	for i := range gpus {
		if layerCounts[i] <= 0 {
			continue
		}
		if fullyLoaded {
			gpuAllocations[i] += graphFullOffload
		} else {
			gpuAllocations[i] += graphPartialOffload
		}
	}
	if fullyLoaded {
		graphOffload = graphFullOffload
	} else {
		graphOffload = graphPartialOffload
	}

	// Summaries for the log
	var memoryRequiredPartial, memoryRequiredTotal uint64
	for i := range gpuAllocations {
		memoryRequiredPartial += gpuAllocations[i]
	}
	memoryRequiredTotal = memoryRequiredPartial + overflow

	tensorSplit := ""
	if len(gpus) > 1 {
		splits := make([]string, len(gpus))
		for i, count := range layerCounts {
			splits[i] = strconv.Itoa(count)
		}
		tensorSplit = strings.Join(splits, ",")
	}
	allocationsList := []string{}
	for _, a := range gpuAllocations {
		allocationsList = append(allocationsList, format.HumanBytes2(a))
	}

	estimate := MemoryEstimate{
		TotalSize: memoryRequiredTotal,
		Layers:    0,
		Graph:     0,
		VRAMSize:  0,
		GPUSizes:  []uint64{},

		inferenceLibrary:    gpus[0].Library,
		layersRequested:     opts.NumGPU,
		layersModel:         int(f.KV().BlockCount()) + 1,
		availableList:       availableList,
		kv:                  kvTotal,
		allocationsList:     allocationsList,
		memoryWeights:       memoryWeights,
		memoryLayerOutput:   memoryLayerOutput,
		graphFullOffload:    graphFullOffload,
		graphPartialOffload: graphPartialOffload,
		projectorWeights:    llamaEngineProjectorWeights + ollamaEngineProjectorWeights,
		projectorGraph:      ollamaEngineProjectorGraph,
	}

	if gpus[0].Library == "cpu" {
		return estimate
	}
	if layerCount == 0 {
		slog.Debug("insufficient VRAM to load any model layers")
		return estimate
	}
	estimate.Layers = layerCount
	estimate.Graph = graphOffload
	estimate.VRAMSize = memoryRequiredPartial
	estimate.TotalSize = memoryRequiredTotal
	estimate.TensorSplit = tensorSplit
	estimate.GPUSizes = gpuAllocations
	return estimate
}

func (m MemoryEstimate) LogValue() slog.Value {
	attrs := []slog.Attr{
		slog.String("library", m.inferenceLibrary),
		slog.Group(
			"layers",
			// requested number of layers to offload
			"requested", m.layersRequested,
			// The number of layers the model has (including output)
			"model", m.layersModel,
			// estimated number of layers that can be offloaded
			"offload", m.Layers,
			// multi-gpu split for tensors
			"split", m.TensorSplit,
		),
		slog.Group(
			"memory",
			// memory available by GPU for offloading
			"available", m.availableList,
			"gpu_overhead", format.HumanBytes2(envconfig.GpuOverhead()),
			slog.Group(
				"required",
				// memory required for full offloading
				"full", format.HumanBytes2(m.TotalSize),
				// memory required to offload the estimated number of layers
				"partial", format.HumanBytes2(m.VRAMSize),
				// memory of KV cache
				"kv", format.HumanBytes2(m.kv),
				// Allocations across the GPUs
				"allocations", m.allocationsList,
			),
			slog.Group(
				"weights",
				// memory of the weights
				"total", format.HumanBytes2(m.memoryWeights+m.memoryLayerOutput),
				// memory of repeating layers
				"repeating", format.HumanBytes2(m.memoryWeights),
				// memory of non-repeating layers
				"nonrepeating", format.HumanBytes2(m.memoryLayerOutput),
			),
			slog.Group(
				"graph",
				// memory of graph when fully offloaded
				"full", format.HumanBytes2(m.graphFullOffload),
				// memory of graph when not fully offloaded
				"partial", format.HumanBytes2(m.graphPartialOffload),
			),
		),
	}

	if m.projectorWeights > 0 {
		attrs = append(attrs, slog.Group(
			"projector",
			"weights", format.HumanBytes2(m.projectorWeights),
			"graph", format.HumanBytes2(m.projectorGraph),
		))
	}

	return slog.GroupValue(attrs...)
}

func projectorMemoryRequirements(filename string) (weights uint64) {
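	// Decode the projector GGUF and sum the sizes of all of its tensors; any
	// error is treated as requiring no additional memory.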
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := ggml.Decode(file, 1024)
	if err != nil {
		return 0
	}

	for _, layer := range ggml.Tensors().GroupLayers() {
		weights += layer.Size()
	}

	return weights
}