package ml

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"log/slog"
	"math"
	"slices"
	"strconv"
	"strings"

	"github.com/ollama/ollama/fs"
)

type Backend interface {
	// Close frees all memory associated with this backend
	Close()

	// Load loads the model weights, reporting progress through the provided callback
	Load(ctx context.Context, progress func(float32)) error

	// BackendMemory returns the memory allocations that were made for this model
	BackendMemory() BackendMemory

	Config() fs.Config
	Get(name string) Tensor
	NewContext() Context
	NewContextSize(size int) Context
}

// BackendCacheConfig should be implemented by backends that need special output
// from the cache to meet specific requirements. It is frequently implemented in
// conjunction with ScaledDotProductAttention.
type BackendCacheConfig interface {
	CacheConfig() CacheConfig
}

// CacheConfig controls optimizations (mostly backend-specific) that may transform
// the output of the cache to work better with specific kernels.
type CacheConfig struct {
	// CachePadding specifies the multiple for the number of tokens of cache history
	// that will be returned from cache Get for k, v and mask. The capacity of the
	// cache itself will also be increased to a multiple of this size if needed.
	CachePadding int

	// PermutedV performs Permute(ctx, 1, 2, 0, 3) on v tensors stored via Put
	// and returns the permuted version via Get. This uses the cache copy operation
	// to avoid a Contiguous call on the permuted tensor.
	PermutedV bool

	// MaskDType specifies the data type for generating the mask. If unset it will
	// default to DTypeF32.
	MaskDType DType

	// MaskBatchPadding specifies the multiple for the batch size dimension in the mask.
	// Any position that does not correspond to an actual token will be filled with -Inf.
	MaskBatchPadding int
}
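
// Illustrative sketch (not taken from a real backend): a backend that needs
// padded cache history and permuted v tensors might implement
// BackendCacheConfig roughly as follows, where exampleBackend is a
// hypothetical type and the values are placeholders:
//
//	func (b *exampleBackend) CacheConfig() CacheConfig {
//		return CacheConfig{
//			CachePadding:     32,
//			PermutedV:        true,
//			MaskDType:        DTypeF16,
//			MaskBatchPadding: 64,
//		}
//	}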

// BackendParams controls how the backend loads and executes models
type BackendParams struct {
	// NumThreads sets the number of threads to use if running on the CPU
	NumThreads int

	// MainGPU is the index of the primary GPU to use
	MainGPU int

	// NumGPULayers is the number of layers to offload to GPUs
	NumGPULayers int

	// TensorSplit is the fraction of the model to offload to each GPU
	TensorSplit []float32

	// FlashAttention indicates that we should use a fused flash attention kernel
	FlashAttention bool
}
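
// Illustrative sketch: parameters for splitting a model across two GPUs.
// The specific values are placeholders, not recommendations:
//
//	params := BackendParams{
//		NumThreads:     8,
//		MainGPU:        0,
//		NumGPULayers:   32,
//		TensorSplit:    []float32{0.6, 0.4},
//		FlashAttention: true,
//	}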

// ErrNoMem is returned when panicking due to insufficient memory. It includes
// the attempted memory allocation.
type ErrNoMem struct {
	BackendMemory
}

func (e ErrNoMem) Error() string {
	return fmt.Sprintf("insufficient memory - required allocations: %+v", e.BackendMemory)
}
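
// Illustrative sketch: because backends panic with ErrNoMem, a caller that
// wants to adjust allocations and retry could recover it like this (the
// surrounding function and retry logic are hypothetical):
//
//	defer func() {
//		if r := recover(); r != nil {
//			if noMem, ok := r.(ErrNoMem); ok {
//				slog.Info("allocation failed", "memory", noMem.BackendMemory)
//				// reduce the number of offloaded layers and retry
//				return
//			}
//			panic(r)
//		}
//	}()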

type AllocationStatus int

const (
	// Unallocated memory - have not yet attempted to allocate
	Unallocated AllocationStatus = iota

	// Failed memory - tried to allocate the memory and did not succeed
	Failed

	// Allocated memory - tried and succeeded to allocate memory
	Allocated
)

// Memory is the size of an allocation and whether it was successful.
type Memory struct {
	Size   uint64
	Status AllocationStatus
}

func (m Memory) String() string {
	s := fmt.Sprint(m.Size)

	switch m.Status {
	case Unallocated:
		s += "U"
	case Failed:
		s += "F"
	case Allocated:
		s += "A"
	}

	return s
}
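
// For example, a 1 KiB allocation that succeeded renders with an "A" suffix:
//
//	m := Memory{Size: 1024, Status: Allocated}
//	fmt.Println(m) // prints "1024A"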

// DeviceMemory provides a breakdown of the memory needed
// per device, such as a CPU or GPU.
type DeviceMemory struct {
	// Name is the name of the device as labeled by the backend. It
	// may not be persistent across instances of the runner.
	Name string

	// ID is an identifier for the device for matching with system
	// management libraries.
	ID string

	// Weights is the per-layer memory needed for the model weights.
	Weights []Memory

	// Cache is the per-layer memory needed for the KV cache.
	Cache []Memory

	// Graph is the size of the compute graph. It is not per-layer.
	Graph Memory
}

func memoryPresent(mem []Memory) bool {
	return slices.ContainsFunc(mem, func(m Memory) bool { return m.Size != 0 })
}

func (m DeviceMemory) LogValue() slog.Value {
	var attrs []slog.Attr
	if memoryPresent(m.Weights) {
		attrs = append(attrs, slog.Any("Weights", m.Weights))
	}

	if memoryPresent(m.Cache) {
		attrs = append(attrs, slog.Any("Cache", m.Cache))
	}

	if m.Graph.Size != 0 {
		attrs = append(attrs, slog.Any("Graph", m.Graph))
	}

	if len(attrs) > 0 && m.ID != "" {
		attrs = append([]slog.Attr{slog.String("ID", m.ID)}, attrs...)
	}

	return slog.GroupValue(attrs...)
}

// BackendMemory provides the amount of memory required to load the model
// per device based on the BackendParams. In some cases, not all required
// allocations will be known at this point. However, the size of the most recent
// allocation is guaranteed to be provided so that if it failed, the caller can
// accommodate that to make forward progress.
type BackendMemory struct {
	// InputWeights are always located on the CPU and cannot be moved
	InputWeights Memory

	// CPU model components are located in system memory. This does not
	// include unified memory allocated through the GPU.
	CPU DeviceMemory

	// GPU model components are located on one or more GPUs.
	GPUs []DeviceMemory
}

func (m BackendMemory) LogValue() slog.Value {
	var attrs []slog.Attr
	if m.InputWeights.Size != 0 {
		attrs = append(attrs, slog.Any("InputWeights", m.InputWeights))
	}

	attrs = append(attrs, slog.Any(m.CPU.Name, m.CPU))
	for _, g := range m.GPUs {
		attrs = append(attrs, slog.Any(g.Name, g))
	}

	return slog.GroupValue(attrs...)
}
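
// Because BackendMemory and DeviceMemory implement slog.LogValuer, they can
// be logged directly; a minimal sketch, assuming b is a Backend:
//
//	mem := b.BackendMemory()
//	slog.Info("model memory", "required", mem)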

var backends = make(map[string]func(string, BackendParams) (Backend, error))

func RegisterBackend(name string, f func(string, BackendParams) (Backend, error)) {
	if _, ok := backends[name]; ok {
		panic("backend: backend already registered")
	}

	backends[name] = f
}
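
// A backend implementation would typically register itself from an init
// function; a minimal sketch, where the package name and constructor are
// illustrative:
//
//	func init() {
//		ml.RegisterBackend("ggml", func(modelPath string, params ml.BackendParams) (ml.Backend, error) {
//			return newBackend(modelPath, params)
//		})
//	}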

func NewBackend(modelPath string, params BackendParams) (Backend, error) {
	if backend, ok := backends["ggml"]; ok {
		return backend(modelPath, params)
	}

	return nil, fmt.Errorf("unsupported backend")
}
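
// Illustrative sketch of the overall lifecycle, assuming ctx is a
// context.Context supplied by the caller:
//
//	b, err := NewBackend(modelPath, BackendParams{NumThreads: 8})
//	if err != nil {
//		return err
//	}
//	defer b.Close()
//
//	if err := b.Load(ctx, func(p float32) { slog.Debug("loading", "progress", p) }); err != nil {
//		return err
//	}
//
//	c := b.NewContext()
//	defer c.Close()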

type Context interface {
	Empty(dtype DType, shape ...int) Tensor
	Zeros(dtype DType, shape ...int) Tensor
	FromFloatSlice(s []float32, shape ...int) Tensor
	FromIntSlice(s []int32, shape ...int) Tensor

	// Arange creates a 1D tensor with values within the interval (start, stop], spaced by step.
	Arange(start, stop, step float32, dtype DType) Tensor

	Forward(...Tensor) Context
	Compute(...Tensor)

	// Reserve is analogous to Compute but rather than executing a
	// graph, simply preallocates memory. Typically called with a
	// worst case graph to ensure all resources are available for
	// future inference.
	Reserve()

	MaxGraphNodes() int
	Close()

	// Input returns a context appropriate for creating tensors that are
	// inputs to the model (which includes things like output locations)
	Input() Context

	// Layer returns a context appropriate for creating intermediate tensors
	Layer(int) Context
}
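
// Illustrative sketch of building and executing a small graph, assuming b is
// a Backend; the shapes and values are arbitrary:
//
//	c := b.NewContext()
//	defer c.Close()
//
//	x := c.Input().FromFloatSlice([]float32{1, 2, 3, 4}, 2, 2)
//	y := x.Mulmat(c, x).Softmax(c)
//	c.Forward(y).Compute(y)
//	fmt.Println(y.Floats())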

type Tensor interface {
	Dim(n int) int
	Stride(n int) int

	Shape() []int
	DType() DType

	Bytes() []byte
	Floats() []float32

	Neg(ctx Context) Tensor
	Add(ctx Context, t2 Tensor) Tensor
	Sub(ctx Context, t2 Tensor) Tensor
	Mul(ctx Context, t2 Tensor) Tensor
	Div(ctx Context, t2 Tensor) Tensor

	Mulmat(ctx Context, t2 Tensor) Tensor
	MulmatFullPrec(ctx Context, t2 Tensor) Tensor
	MulmatID(ctx Context, t2, ids Tensor) Tensor
	AddID(ctx Context, t2, ids Tensor) Tensor

	Softmax(ctx Context) Tensor
	LayerNorm(ctx Context, weight, bias Tensor, eps float32) Tensor
	RMSNorm(ctx Context, weight Tensor, eps float32) Tensor
	Scale(ctx Context, s float64) Tensor
	SumRows(ctx Context) Tensor

	AvgPool2D(ctx Context, k, s int, p float32) Tensor
	Conv2D(ctx Context, weight Tensor, s0, s1, p0, p1, d0, d1 int) Tensor

	IM2Col(ctx Context, weight Tensor, s0, s1, p0, p1, d0, d1 int) Tensor

	Sin(ctx Context) Tensor
	Cos(ctx Context) Tensor
	Tanh(ctx Context) Tensor
	GELU(ctx Context) Tensor
	QuickGELU(ctx Context) Tensor
	SILU(ctx Context) Tensor
	RELU(ctx Context) Tensor
	Sigmoid(ctx Context) Tensor
	SwiGLU(ctx Context, up Tensor, alpha, limit float32) Tensor

	Reshape(ctx Context, shape ...int) Tensor
	View(ctx Context, offset int, shape ...int) Tensor
	Permute(ctx Context, shape ...int) Tensor
	Contiguous(ctx Context, shape ...int) Tensor
	Set(ctx Context, t2 Tensor, offset int, strides ...int) Tensor

	Pad(ctx Context, shape ...int) Tensor

	Stack(ctx Context, dim int, s ...Tensor) Tensor

	// Repeat repeats the tensor n times along dimension dim
	Repeat(ctx Context, dim, n int) Tensor
	Concat(ctx Context, t2 Tensor, dim int) Tensor
	Rows(ctx Context, t2 Tensor) Tensor
	Copy(ctx Context, t2 Tensor) Tensor
	Duplicate(ctx Context) Tensor

	TopK(ctx Context, k int) Tensor
	Argsort(ctx Context) Tensor
	Mean(ctx Context) Tensor
	Variance(ctx Context) Tensor
	Stddev(ctx Context) Tensor
	Sqr(ctx Context) Tensor
	Sqrt(ctx Context) Tensor
	Clamp(ctx Context, min, max float32) Tensor
}

// ScaledDotProductAttention implements a fused attention
// operation equivalent to the following code on a tensor named
// query:
//
// query = query.Permute(ctx, 0, 2, 1, 3)
// key = key.Permute(ctx, 0, 2, 1, 3)
// value = value.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx)
//
// kq := key.MulmatFullPrec(ctx, query)
//
// kq = kq.Scale(ctx, scale)
//
//	if mask != nil {
//		kq = kq.Add(ctx, mask)
//	}
//
// kq = kq.Softmax(ctx)
//
// kqv := value.Mulmat(ctx, kq)
// return kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)
type ScaledDotProductAttention interface {
	ScaledDotProductAttention(ctx Context, key, value, mask, sinks Tensor, scale float64) Tensor
}

type number interface {
	~int | ~int8 | ~int16 | ~int32 | ~int64 |
		~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 |
		~float32 | ~float64 |
		~complex64 | ~complex128
}

func mul[T number](s ...T) T {
	p := T(1)
	for _, v := range s {
		p *= v
	}

	return p
}
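
// For example, mul(2, 3, 4) == 24; Dump uses it to compute the number of
// elements from a tensor's shape.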

type DumpOptions func(*dumpOptions)

// DumpWithPrecision sets the number of decimal places to print. Applies to float32 and float64.
func DumpWithPrecision(n int) DumpOptions {
	return func(opts *dumpOptions) {
		opts.Precision = n
	}
}

// DumpWithThreshold sets the threshold for printing the entire tensor. If the number of elements
// is less than or equal to this value, the entire tensor will be printed. Otherwise, only the
// beginning and end of each dimension will be printed.
func DumpWithThreshold(n int) DumpOptions {
	return func(opts *dumpOptions) {
		opts.Threshold = n
	}
}

// DumpWithEdgeItems sets the number of elements to print at the beginning and end of each dimension.
func DumpWithEdgeItems(n int) DumpOptions {
	return func(opts *dumpOptions) {
		opts.EdgeItems = n
	}
}

type dumpOptions struct {
	Precision, Threshold, EdgeItems int
}

func Dump(ctx Context, t Tensor, optsFuncs ...DumpOptions) string {
	opts := dumpOptions{Precision: 4, Threshold: 1000, EdgeItems: 3}
	for _, optsFunc := range optsFuncs {
		optsFunc(&opts)
	}

	if mul(t.Shape()...) <= opts.Threshold {
		opts.EdgeItems = math.MaxInt
	}

	switch t.DType() {
	case DTypeF32:
		return dump[[]float32](ctx, t, opts.EdgeItems, func(f float32) string {
			return strconv.FormatFloat(float64(f), 'f', opts.Precision, 32)
		})
	case DTypeF16, DTypeQ80, DTypeQ40:
		f32 := ctx.Input().Empty(DTypeF32, t.Shape()...)
		f32 = t.Copy(ctx, f32)
		return dump[[]float32](ctx, f32, opts.EdgeItems, func(f float32) string {
			return strconv.FormatFloat(float64(f), 'f', opts.Precision, 32)
		})
	case DTypeI32:
		return dump[[]int32](ctx, t, opts.EdgeItems, func(i int32) string {
			return strconv.FormatInt(int64(i), 10)
		})
	default:
		return "<unsupported>"
	}
}
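
// Illustrative usage, assuming ctx and t already exist:
//
//	s := Dump(ctx, t, DumpWithPrecision(2), DumpWithEdgeItems(2))
//	fmt.Println(s)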

func dump[S ~[]E, E number](ctx Context, t Tensor, items int, fn func(E) string) string {
	if t.Bytes() == nil {
		ctx.Forward(t).Compute(t)
	}

	s := make(S, mul(t.Shape()...))
	if err := binary.Read(bytes.NewBuffer(t.Bytes()), binary.LittleEndian, &s); err != nil {
		panic(err)
	}

	shape := t.Shape()
	slices.Reverse(shape)

	var sb strings.Builder
	var f func([]int, int)
	f = func(dims []int, stride int) {
		prefix := strings.Repeat(" ", len(shape)-len(dims)+1)
		sb.WriteString("[")
		defer func() { sb.WriteString("]") }()
		for i := 0; i < dims[0]; i++ {
			if i >= items && i < dims[0]-items {
				sb.WriteString("..., ")
				// skip to next printable element
				skip := dims[0] - 2*items
				if len(dims) > 1 {
					stride += mul(append(dims[1:], skip)...)
					fmt.Fprint(&sb, strings.Repeat("\n", len(dims)-1), prefix)
				}
				i += skip - 1
			} else if len(dims) > 1 {
				f(dims[1:], stride)
				stride += mul(dims[1:]...)
				if i < dims[0]-1 {
					fmt.Fprint(&sb, ",", strings.Repeat("\n", len(dims)-1), prefix)
				}
			} else {
				text := fn(s[stride+i])
				if len(text) > 0 && text[0] != '-' {
					sb.WriteString(" ")
				}

				sb.WriteString(text)
				if i < dims[0]-1 {
					sb.WriteString(", ")
				}
			}
		}
	}
	f(shape, 0)

	return sb.String()
}

type DType int

const (
	DTypeOther DType = iota
	DTypeF32
	DTypeF16
	DTypeQ80
	DTypeQ40
	DTypeI32
	DTypeMXFP4
)