ggml.go 37.5 KB
Newer Older
Michael Yang's avatar
Michael Yang committed
1
2
package ggml

3
4
5
6
7
8
// #cgo CPPFLAGS: -I${SRCDIR}/ggml/include
// #include <stdlib.h>
// #include <stdint.h>
// #include "ggml.h"
// #include "ggml-cpu.h"
// #include "ggml-backend.h"
Michael Yang's avatar
Michael Yang committed
9
10
11
import "C"

import (
12
	"context"
Michael Yang's avatar
Michael Yang committed
13
14
15
	"fmt"
	"io"
	"log/slog"
16
	"maps"
Michael Yang's avatar
Michael Yang committed
17
	"os"
18
	"runtime"
19
20
21
	"slices"
	"strconv"
	"strings"
22
	"sync/atomic"
23
	"unicode"
Michael Yang's avatar
Michael Yang committed
24
25
26
	"unsafe"

	"github.com/ollama/ollama/format"
27
28
	"github.com/ollama/ollama/fs"
	fsggml "github.com/ollama/ollama/fs/ggml"
29
	"github.com/ollama/ollama/logutil"
Michael Yang's avatar
Michael Yang committed
30
	"github.com/ollama/ollama/ml"
31
	ggml "github.com/ollama/ollama/ml/backend/ggml/ggml/src"
32
	"github.com/ollama/ollama/ml/nn/rope"
Michael Yang's avatar
Michael Yang committed
33
34
35
	"golang.org/x/sync/errgroup"
)

Michael Yang's avatar
Michael Yang committed
36
37
38
39
40
func devices() []*C.struct_ggml_backend_device {
	ggml.OnceLoad()
	ds := make([]*C.struct_ggml_backend_device, C.ggml_backend_dev_count())
	for i := range ds {
		ds[i] = C.ggml_backend_dev_get(C.size_t(i))
Michael Yang's avatar
Michael Yang committed
41
	}
Michael Yang's avatar
Michael Yang committed
42
43

	return ds
44
}
Michael Yang's avatar
Michael Yang committed
45
46

// Backend implements ml.Backend on top of the ggml C library. It owns the
// compute scheduler, the loaded model tensors, and per-device memory
// accounting.
type Backend struct {
	// modelPath is the location of the model data
	modelPath string

	// meta is the decoded metadata for the model file at modelPath.
	meta *fsggml.GGML

	// tensorLoadTargets maps from the name of the tensor in the file
	// to the name that is used by the model definition
	tensorLoadTargets map[string][]string

	// sched schedules graph computation across the backends below.
	// schedBackends and schedBufts are parallel slices: index i of one
	// corresponds to index i of the other.
	sched         *C.struct_ggml_backend_sched
	schedBackends []*C.struct_ggml_backend
	schedBufts    []*C.struct_ggml_backend_buffer_type

	// tensors maps tensor names to the created ggml tensors.
	tensors map[string]*C.struct_ggml_tensor

	// input is the backend used for inputs
	input *C.struct_ggml_backend_buffer_type

	// layers is the backend used for repeating layers
	layers map[int]*C.struct_ggml_backend_buffer_type

	// requiredMemory is the cumulative memory allocations needed by the backend
	requiredMemory *ml.BackendMemory

	// btDeviceMemory maps from a buffer type to the memory allocations associated with that device
	btDeviceMemory map[*C.struct_ggml_backend_buffer_type]*ml.DeviceMemory

	// flashAttention selects the flash-attention cache layout (see CacheConfig).
	flashAttention bool

	// maxGraphNodes is the maximum allowed number of graph nodes in this scheduler
	maxGraphNodes int
}

80
81
82
83
84
85
86
87
// New decodes the model file at modelPath, assigns each tensor to a device
// (CPU, accelerator, or GPU) according to params, allocates weight buffers,
// and constructs the ggml scheduler used for graph computation. It panics
// with ml.ErrNoMem (carrying the accumulated memory accounting) when a
// weight buffer cannot be allocated.
func New(modelPath string, params ml.BackendParams) (ml.Backend, error) {
	r, err := os.Open(modelPath)
	if err != nil {
		return nil, err
	}
	defer r.Close()

	meta, err := fsggml.Decode(r, -1)
	if err != nil {
		return nil, err
	}

	slog.Info(
		"",
		"architecture", meta.KV().Architecture(),
		"file_type", meta.KV().FileType(),
		"name", meta.KV().String("general.name"),
		"description", meta.KV().String("general.description"),
		"num_tensors", len(meta.Tensors().Items()),
		"num_key_values", len(meta.KV()),
	)

	var requiredMemory ml.BackendMemory
	btDeviceMemory := make(map[*C.struct_ggml_backend_buffer_type]*ml.DeviceMemory)

	// deviceBufferType pairs a device with the ordered list of buffer types
	// to try when placing a tensor on that device (first match wins).
	type deviceBufferType struct {
		d   *C.struct_ggml_backend_device
		bts []*C.struct_ggml_backend_buffer_type
	}

	var cpus, accels, gpus []*C.struct_ggml_backend_device
	for _, d := range devices() {
		switch C.ggml_backend_dev_type(d) {
		case C.GGML_BACKEND_DEVICE_TYPE_CPU:
			if len(cpus) == 0 {
				// only the first cpu device should be used
				cpus = append(cpus, d)
			}
		case C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
			accels = append(accels, d)
		case C.GGML_BACKEND_DEVICE_TYPE_GPU:
			gpus = append(gpus, d)
		}
	}

	blocks := int(meta.KV().BlockCount())

	// create list of buffer types for the cpu
	cpuDeviceBufferType := deviceBufferType{d: C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_CPU)}
	for _, d := range append(accels, append(gpus, cpus...)...) {
		switch C.ggml_backend_dev_type(d) {
		case C.GGML_BACKEND_DEVICE_TYPE_CPU,
			C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
			cpuDeviceBufferType.bts = append(cpuDeviceBufferType.bts, C.ggml_backend_dev_buffer_type(d))
			btDeviceMemory[C.ggml_backend_dev_buffer_type(d)] = &requiredMemory.CPU
		}
	}

	requiredMemory.CPU.Name = C.GoString(C.ggml_backend_dev_name(cpuDeviceBufferType.d))

	var props C.struct_ggml_backend_dev_props
	C.ggml_backend_dev_get_props(cpuDeviceBufferType.d, &props)
	requiredMemory.CPU.ID = C.GoString(props.id)

	// one Weights/Cache slot per repeating layer plus one for the output layer
	requiredMemory.CPU.Weights = make([]ml.Memory, blocks+1)
	requiredMemory.CPU.Cache = make([]ml.Memory, blocks+1)

	// create list of buffer types for each gpu
	var gpuDeviceBufferTypes []deviceBufferType
	requiredMemory.GPUs = make([]ml.DeviceMemory, len(gpus))
	for i, d := range gpus {
		bt := C.ggml_backend_dev_buffer_type(d)
		gpuDeviceBufferTypes = append(gpuDeviceBufferTypes, deviceBufferType{
			d: d,
			// fall back to the cpu buffer types when the gpu type can't take a tensor
			bts: append([]*C.struct_ggml_backend_buffer_type{bt}, cpuDeviceBufferType.bts...),
		})
		btDeviceMemory[bt] = &requiredMemory.GPUs[i]
		requiredMemory.GPUs[i].Name = C.GoString(C.ggml_backend_dev_name(d))

		var props C.struct_ggml_backend_dev_props
		C.ggml_backend_dev_get_props(d, &props)
		requiredMemory.GPUs[i].ID = C.GoString(props.id)

		requiredMemory.GPUs[i].Weights = make([]ml.Memory, blocks+1)
		requiredMemory.GPUs[i].Cache = make([]ml.Memory, blocks+1)
	}

	// an all-zero TensorSplit means "use the default split"
	useDefaultSplit := true
	for _, s := range params.TensorSplit {
		if s != 0 {
			useDefaultSplit = false
			break
		}
	}

	// calculate splits
	splits := make([]float32, len(gpus))
	if useDefaultSplit {
		// default: split on free memory
		for i := range splits {
			var free, total C.size_t
			C.ggml_backend_dev_memory(gpus[i], &free, &total)
			splits[i] = float32(free)
		}
	} else {
		splits = params.TensorSplit
	}

	var sum float32
	// cumulative sum of all splits
	for i := range splits {
		sum += splits[i]
		splits[i] = sum
	}

	// normalize splits
	for i := range splits {
		splits[i] /= sum
	}

	// inputs always use cpu
	input := cpuDeviceBufferType

	// define a range of gpu layers. anything outside of this range is assigned to the cpu
	gpuRangeStart := max(0, blocks-params.NumGPULayers)
	gpuRangeStop := min(gpuRangeStart+params.NumGPULayers, blocks+1)

	// assignLayer picks the device for layer i: cpu outside the gpu range,
	// otherwise the gpu whose normalized cumulative split covers the layer's
	// relative position within the range.
	assignLayer := func(i int) deviceBufferType {
		if i < gpuRangeStart || i >= gpuRangeStop {
			return cpuDeviceBufferType
		}

		index := slices.IndexFunc(splits, func(f float32) bool { return float32(i-gpuRangeStart)/float32(gpuRangeStop-gpuRangeStart) < f })
		if index < 0 || index >= len(gpuDeviceBufferTypes) {
			return cpuDeviceBufferType
		}

		return gpuDeviceBufferTypes[index]
	}

	// repeating layers are assigned based on their index in reverse order, e.g. i / (block_count + 1)
	layers := make([]deviceBufferType, blocks)
	for i := range layers {
		layers[i] = assignLayer(i)
	}

	// outputs are assigned iff allowed by splits and configured number of gpu layers
	output := assignLayer(blocks)

	maxTensors := len(meta.Tensors().Items())
	maxTensors += 1
	// each layer has at most 2 extra tensors for rope operations
	maxTensors += blocks * 2

	type tensor struct {
		source *fsggml.Tensor
		target string
	}

	// some tensors are mapped to different names so keep a list
	targets := make(map[string][]string)

	// contexts are shared by tensors of the same buffer type
	ctxs := make(map[*C.struct_ggml_backend_buffer_type]*C.struct_ggml_context)

	// createTensor creates (or reuses) the ggml tensor for t in the first
	// buffer type of bts, recording its padded allocation size against the
	// owning device's accounting. layer == -1 means an input tensor.
	createTensor := func(t tensor, bts []*C.struct_ggml_backend_buffer_type, layer int) *C.struct_ggml_tensor {
		for _, bt := range bts {
			if _, ok := ctxs[bt]; !ok {
				ctxs[bt] = C.ggml_init(C.struct_ggml_init_params{
					mem_size: C.ggml_tensor_overhead() * C.size_t(maxTensors),
					no_alloc: true,
				})
			}

			targets[t.source.Name] = append(targets[t.source.Name], t.target)

			name := t.source.Name
			if t.target != "" {
				name = t.target
			}

			cname := C.CString(name)
			defer C.free(unsafe.Pointer(cname))
			// reuse an existing tensor with the same name in this context
			if tt := C.ggml_get_tensor(ctxs[bt], cname); tt != nil {
				return tt
			}

			tt := C.ggml_new_tensor(ctxs[bt], t.source.Kind, C.int(len(t.source.Shape)), (*C.int64_t)(unsafe.Pointer(&t.source.Shape[0])))
			C.ggml_set_name(tt, cname)

			slog.Log(context.TODO(), logutil.LevelTrace, "created tensor", "name", name, "shape", t.source.Shape, "dtype", t.source.Kind, "buffer_type", C.GoString(C.ggml_backend_buft_name(bt)))

			size := pad(C.ggml_backend_buft_get_alloc_size(bt, tt), C.ggml_backend_buft_get_alignment(bt))
			if layer == -1 {
				// Assume that InputWeights can be allocated - they're always in system memory and can't be moved in any case
				requiredMemory.InputWeights.Status = ml.Allocated
				requiredMemory.InputWeights.Size += uint64(size)
			} else {
				btDeviceMemory[bt].Weights[layer].Size += uint64(size)
			}

			//nolint:staticcheck // TODO: check if buffer type supports this tensor
			return tt
		}

		return nil
	}

	// contains reports whether any of parts appears as a dot-separated
	// component of s.
	contains := func(s string, parts ...string) bool {
		split := strings.Split(s, ".")
		for _, part := range parts {
			if slices.Contains(split, part) {
				return true
			}
		}

		return false
	}

	for _, t := range meta.Tensors().Items() {
		switch {
		case contains(t.Name, "position_embd", "token_embd", "token_norm_embd", "token_types"):
			createTensor(tensor{source: t}, input.bts, -1)
			// models without a separate output tensor tie it to the token embedding
			if _, ok := meta.Tensors().GroupLayers()["output"]; !ok && t.Name == "token_embd.weight" {
				createTensor(tensor{source: t, target: "output.weight"}, output.bts, blocks)
			}
		case contains(t.Name, "cls", "output", "output_norm",
			"altup_proj", "altup_unembd_proj",
			"per_layer_token_embd", "per_layer_model_proj", "per_layer_proj_norm"):
			createTensor(tensor{source: t}, output.bts, blocks)
		case strings.HasPrefix(t.Name, "v.") || strings.HasPrefix(t.Name, "mm."):
			// TODO: assign vision tensors to the gpu if possible
			createTensor(tensor{source: t}, output.bts, blocks)
		case contains(t.Name, "rope_freqs", "rope_factors_long", "rope_factors_short"):
			// these tensors should be repeated per layer
			for i, layer := range layers {
				createTensor(tensor{
					source: t,
					target: "blk." + strconv.Itoa(i) + "." + t.Name,
				}, layer.bts, i)
			}
		default:
			// derive the layer index from the first run of digits in the name
			layerIndex := -1
			if fields := strings.FieldsFunc(t.Name, func(r rune) bool { return !unicode.IsNumber(r) }); len(fields) > 0 {
				if i, err := strconv.Atoi(fields[0]); err == nil {
					layerIndex = i
				}
			}

			if layerIndex >= 0 {
				createTensor(tensor{source: t}, layers[layerIndex].bts, layerIndex)
			} else {
				// load all other tensors on the cpu
				createTensor(tensor{source: t}, input.bts, -1)
			}
		}
	}

	// allocate buffers for each context
	bbs := make(map[*C.struct_ggml_context]*C.struct_ggml_backend_buffer, len(ctxs))
	for bt, c := range ctxs {
		if C.ggml_get_first_tensor(c) == nil {
			continue
		}

		b := C.ggml_backend_alloc_ctx_tensors_from_buft(c, bt)

		// mark every layer with weights on this buffer type as allocated or failed
		for i := range btDeviceMemory[bt].Weights {
			if btDeviceMemory[bt].Weights[i].Size != 0 {
				if b != nil {
					btDeviceMemory[bt].Weights[i].Status = ml.Allocated
				} else {
					btDeviceMemory[bt].Weights[i].Status = ml.Failed
				}
			}
		}

		if b == nil {
			panic(ml.ErrNoMem{BackendMemory: requiredMemory})
		}

		C.ggml_backend_buffer_set_usage(b, C.GGML_BACKEND_BUFFER_USAGE_WEIGHTS)
		bbs[c] = b
	}

	// Mimic llama runner logs summarizing layers and memory
	gpuLayers := 0
	for _, layer := range layers {
		if C.ggml_backend_dev_type(layer.d) == C.GGML_BACKEND_DEVICE_TYPE_GPU {
			gpuLayers++
		}
	}
	slog.Info(fmt.Sprintf("offloading %d repeating layers to GPU", gpuLayers))

	switch C.ggml_backend_dev_type(output.d) {
	case C.GGML_BACKEND_DEVICE_TYPE_CPU:
		slog.Info("offloading output layer to CPU")
	case C.GGML_BACKEND_DEVICE_TYPE_GPU:
		slog.Info("offloading output layer to GPU")
		gpuLayers++
	case C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
		slog.Info("offloading output layer to ACCEL")
	}
	slog.Info(fmt.Sprintf("offloaded %d/%d layers to GPU", gpuLayers, len(layers)+1))

	for bs := range maps.Values(bbs) {
		slog.Info("model weights", "buffer", C.GoString(C.ggml_backend_buffer_name(bs)), "size", format.HumanBytes2(uint64(C.ggml_backend_buffer_get_size(bs))))
	}

	// map tensor names to tensors for easy lookup later
	tensors := make(map[string]*C.struct_ggml_tensor)
	for _, c := range ctxs {
		for t := C.ggml_get_first_tensor(c); t != nil; t = C.ggml_get_next_tensor(c, t) {
			tensors[C.GoString(C.ggml_get_name(t))] = t
		}
	}

	// map devices to backend buffer types so new tensors can be assigned to the correct device
	deviceBufferTypes := make(map[*C.struct_ggml_backend_device]*C.struct_ggml_backend_buffer_type)

	// create backends and buffer types used for the compute graph scheduler
	var schedBackends []*C.struct_ggml_backend
	var schedBufts []*C.struct_ggml_backend_buffer_type
	for _, d := range append(gpus, append(accels, cpus...)...) {
		b := C.ggml_backend_dev_init(d, nil)
		bt := C.ggml_backend_get_default_buffer_type(b)

		deviceBufferTypes[d] = bt

		schedBackends = append(schedBackends, b)
		schedBufts = append(schedBufts, bt)

		if C.ggml_backend_is_cpu(b) {
			// set number of threads for cpu backend
			C.ggml_backend_cpu_set_n_threads(b, C.int(Threads(params.NumThreads)))
		}
	}

	maxGraphNodes := max(8192, len(meta.Tensors().Items())*5)
	return &Backend{
		modelPath:         modelPath,
		flashAttention:    params.FlashAttention,
		meta:              meta,
		tensorLoadTargets: targets,
		tensors:           tensors,
		sched: C.ggml_backend_sched_new(
			(*C.ggml_backend_t)(unsafe.Pointer(&schedBackends[0])),
			(*C.ggml_backend_buffer_type_t)(unsafe.Pointer(&schedBufts[0])),
			C.int(len(schedBackends)),
			C.size_t(maxGraphNodes),
			C._Bool(false),
			C._Bool(false),
		),
		schedBackends: schedBackends,
		schedBufts:    schedBufts,
		input:         deviceBufferTypes[input.d],
		layers: func() map[int]*C.struct_ggml_backend_buffer_type {
			m := make(map[int]*C.struct_ggml_backend_buffer_type)
			for i, layer := range layers {
				m[i] = deviceBufferTypes[layer.d]
			}
			return m
		}(),
		requiredMemory: &requiredMemory,
		btDeviceMemory: btDeviceMemory,
		maxGraphNodes:  maxGraphNodes,
	}, nil
}

// init registers this implementation as the "ggml" backend with the ml package.
func init() {
	ml.RegisterBackend("ggml", New)
}

// Load reads every tensor's data from the model file into its ggml tensor(s),
// in parallel (bounded by GOMAXPROCS). progress, if non-nil, is called with
// the fraction of total bytes copied so far. Returns the first error from any
// tensor, or ctx's error if canceled.
func (b *Backend) Load(ctx context.Context, progress func(float32)) error {
	var doneBytes atomic.Uint64
	totalBytes := uint64(b.meta.Length) - b.meta.Tensors().Offset

	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(runtime.GOMAXPROCS(0))
	for _, t := range b.meta.Tensors().Items() {
		t := t
		g.Go(func() error {
			// a source tensor may be copied into several targets (e.g. tied
			// embeddings); collect all destination tensors up front
			// NOTE(review): max(1, …) suggests an empty target list is
			// possible, but the index below would then panic — confirm every
			// tensor always has at least one entry in tensorLoadTargets.
			tts := make([]*C.struct_ggml_tensor, max(1, len(b.tensorLoadTargets[t.Name])))
			for i := range tts {
				target := b.tensorLoadTargets[t.Name][i]
				if target == "" {
					target = t.Name
				}

				tt, ok := b.tensors[target]
				if !ok {
					return fmt.Errorf("unassigned tensor: %s", t.Name)
				}

				tts[i] = tt
			}

			// Create a new FD for each goroutine so that each FD is read sequentially, rather than
			// seeking around within an FD shared between all goroutines.
			file, err := os.Open(b.modelPath)
			if err != nil {
				slog.Warn("file open error", "file", b.modelPath, "error", err)
				return err
			}
			defer file.Close()
			sr := io.NewSectionReader(file, int64(b.meta.Tensors().Offset+t.Offset), int64(t.Size()))

			// copy in 128 KiB chunks
			bts := make([]byte, 128*format.KibiByte)

			var s uint64
			for s < t.Size() {
				// Stop if either the parent context has been canceled or if any of the other tensors returned an error
				if err := ctx.Err(); err != nil {
					return err
				}

				n, err := io.ReadFull(sr, bts[:min(len(bts), int(t.Size()-s))])
				if err != nil {
					slog.Warn("file read error", "file", b.modelPath, "error", err)
					return err
				}

				for _, tt := range tts {
					C.ggml_backend_tensor_set(tt, unsafe.Pointer(&bts[0]), C.size_t(s), C.size_t(n))
				}

				s += uint64(n)

				if progress != nil {
					done := doneBytes.Add(uint64(n))
					progress(float32(done) / float32(totalBytes))
				}
			}

			return nil
		})
	}

	if err := g.Wait(); err != nil {
		return err
	}

	return nil
}

519
520
521
522
// BackendMemory returns a copy of the memory accounting accumulated so far.
func (b *Backend) BackendMemory() ml.BackendMemory {
	return *b.requiredMemory
}

523
// Config returns the model's key-value metadata as an fs.Config.
func (b *Backend) Config() fs.Config {
	return b.meta.KV()
}

func (b *Backend) Get(name string) ml.Tensor {
528
529
	if t, ok := b.tensors[name]; ok {
		return &Tensor{b: b, t: t}
Michael Yang's avatar
Michael Yang committed
530
531
532
533
534
535
	}

	return nil
}

// NewContext creates a context sized for the backend's maximum graph node count.
func (b *Backend) NewContext() ml.Context {
	return b.NewContextSize(b.maxGraphNodes)
}

// NewContextSize creates a context sized for at most n graph nodes. It panics
// if n exceeds the backend's configured maximum.
func (b *Backend) NewContextSize(n int) ml.Context {
	if n > b.maxGraphNodes {
		panic(fmt.Errorf("requested number of graph nodes (%v) for new context exceeds maximum (%v)", n, b.maxGraphNodes))
	}

	var allocatedBuffers []*C.struct_ggml_backend_buffer

	return &Context{
		b:             b,
		maxGraphNodes: n,
		// size the ggml context for n tensors plus the graph bookkeeping
		ctx: C.ggml_init(C.struct_ggml_init_params{
			mem_size: C.size_t(n)*C.ggml_tensor_overhead() + C.ggml_graph_overhead_custom(C.size_t(n), false),
			no_alloc: true,
		}),
		allocatedBuffers: &allocatedBuffers,
		// layer -1: no cache accounting until Input or Layer is selected
		layer: -1,
	}
}

560
// CacheConfig reports the KV-cache layout this backend requires: the
// flash-attention variant when enabled, otherwise the default layout with a
// permuted V tensor.
func (b *Backend) CacheConfig() ml.CacheConfig {
	if b.flashAttention {
		return ml.CacheConfig{CachePadding: 256, MaskDType: ml.DTypeF16, MaskBatchPadding: C.GGML_KQ_MASK_PAD}
	}

	return ml.CacheConfig{CachePadding: 32, PermutedV: true}
}

Michael Yang's avatar
Michael Yang committed
568
// Context is a scratch area for building and computing a single ggml graph.
type Context struct {
	b *Backend

	ctx   *C.struct_ggml_context
	graph *C.struct_ggml_cgraph

	// buft is the buffer type used for new tensors
	buft *C.struct_ggml_backend_buffer_type

	// allocatedBuffers are buffers for tensors that we have allocated in this context
	// so that we can free them when we close the context
	allocatedBuffers *[]*C.struct_ggml_backend_buffer

	// maxGraphNodes is the maximum allowed number of graph nodes in this context
	maxGraphNodes int

	// layer is the graph layer that this context is allocating for - assumed to be cache
	layer int
}

588
func (c *Context) Input() ml.Context {
Michael Yang's avatar
Michael Yang committed
589
	if c.b.input != nil {
590
		return &Context{
591
592
593
594
595
			b:                c.b,
			ctx:              c.ctx,
			buft:             c.b.input,
			allocatedBuffers: c.allocatedBuffers,
			maxGraphNodes:    c.maxGraphNodes,
596
			layer:            -1,
597
598
599
		}
	}

600
	return c
601
602
}

603
func (c *Context) Layer(i int) ml.Context {
604
	if buft, ok := c.b.layers[i]; ok {
605
		return &Context{
606
607
608
609
610
			b:                c.b,
			ctx:              c.ctx,
			buft:             buft,
			allocatedBuffers: c.allocatedBuffers,
			maxGraphNodes:    c.maxGraphNodes,
611
			layer:            i,
612
613
614
		}
	}

615
	return c
616
617
}

618
// Forward adds the given tensors (and their dependencies) to the compute
// graph, creating the graph lazily on first call. Returns c for chaining.
func (c *Context) Forward(tensors ...ml.Tensor) ml.Context {
	if c.graph == nil {
		c.graph = C.ggml_new_graph_custom(c.ctx, C.size_t(c.maxGraphNodes), false)
	}

	for _, tensor := range tensors {
		C.ggml_build_forward_expand(c.graph, tensor.(*Tensor).t)
	}

	return c
}

630
// Compute runs the built graph asynchronously on the scheduler and panics on
// a non-success status. Each non-empty output tensor is given a sync function
// so that reading it (Bytes/Floats) first waits for the computation to
// finish; the wait happens at most once per Compute call.
func (c *Context) Compute(tensors ...ml.Tensor) {
	if status := C.ggml_backend_sched_graph_compute_async(c.b.sched, c.graph); status != C.GGML_STATUS_SUCCESS {
		panic(fmt.Errorf("error computing ggml graph: %v", status))
	}
	C.ggml_backend_sched_reset(c.b.sched)

	// one sync closure shared by all returned tensors: the scheduler is
	// synchronized only once, on the first read
	needSync := true
	sync := func() {
		if needSync {
			C.ggml_backend_sched_synchronize(c.b.sched)
			needSync = false
		}
	}

	for _, t := range tensors {
		if C.ggml_nbytes(t.(*Tensor).t) > 0 {
			t.(*Tensor).sync = sync
		}
	}
}

651
652
// Reserve sizes scheduler buffers for the current graph without running it,
// recording per-backend graph memory in the backend's accounting. Panics with
// ml.ErrNoMem when the reservation cannot be satisfied.
func (c *Context) Reserve() {
	reserved := C.ggml_backend_sched_reserve(c.b.sched, c.graph)

	slog.Debug("compute graph", "nodes", C.ggml_graph_n_nodes(c.graph), "splits", C.ggml_backend_sched_get_n_splits(c.b.sched))

	// Reserve may get called multiple times for different graphs - we just want the last run, which will contain the max allocations
	for _, bt := range c.b.schedBufts {
		c.b.btDeviceMemory[bt].Graph = ml.Memory{}
	}

	for i := range c.b.schedBackends {
		bufferStatus := C.ggml_backend_sched_get_attempted_buffer_size(c.b.sched, c.b.schedBackends[i])

		graph := &c.b.btDeviceMemory[c.b.schedBufts[i]].Graph
		graph.Size += uint64(bufferStatus.size)
		// once any backend on this buffer type fails, the Graph status stays Failed
		if bufferStatus.allocated && graph.Status != ml.Failed {
			graph.Status = ml.Allocated
		} else {
			graph.Status = ml.Failed
		}

		slog.Info("compute graph", "backend", C.GoString(C.ggml_backend_name(c.b.schedBackends[i])), "buffer_type", C.GoString(C.ggml_backend_buft_name(c.b.schedBufts[i])),
			"size", format.HumanBytes2(uint64(bufferStatus.size)))
	}

	if !reserved {
		panic(ml.ErrNoMem{BackendMemory: *c.b.requiredMemory})
	}
}

681
// MaxGraphNodes returns the maximum number of graph nodes this context supports.
func (c *Context) MaxGraphNodes() int {
	return c.maxGraphNodes
}

685
686
687
// shapeToGGML converts a Go shape to a C int64 array pointer as expected by
// ggml_new_tensor. shape must be non-empty.
func shapeToGGML(shape []int) *C.int64_t {
	dims := make([]C.int64_t, len(shape))
	for i := range shape {
		dims[i] = C.int64_t(shape[i])
	}

	return &dims[0]
}

694
695
696
697
// pad rounds length up to the next multiple of pad.
func pad(length, pad C.size_t) C.size_t {
	if rem := length % pad; rem != 0 {
		return length + pad - rem
	}
	return length
}

698
// newTensor creates a tensor of the given dtype and shape in this context's
// buffer type, allocating a dedicated backend buffer for it and recording the
// allocation against the context's layer cache accounting. Panics if no
// buffer type is selected, on an unsupported dtype, on more than 4
// dimensions, on a non-positive dimension, or (with ml.ErrNoMem) when the
// buffer allocation fails.
func (c *Context) newTensor(dtype ml.DType, shape []int) ml.Tensor {
	if c.buft == nil {
		panic("set Input or Layer before creating tensors")
	}

	var cdtype uint32
	switch dtype {
	case ml.DTypeF32:
		cdtype = C.GGML_TYPE_F32
	case ml.DTypeF16:
		cdtype = C.GGML_TYPE_F16
	case ml.DTypeQ80:
		cdtype = C.GGML_TYPE_Q8_0
	case ml.DTypeQ40:
		cdtype = C.GGML_TYPE_Q4_0
	case ml.DTypeI32:
		cdtype = C.GGML_TYPE_I32
	case ml.DTypeMXFP4:
		cdtype = C.GGML_TYPE_MXFP4
	default:
		panic("unsupported dtype")
	}

	// an empty or zero-length shape yields an unallocated 1-D tensor of length 0
	if len(shape) < 1 || shape[0] == 0 {
		var shape C.int64_t = 0
		return &Tensor{b: c.b, t: C.ggml_new_tensor(c.ctx, cdtype, 1, &shape)}
	} else if len(shape) > 4 {
		panic("unsupported number of dimensions")
	}

	for _, dim := range shape {
		if dim < 1 {
			panic("invalid shape")
		}
	}

	t := C.ggml_new_tensor(c.ctx, cdtype, C.int(len(shape)), shapeToGGML(shape))
	size := pad(C.ggml_backend_buft_get_alloc_size(c.buft, t), C.ggml_backend_buft_get_alignment(c.buft))

	b := C.ggml_backend_buft_alloc_buffer(c.buft, size)
	// record the allocation (or its failure) against the layer's cache accounting
	if c.layer >= 0 {
		cache := &c.b.btDeviceMemory[c.buft].Cache[c.layer]

		cache.Size += uint64(size)
		if b != nil {
			cache.Status = ml.Allocated
		} else {
			cache.Status = ml.Failed
		}
	}

	if b == nil {
		panic(ml.ErrNoMem{BackendMemory: *c.b.requiredMemory})
	}

	*c.allocatedBuffers = append(*c.allocatedBuffers, b)
	C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b))
	return &Tensor{b: c.b, t: t}
}

758
// Empty creates an uninitialized tensor of the given dtype and shape.
func (c *Context) Empty(dtype ml.DType, shape ...int) ml.Tensor {
	return c.newTensor(dtype, shape)
}

762
// Zeros creates a zero-initialized tensor of the given dtype and shape.
func (c *Context) Zeros(dtype ml.DType, shape ...int) ml.Tensor {
	t := c.newTensor(dtype, shape)
	C.ggml_set_zero(t.(*Tensor).t)
	return t
}

768
// checkShape panics unless the product of shape's dimensions equals len(s).
// An empty s is always accepted (used for creating placeholder tensors).
//
// The previous implementation repeatedly divided len(s) by each dimension and
// required the quotient to be 1, which silently accepted mismatched shapes
// whenever integer division truncated (e.g. a slice of length 6 against shape
// [4]: 6/4 == 1) and crashed with a raw divide-by-zero on a 0 dimension.
// Multiplying the dimensions and comparing against len(s) is exact and
// reports both cases through the same descriptive panic.
func checkShape[S ~[]E, E any](s S, shape ...int) {
	n := len(s)
	if n == 0 {
		return
	}

	product := 1
	for _, v := range shape {
		product *= v
	}

	if product != n {
		panic(fmt.Errorf("invalid shape: %v", shape))
	}
}

784
785
// FromFloatSlice creates a DTypeF32 tensor with the given shape and copies s
// into it. Panics (via checkShape) if len(s) does not match the shape.
func (c *Context) FromFloatSlice(s []float32, shape ...int) ml.Tensor {
	checkShape(s, shape...)

	t := c.newTensor(ml.DTypeF32, shape)

	if len(s) > 0 {
		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
	}

	return t
}

796
797
// FromIntSlice creates a DTypeI32 tensor with the given shape and copies s
// into it. Panics (via checkShape) if len(s) does not match the shape.
func (c *Context) FromIntSlice(s []int32, shape ...int) ml.Tensor {
	checkShape(s, shape...)

	t := c.newTensor(ml.DTypeI32, shape)

	if len(s) > 0 {
		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
	}

	return t
}

Michael Yang's avatar
arange  
Michael Yang committed
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
// Arange returns a 1-D tensor with values in [start, stop) advancing by step.
// DTypeF32 uses ggml's native arange; DTypeI32 is materialized on the CPU
// because ggml_cast cannot convert float32 to int32. Panics on other dtypes.
// NOTE(review): value receiver, unlike the pointer receivers on the other
// Context methods — confirm this is intentional.
func (c Context) Arange(start, stop, step float32, dtype ml.DType) ml.Tensor {
	switch dtype {
	case ml.DTypeF32:
		// ggml_arange creates a float32 tensor
		return &Tensor{
			b: c.b,
			t: C.ggml_arange(c.ctx, C.float(start), C.float(stop), C.float(step)),
		}
	case ml.DTypeI32:
		// ggml_cast does not support float32 to int32 conversion
		arange := make([]int32, 0, int((stop-start)/step))
		for i := start; i < stop; i += step {
			arange = append(arange, int32(i))
		}

		return c.Input().FromIntSlice(arange, len(arange))
	default:
		panic("unsupported dtype for arange")
	}
}

Michael Yang's avatar
Michael Yang committed
829
830
// Close frees every buffer allocated through this context and the underlying
// ggml context. Safe to call on a nil receiver.
func (c *Context) Close() {
	if c != nil {
		for _, b := range *c.allocatedBuffers {
			C.ggml_backend_buffer_free(b)
		}
		*c.allocatedBuffers = nil

		C.ggml_free(c.ctx)
	}
}

// Tensor wraps a ggml tensor together with the backend that owns it.
type Tensor struct {
	b *Backend              // backend this tensor belongs to
	t *C.struct_ggml_tensor // underlying ggml tensor

	// sync, when non-nil, blocks until the computation producing this
	// tensor has finished; it gates host reads in Bytes and Floats.
	sync func()
}

// LogValue implements slog.LogValuer, logging the tensor's name, element
// type, and shape as a group.
func (t *Tensor) LogValue() slog.Value {
	name := C.GoString(C.ggml_get_name(t.t))
	typeName := C.GoString(C.ggml_type_name(t.t._type))
	return slog.GroupValue(
		slog.String("name", name),
		slog.String("type", typeName),
		slog.Any("shape", t.Shape()),
	)
}

// Dim returns the number of elements along dimension n (ggml's ne[n]).
func (t *Tensor) Dim(n int) int {
	return int(t.t.ne[n])
}

// Stride returns the stride in bytes of dimension n (ggml's nb[n]).
func (t *Tensor) Stride(n int) int {
	return int(t.t.nb[n])
}

862
863
func (t *Tensor) Shape() []int {
	shape := make([]int, C.ggml_n_dims(t.t))
Michael Yang's avatar
Michael Yang committed
864
865
866
867
868
869
870
	for i := range shape {
		shape[i] = t.Dim(i)
	}

	return shape
}

871
872
873
874
875
876
877
878
879
func (t *Tensor) Bytes() (data []byte) {
	if t.sync != nil {
		data = make([]byte, C.ggml_nbytes(t.t))

		t.sync()
		C.ggml_backend_tensor_get(t.t, unsafe.Pointer(&data[0]), 0, C.ggml_nbytes(t.t))
	}

	return
Michael Yang's avatar
Michael Yang committed
880
881
}

882
883
884
885
886
887
func (t *Tensor) Floats() (data []float32) {
	if t.sync != nil {
		data = make([]float32, C.ggml_nelements(t.t))

		t.sync()
		C.ggml_backend_tensor_get(t.t, unsafe.Pointer(&data[0]), 0, C.ggml_nbytes(t.t))
Michael Yang's avatar
Michael Yang committed
888
889
890
891
892
893
894
895
896
	}

	return
}

// DType maps the underlying ggml element type to the ml package's dtype
// enum. Types with no direct mapping are reported as DTypeOther.
func (t *Tensor) DType() ml.DType {
	switch t.t._type {
	case C.GGML_TYPE_F32:
		return ml.DTypeF32
	case C.GGML_TYPE_F16:
		return ml.DTypeF16
	case C.GGML_TYPE_Q8_0:
		return ml.DTypeQ80
	case C.GGML_TYPE_Q4_0:
		return ml.DTypeQ40
	case C.GGML_TYPE_I32:
		return ml.DTypeI32
	case C.GGML_TYPE_MXFP4:
		return ml.DTypeMXFP4
	default:
		return ml.DTypeOther
	}
}

912
913
914
915
916
917
918
func (t *Tensor) Neg(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_neg(ctx.(*Context).ctx, t.t),
	}
}

Michael Yang's avatar
Michael Yang committed
919
920
func (t *Tensor) Add(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
921
		b: t.b,
Michael Yang's avatar
Michael Yang committed
922
923
924
925
		t: C.ggml_add(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

Michael Yang's avatar
Michael Yang committed
926
927
928
929
930
931
932
func (t *Tensor) Sub(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sub(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
func (t *Tensor) Repeat(ctx ml.Context, dim, n int) ml.Tensor {
	if dim < 0 || dim >= C.GGML_MAX_DIMS {
		panic("invalid dimension")
	}

	shape := make([]C.int64_t, C.GGML_MAX_DIMS)
	for i := range C.GGML_MAX_DIMS {
		if i == dim {
			shape[i] = C.int64_t(t.Dim(i) * n)
		} else {
			shape[i] = C.int64_t(t.Dim(i))
		}
	}

	tmpl := C.ggml_new_tensor(ctx.(*Context).ctx, t.t._type, C.int(len(shape)), unsafe.SliceData(shape))
	return &Tensor{
		b: t.b,
		t: C.ggml_repeat(ctx.(*Context).ctx, t.t, tmpl),
	}
}

Michael Yang's avatar
Michael Yang committed
954
955
956
957
958
959
960
961
962
963
func (t *Tensor) Stack(ctx ml.Context, dim int, s ...ml.Tensor) ml.Tensor {
	if len(s) > 0 {
		return t.Concat(ctx, s[0].Stack(ctx, dim, s[1:]...), dim)
	}

	return t
}

func (t *Tensor) Concat(ctx ml.Context, t2 ml.Tensor, dim int) ml.Tensor {
	return &Tensor{
964
		b: t.b,
Michael Yang's avatar
Michael Yang committed
965
966
967
968
		t: C.ggml_concat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(dim)),
	}
}

Michael Yang's avatar
Michael Yang committed
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
func (t *Tensor) Contiguous(ctx ml.Context, shape ...int) ml.Tensor {
	switch len(shape) {
	case 0:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont(ctx.(*Context).ctx, t.t),
		}
	case 1:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0])),
		}
	case 2:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_2d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1])),
		}
	case 3:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_3d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2])),
		}
	case 4:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_4d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2]), C.int64_t(shape[3])),
		}
	default:
		panic("unsupported number of dimensions")
Michael Yang's avatar
Michael Yang committed
998
999
1000
1001
1002
	}
}

func (t *Tensor) Mul(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
1003
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1004
1005
1006
1007
		t: C.ggml_mul(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

1008
1009
1010
1011
1012
1013
1014
func (t *Tensor) Div(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_div(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

Michael Yang's avatar
Michael Yang committed
1015
1016
func (t *Tensor) Mulmat(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
1017
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1018
1019
1020
1021
		t: C.ggml_mul_mat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

1022
1023
1024
1025
1026
func (t *Tensor) MulmatFullPrec(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	mul := C.ggml_mul_mat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t)
	C.ggml_mul_mat_set_prec(mul, C.GGML_PREC_F32)

	return &Tensor{
1027
		b: t.b,
1028
1029
1030
1031
		t: mul,
	}
}

Michael Yang's avatar
llama4  
Michael Yang committed
1032
1033
1034
1035
1036
1037
1038
func (t *Tensor) MulmatID(ctx ml.Context, t2, ids ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_mul_mat_id(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, ids.(*Tensor).t),
	}
}

Michael Yang's avatar
Michael Yang committed
1039
func (t *Tensor) LayerNorm(ctx ml.Context, w, b ml.Tensor, eps float32) ml.Tensor {
Michael Yang's avatar
llama4  
Michael Yang committed
1040
1041
1042
1043
1044
1045
	tt := C.ggml_norm(ctx.(*Context).ctx, t.t, C.float(eps))
	if w != nil {
		tt = C.ggml_mul(ctx.(*Context).ctx, tt, w.(*Tensor).t)
		if b != nil {
			tt = C.ggml_add(ctx.(*Context).ctx, tt, b.(*Tensor).t)
		}
Michael Yang's avatar
Michael Yang committed
1046
1047
	}

Michael Yang's avatar
llama4  
Michael Yang committed
1048
	return &Tensor{b: t.b, t: tt}
Michael Yang's avatar
Michael Yang committed
1049
1050
1051
}

func (t *Tensor) RMSNorm(ctx ml.Context, w ml.Tensor, eps float32) ml.Tensor {
Michael Yang's avatar
llama4  
Michael Yang committed
1052
1053
1054
1055
1056
1057
	tt := C.ggml_rms_norm(ctx.(*Context).ctx, t.t, C.float(eps))
	if w != nil {
		tt = C.ggml_mul(ctx.(*Context).ctx, tt, w.(*Tensor).t)
	}

	return &Tensor{b: t.b, t: tt}
Michael Yang's avatar
Michael Yang committed
1058
1059
}

1060
func (t *Tensor) Pad(ctx ml.Context, shape ...int) ml.Tensor {
Michael Yang's avatar
Michael Yang committed
1061
1062
	if len(shape) != 4 {
		panic("expected 4 dimensions")
1063
1064
	} else if shape[3] != 0 {
		panic("cuda does not support 4d tensors")
Michael Yang's avatar
Michael Yang committed
1065
1066
1067
	}

	return &Tensor{
1068
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
		t: C.ggml_pad(ctx.(*Context).ctx, t.t, C.int(shape[0]), C.int(shape[1]), C.int(shape[2]), C.int(shape[3])),
	}
}

func (t *Tensor) Permute(ctx ml.Context, shape ...int) ml.Tensor {
	if len(shape) != 4 {
		panic("expected 4 dimensions")
	}

	return &Tensor{
1079
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1080
1081
1082
1083
1084
1085
		t: C.ggml_permute(ctx.(*Context).ctx, t.t, C.int(shape[0]), C.int(shape[1]), C.int(shape[2]), C.int(shape[3])),
	}
}

func (t *Tensor) Rows(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
1086
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1087
1088
1089
1090
1091
1092
		t: C.ggml_get_rows(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Copy(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
1093
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1094
1095
1096
1097
		t: C.ggml_cpy(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

1098
func (t *Tensor) Reshape(ctx ml.Context, shape ...int) ml.Tensor {
Michael Yang's avatar
Michael Yang committed
1099
1100
1101
	switch len(shape) {
	case 1:
		return &Tensor{
1102
			b: t.b,
Michael Yang's avatar
Michael Yang committed
1103
1104
1105
1106
			t: C.ggml_reshape_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0])),
		}
	case 2:
		return &Tensor{
1107
			b: t.b,
Michael Yang's avatar
Michael Yang committed
1108
1109
1110
1111
			t: C.ggml_reshape_2d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1])),
		}
	case 3:
		return &Tensor{
1112
			b: t.b,
Michael Yang's avatar
Michael Yang committed
1113
1114
1115
1116
			t: C.ggml_reshape_3d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2])),
		}
	case 4:
		return &Tensor{
1117
			b: t.b,
Michael Yang's avatar
Michael Yang committed
1118
1119
1120
1121
1122
1123
1124
1125
1126
			t: C.ggml_reshape_4d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2]), C.int64_t(shape[3])),
		}
	default:
		panic("unsupported number of dimensions")
	}
}

func (t *Tensor) Scale(ctx ml.Context, s float64) ml.Tensor {
	return &Tensor{
1127
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1128
1129
1130
1131
		t: C.ggml_scale(ctx.(*Context).ctx, t.t, (C.float)(s)),
	}
}

1132
1133
1134
1135
1136
1137
1138
func (t *Tensor) SumRows(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sum_rows(ctx.(*Context).ctx, t.t),
	}
}

Michael Yang's avatar
Michael Yang committed
1139
1140
func (t *Tensor) Softmax(ctx ml.Context) ml.Tensor {
	return &Tensor{
1141
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1142
1143
1144
1145
		t: C.ggml_soft_max(ctx.(*Context).ctx, t.t),
	}
}

1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
func (t *Tensor) Sin(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sin(ctx.(*Context).ctx, t.t),
	}
}

// Cos returns the elementwise cosine of t.
func (t *Tensor) Cos(ctx ml.Context) ml.Tensor {
	tt := C.ggml_cos(ctx.(*Context).ctx, t.t)
	return &Tensor{b: t.b, t: tt}
}

Michael Yang's avatar
Michael Yang committed
1160
1161
func (t *Tensor) Tanh(ctx ml.Context) ml.Tensor {
	return &Tensor{
1162
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1163
1164
1165
1166
		t: C.ggml_tanh_inplace(ctx.(*Context).ctx, t.t),
	}
}

Michael Yang's avatar
llama4  
Michael Yang committed
1167
1168
1169
1170
1171
1172
1173
func (t *Tensor) Sigmoid(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sigmoid_inplace(ctx.(*Context).ctx, t.t),
	}
}

// View creates a strided view into t starting at the given byte offset.
//
// shape is an alternating list of dimension sizes and byte strides:
//
//	1 value:  ne0
//	3 values: ne0, nb1, ne1
//	5 values: ne0, nb1, ne1, nb2, ne2
//	7 values: ne0, nb1, ne1, nb2, ne2, nb3, ne3
//
// i.e. even indices are element counts and odd indices are the byte strides
// between consecutive slices of the next dimension.
func (t *Tensor) View(ctx ml.Context, offset int, shape ...int) ml.Tensor {
	switch len(shape) {
	case 1:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.size_t(offset)),
		}
	case 3:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_2d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]),
				C.size_t(shape[1]),
				C.size_t(offset)),
		}
	case 5:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_3d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]), C.int64_t(shape[4]),
				C.size_t(shape[1]), C.size_t(shape[3]),
				C.size_t(offset)),
		}
	case 7:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_4d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]), C.int64_t(shape[4]), C.int64_t(shape[6]),
				C.size_t(shape[1]), C.size_t(shape[3]), C.size_t(shape[5]),
				C.size_t(offset)),
		}
	default:
		panic("unsupported number of dimensions")
	}
}

// RoPE applies rotary position embedding to t using the given positions.
// ropeDim is the number of dimensions to rotate; ropeBase and ropeScale are
// the frequency base and scaling factor. Behavior can be tuned via rope
// option functions (YaRN-style extrapolation/attention factors, frequency
// factors, rope type, original context length).
func (t *Tensor) RoPE(ctx ml.Context, positions ml.Tensor, ropeDim int, ropeBase, ropeScale float32, options ...func(*rope.Options)) ml.Tensor {
	// Default options
	opts := rope.Options{
		Factors:               &Tensor{},
		OriginalContextLength: 131072,
		ExtrapolationFactor:   0.,
		AttentionFactor:       1.,
		BetaFast:              32.,
		BetaSlow:              1.,
	}

	// Apply any provided options
	for _, option := range options {
		option(&opts)
	}

	// ggml_rope_ext does not operate on quantized data, so dequantize to
	// f32 first if necessary.
	dequant := t.t
	if C.ggml_is_quantized(t.t._type) {
		dequant = C.ggml_cast(ctx.(*Context).ctx, t.t, C.GGML_TYPE_F32)
	}

	// NOTE: argument order below must match the ggml_rope_ext C signature.
	return &Tensor{
		b: t.b,
		t: C.ggml_rope_ext(
			ctx.(*Context).ctx,
			dequant,
			positions.(*Tensor).t,
			opts.Factors.(*Tensor).t,
			C.int(ropeDim),
			C.int(opts.Type),
			C.int(opts.OriginalContextLength),
			C.float(ropeBase),
			C.float(ropeScale),
			C.float(opts.ExtrapolationFactor),
			C.float(opts.AttentionFactor),
			C.float(opts.BetaFast),
			C.float(opts.BetaSlow),
		),
	}
}

1251
1252
1253
1254
1255
1256
1257
func (t *Tensor) IM2Col(ctx ml.Context, t2 ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_im2col(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(s0), C.int(s1), C.int(p0), C.int(p1), C.int(d0), C.int(d1), true, C.GGML_TYPE_F32),
	}
}

Michael Yang's avatar
Michael Yang committed
1258
1259
func (t *Tensor) GELU(ctx ml.Context) ml.Tensor {
	return &Tensor{
1260
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1261
1262
1263
1264
		t: C.ggml_gelu_inplace(ctx.(*Context).ctx, t.t),
	}
}

Michael Yang's avatar
Michael Yang committed
1265
1266
1267
1268
1269
1270
1271
func (t *Tensor) QuickGELU(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_gelu_quick_inplace(ctx.(*Context).ctx, t.t),
	}
}

Michael Yang's avatar
Michael Yang committed
1272
1273
func (t *Tensor) SILU(ctx ml.Context) ml.Tensor {
	return &Tensor{
1274
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1275
1276
1277
1278
		t: C.ggml_silu_inplace(ctx.(*Context).ctx, t.t),
	}
}

Michael Yang's avatar
Michael Yang committed
1279
1280
1281
1282
1283
1284
1285
func (t *Tensor) RELU(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_relu_inplace(ctx.(*Context).ctx, t.t),
	}
}

Michael Yang's avatar
Michael Yang committed
1286
1287
func (t *Tensor) Conv2D(ctx ml.Context, t2 ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor {
	return &Tensor{
1288
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1289
1290
1291
		t: C.ggml_conv_2d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(s0), C.int(s1), C.int(p0), C.int(p1), C.int(d0), C.int(d1)),
	}
}
1292

Michael Yang's avatar
Michael Yang committed
1293
func (t *Tensor) AvgPool2D(ctx ml.Context, k, s int, p float32) ml.Tensor {
Michael Yang's avatar
Michael Yang committed
1294
1295
	return &Tensor{
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1296
		t: C.ggml_pool_2d(ctx.(*Context).ctx, t.t, C.GGML_OP_POOL_AVG, C.int(k), C.int(k), C.int(s), C.int(s), C.float(p), C.float(p)),
Michael Yang's avatar
Michael Yang committed
1297
1298
1299
	}
}

Michael Yang's avatar
Michael Yang committed
1300
1301
1302
1303
func (t *Tensor) Set(ctx ml.Context, t2 ml.Tensor, offset int, strides ...int) ml.Tensor {
	var tt *C.struct_ggml_tensor
	switch len(strides) {
	case 0:
Michael Yang's avatar
Michael Yang committed
1304
		tt = C.ggml_set_1d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.size_t(offset))
Michael Yang's avatar
Michael Yang committed
1305
	case 1:
Michael Yang's avatar
Michael Yang committed
1306
		tt = C.ggml_set_2d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.size_t(offset), C.size_t(strides[0]))
Michael Yang's avatar
Michael Yang committed
1307
1308
1309
1310
1311
1312
1313
	default:
		panic("unsupported number of dimensions")
	}

	return &Tensor{b: t.b, t: tt}
}

// ScaledDotProductAttention computes attention for query t against key and
// value, with an optional additive mask and scaling factor scale. When the
// backend has flash attention enabled it uses the fused ggml kernel
// (forced to f32 precision); otherwise it computes softmax(K^T·Q)·V
// explicitly, with the QK matmul in full f32 precision.
func (t *Tensor) ScaledDotProductAttention(ctx ml.Context, key, value, mask ml.Tensor, scale float64) ml.Tensor {
	var kqMask *C.struct_ggml_tensor
	if mask != nil {
		kqMask = mask.(*Tensor).t
	}

	// Reorder axes (0, 2, 1, 3) on query and key before the matmuls.
	query := t.Permute(ctx, 0, 2, 1, 3)
	key = key.Permute(ctx, 0, 2, 1, 3)

	if t.b.flashAttention {
		// The fused kernel expects value permuted the same way.
		value = value.Permute(ctx, 0, 2, 1, 3)

		kqv := C.ggml_flash_attn_ext(ctx.(*Context).ctx, query.(*Tensor).t, key.(*Tensor).t, value.(*Tensor).t, kqMask, C.float(scale), 0, 0)
		C.ggml_flash_attn_ext_set_prec(kqv, C.GGML_PREC_F32)
		return &Tensor{b: t.b, t: kqv}
	} else {
		// kq = softmax(scale * K^T·Q + mask), applied via ggml_soft_max_ext.
		kq := key.MulmatFullPrec(ctx, query)
		kq = &Tensor{
			b: t.b,
			t: C.ggml_soft_max_ext(ctx.(*Context).ctx, kq.(*Tensor).t, kqMask, C.float(scale), 0),
		}

		kqv := value.Mulmat(ctx, kq)
		return kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)
	}
}
1340
1341
1342
1343
1344
1345
1346

func (t *Tensor) Duplicate(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_dup(ctx.(*Context).ctx, t.t),
	}
}
Michael Yang's avatar
llama4  
Michael Yang committed
1347
1348
1349
1350
1351
1352
1353

func (t *Tensor) TopK(ctx ml.Context, k int) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_top_k(ctx.(*Context).ctx, t.t, C.int(k)),
	}
}
1354
1355
1356
1357
1358
1359
1360

func (t *Tensor) Argsort(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_argsort(ctx.(*Context).ctx, t.t, C.GGML_SORT_ORDER_ASC),
	}
}
Michael Yang's avatar
Michael Yang committed
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399

func (t *Tensor) Mean(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_mean(ctx.(*Context).ctx, t.t),
	}
}

// Variance computes the population variance along the first dimension:
// mean of squared deviations from the mean.
func (t *Tensor) Variance(ctx ml.Context) ml.Tensor {
	centered := t.Add(ctx, t.Mean(ctx).Scale(ctx, -1))
	return centered.Sqr(ctx).SumRows(ctx).Scale(ctx, 1/float64(t.Dim(0)))
}

// Stddev computes the population standard deviation along the first
// dimension as the square root of Variance.
func (t *Tensor) Stddev(ctx ml.Context) ml.Tensor {
	return t.Variance(ctx).Sqrt(ctx)
}

// Sqr returns the elementwise square of t.
func (t *Tensor) Sqr(ctx ml.Context) ml.Tensor {
	tt := C.ggml_sqr(ctx.(*Context).ctx, t.t)
	return &Tensor{b: t.b, t: tt}
}

// Sqrt returns the elementwise square root of t.
func (t *Tensor) Sqrt(ctx ml.Context) ml.Tensor {
	tt := C.ggml_sqrt(ctx.(*Context).ctx, t.t)
	return &Tensor{b: t.b, t: tt}
}

// Clamp limits every element of t to the range [min, max].
func (t *Tensor) Clamp(ctx ml.Context, min, max float32) ml.Tensor {
	tt := C.ggml_clamp(ctx.(*Context).ctx, t.t, C.float(min), C.float(max))
	return &Tensor{b: t.b, t: tt}
}
Michael Yang's avatar
Michael Yang committed
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461

func (c Context) FromBytes(dtype ml.DType, s []uint8, shape ...int) ml.Tensor {
	// Unchecked to handle quantized types
	t := c.newTensor(dtype, shape)
	if len(s) > 0 {
		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
	}

	return t
}

// newTestBackend builds a minimal CPU-only backend for tests, scheduling at
// least 8192 graph nodes.
// TODO - DRY this out with New if possible
func newTestBackend(size int) *Backend {
	var cpus []*C.struct_ggml_backend_device
	for _, d := range devices() {
		if C.ggml_backend_dev_type(d) == C.GGML_BACKEND_DEVICE_TYPE_CPU {
			// only the first cpu device should be used
			cpus = append(cpus, d)
			break
		}
	}

	b := C.ggml_backend_dev_init(cpus[0], nil)
	bt := C.ggml_backend_get_default_buffer_type(b)
	C.ggml_backend_cpu_set_n_threads(b, C.int(Threads(runtime.NumCPU())))

	schedBackends := []*C.struct_ggml_backend{b}
	schedBufts := []*C.struct_ggml_backend_buffer_type{bt}
	nodes := max(8192, size)
	return &Backend{
		meta: nil,
		sched: C.ggml_backend_sched_new(
			(*C.ggml_backend_t)(unsafe.Pointer(&schedBackends[0])),
			(*C.ggml_backend_buffer_type_t)(unsafe.Pointer(&schedBufts[0])),
			C.int(len(schedBackends)),
			C.size_t(nodes),
			false,
			false,
		),
		input:         bt,
		maxGraphNodes: nodes,
		schedBackends: schedBackends,
		schedBufts:    schedBufts,
	}
}

// newTestContext creates a no-alloc ggml context for tests, sized for at
// least 8192 graph nodes.
func newTestContext(b *Backend, n int) *Context {
	n = max(8192, n)
	params := C.struct_ggml_init_params{
		mem_size: C.size_t(n)*C.ggml_tensor_overhead() + C.ggml_graph_overhead_custom(C.size_t(n), false),
		no_alloc: true,
	}
	return &Context{
		b:             b,
		maxGraphNodes: n,
		ctx:           C.ggml_init(params),
	}
}