ggml.go 39.9 KB
Newer Older
Michael Yang's avatar
Michael Yang committed
1
2
package ggml

3
4
5
6
7
8
// #cgo CPPFLAGS: -I${SRCDIR}/ggml/include
// #include <stdlib.h>
// #include <stdint.h>
// #include "ggml.h"
// #include "ggml-cpu.h"
// #include "ggml-backend.h"
Michael Yang's avatar
Michael Yang committed
9
10
11
import "C"

import (
12
	"context"
Michael Yang's avatar
Michael Yang committed
13
14
15
	"fmt"
	"io"
	"log/slog"
16
	"maps"
Michael Yang's avatar
Michael Yang committed
17
	"os"
18
	"runtime"
19
20
21
	"slices"
	"strconv"
	"strings"
Jesse Gross's avatar
Jesse Gross committed
22
	"sync"
23
	"sync/atomic"
24
	"unicode"
Michael Yang's avatar
Michael Yang committed
25
26
27
	"unsafe"

	"github.com/ollama/ollama/format"
28
29
	"github.com/ollama/ollama/fs"
	fsggml "github.com/ollama/ollama/fs/ggml"
30
	"github.com/ollama/ollama/logutil"
Michael Yang's avatar
Michael Yang committed
31
	"github.com/ollama/ollama/ml"
32
	ggml "github.com/ollama/ollama/ml/backend/ggml/ggml/src"
33
	"github.com/ollama/ollama/ml/nn/rope"
Michael Yang's avatar
Michael Yang committed
34
35
36
	"golang.org/x/sync/errgroup"
)

Jesse Gross's avatar
Jesse Gross committed
37
38
39
40
41
42
var (
	// cpus, accels, and gpus hold the enumerated ggml devices grouped by
	// type; they are populated exactly once by initDevices.
	cpus, accels, gpus []C.ggml_backend_dev_t
	// backends maps each enumerated device to its initialized backend handle.
	backends map[C.ggml_backend_dev_t]C.ggml_backend_t
)

var initDevices = sync.OnceFunc(func() {
Michael Yang's avatar
Michael Yang committed
43
44
	ggml.OnceLoad()

Jesse Gross's avatar
Jesse Gross committed
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
	backends = make(map[C.ggml_backend_dev_t]C.ggml_backend_t)
	for i := range C.ggml_backend_dev_count() {
		d := C.ggml_backend_dev_get(i)

		switch C.ggml_backend_dev_type(d) {
		case C.GGML_BACKEND_DEVICE_TYPE_CPU:
			if len(cpus) == 0 {
				// only the first cpu device should be used
				cpus = append(cpus, d)
			}
		case C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
			accels = append(accels, d)
		case C.GGML_BACKEND_DEVICE_TYPE_GPU:
			gpus = append(gpus, d)
		}

		backends[d] = C.ggml_backend_dev_init(d, nil)
	}
})
Michael Yang's avatar
Michael Yang committed
64
65

// Backend is a ggml-based implementation of ml.Backend. It owns the model
// weights, the buffer types chosen for each layer, and the graph scheduler
// shared by all contexts created from it.
type Backend struct {
	// modelPath is the location of the model data
	modelPath string

	// meta is the decoded metadata of the model file
	meta *fsggml.GGML

	// tensorLoadTargets maps from the name of the tensor in the file
	// to the name that is used by the model definition
	tensorLoadTargets map[string][]string

	// sched schedules graph computation across schedBackends;
	// schedBufts holds the matching buffer type for each backend.
	sched         C.ggml_backend_sched_t
	schedBackends []C.ggml_backend_t
	schedBufts    []C.ggml_backend_buffer_type_t

	// tensors maps tensor names to their allocated ggml tensors
	tensors map[string]*C.struct_ggml_tensor

	// input is the backend used for inputs
	input C.ggml_backend_buffer_type_t

	// layers is the backend used for repeating layers
	layers map[int]C.ggml_backend_buffer_type_t

	// requiredMemory is the cumulative memory allocations needed by the backend
	requiredMemory *ml.BackendMemory

	// btDeviceMemory maps from a buffer type to the memory allocations associated with that device
	btDeviceMemory map[C.ggml_backend_buffer_type_t]*ml.DeviceMemory

	// flashAttention records whether flash attention was requested;
	// it selects the cache layout returned by CacheConfig.
	flashAttention bool

	// maxGraphNodes is the maximum allowed number of graph nodes in this scheduler
	maxGraphNodes int

	// weightBuffers are the GGML contexts and buffers for allocating weights
	weightBuffers map[*C.struct_ggml_context]C.ggml_backend_buffer_t
}

102
103
104
105
106
107
108
109
// New decodes the model at modelPath, assigns every tensor to a device
// (CPU, accelerator, or GPU) according to params.NumGPULayers and
// params.TensorSplit, allocates the weight buffers, and builds the graph
// scheduler. Tensor *data* is not read here; call Load afterwards.
// On allocation failure it panics with ml.ErrNoMem carrying the full
// memory report so callers can retry with a different placement.
func New(modelPath string, params ml.BackendParams) (ml.Backend, error) {
	r, err := os.Open(modelPath)
	if err != nil {
		return nil, err
	}
	defer r.Close()

	// -1: decode all metadata (no tensor-count limit)
	meta, err := fsggml.Decode(r, -1)
	if err != nil {
		return nil, err
	}

	slog.Info(
		"",
		"architecture", meta.KV().Architecture(),
		"file_type", meta.KV().FileType(),
		"name", meta.KV().String("general.name"),
		"description", meta.KV().String("general.description"),
		"num_tensors", len(meta.Tensors().Items()),
		"num_key_values", len(meta.KV()),
	)

	initDevices()

	var requiredMemory ml.BackendMemory
	btDeviceMemory := make(map[C.ggml_backend_buffer_type_t]*ml.DeviceMemory)

	// deviceBufferType pairs a device with the ordered list of buffer
	// types to try when placing a tensor on that device (first is
	// preferred; CPU buffer types act as fallbacks).
	type deviceBufferType struct {
		d   C.ggml_backend_dev_t
		bts []C.ggml_backend_buffer_type_t
	}

	blocks := int(meta.KV().BlockCount())

	// create list of buffer types for the cpu
	cpuDeviceBufferType := deviceBufferType{d: C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_CPU)}
	for _, d := range append(accels, append(gpus, cpus...)...) {
		switch C.ggml_backend_dev_type(d) {
		case C.GGML_BACKEND_DEVICE_TYPE_CPU,
			C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
			cpuDeviceBufferType.bts = append(cpuDeviceBufferType.bts, C.ggml_backend_dev_buffer_type(d))
			btDeviceMemory[C.ggml_backend_dev_buffer_type(d)] = &requiredMemory.CPU
		}
	}

	requiredMemory.CPU.Name = C.GoString(C.ggml_backend_dev_name(cpuDeviceBufferType.d))
	var props C.struct_ggml_backend_dev_props
	C.ggml_backend_dev_get_props(cpuDeviceBufferType.d, &props)
	requiredMemory.CPU.ID = C.GoString(props.id)
	// blocks+1: one slot per repeating layer plus one for the output layer
	requiredMemory.CPU.Weights = make([]ml.Memory, blocks+1)
	requiredMemory.CPU.Cache = make([]ml.Memory, blocks+1)

	// create list of buffer types for each gpu
	var gpuDeviceBufferTypes []deviceBufferType
	requiredMemory.GPUs = make([]ml.DeviceMemory, len(gpus))
	for i, d := range gpus {
		bt := C.ggml_backend_dev_buffer_type(d)
		gpuDeviceBufferTypes = append(gpuDeviceBufferTypes, deviceBufferType{
			d:   d,
			bts: append([]C.ggml_backend_buffer_type_t{bt}, cpuDeviceBufferType.bts...),
		})
		btDeviceMemory[bt] = &requiredMemory.GPUs[i]
		requiredMemory.GPUs[i].Name = C.GoString(C.ggml_backend_dev_name(d))
		var props C.struct_ggml_backend_dev_props
		C.ggml_backend_dev_get_props(d, &props)
		requiredMemory.GPUs[i].ID = C.GoString(props.id)
		requiredMemory.GPUs[i].Weights = make([]ml.Memory, blocks+1)
		requiredMemory.GPUs[i].Cache = make([]ml.Memory, blocks+1)
	}

	// a TensorSplit of all zeros means the user did not request a split
	useDefaultSplit := true
	for _, s := range params.TensorSplit {
		if s != 0 {
			useDefaultSplit = false
			break
		}
	}

	// calculate splits
	splits := make([]float32, len(gpus))
	if useDefaultSplit {
		// default: split on free memory
		for i := range splits {
			var free, total C.size_t
			C.ggml_backend_dev_memory(gpus[i], &free, &total)
			splits[i] = float32(free)
		}
	} else {
		splits = params.TensorSplit
	}

	var sum float32
	// cumulative sum of all splits
	for i := range splits {
		sum += splits[i]
		splits[i] = sum
	}

	// normalize splits so each entry is a fraction in (0, 1]
	for i := range splits {
		splits[i] /= sum
	}

	// inputs always use cpu
	input := cpuDeviceBufferType

	// define a range of gpu layers. anything outside of this range is assigned to the cpu
	gpuRangeStart := max(0, blocks-params.NumGPULayers)
	gpuRangeStop := min(gpuRangeStart+params.NumGPULayers, blocks+1)
	// assignLayer picks the device for layer i by mapping the layer's
	// position within the gpu range onto the normalized cumulative splits.
	assignLayer := func(i int) deviceBufferType {
		if i < gpuRangeStart || i >= gpuRangeStop {
			return cpuDeviceBufferType
		}

		index := slices.IndexFunc(splits, func(f float32) bool { return float32(i-gpuRangeStart)/float32(gpuRangeStop-gpuRangeStart) < f })
		if index < 0 || index >= len(gpuDeviceBufferTypes) {
			return cpuDeviceBufferType
		}

		return gpuDeviceBufferTypes[index]
	}

	// repeating layers are assigned based on their index in reverse order, e.g. i / (block_count + 1)
	layers := make([]deviceBufferType, blocks)
	for i := range layers {
		layers[i] = assignLayer(i)
	}

	// outputs are assigned iff allowed by splits and configured number of gpu layers
	output := assignLayer(blocks)

	maxTensors := len(meta.Tensors().Items())
	// +1 for the output.weight tensor that may be duplicated from token_embd
	maxTensors += 1
	// each layer has at most 2 extra tensors for rope operations
	maxTensors += blocks * 2

	// tensor pairs a file tensor with an optional renamed target.
	type tensor struct {
		source *fsggml.Tensor
		target string
	}

	// some tensors are mapped to different names so keep a list
	targets := make(map[string][]string)

	// contexts are shared by tensors of the same buffer type
	ctxs := make(map[C.ggml_backend_buffer_type_t]*C.struct_ggml_context)
	// createTensor allocates (metadata-only) a ggml tensor for t in the
	// first buffer type from bts, creating the shared context on demand
	// and accounting the padded size against layer's memory report
	// (layer == -1 means "input weights", accounted separately).
	createTensor := func(t tensor, bts []C.ggml_backend_buffer_type_t, layer int) *C.struct_ggml_tensor {
		for _, bt := range bts {
			if _, ok := ctxs[bt]; !ok {
				ctxs[bt] = C.ggml_init(C.struct_ggml_init_params{
					mem_size: C.ggml_tensor_overhead() * C.size_t(maxTensors),
					no_alloc: true,
				})
			}

			targets[t.source.Name] = append(targets[t.source.Name], t.target)

			name := t.source.Name
			if t.target != "" {
				name = t.target
			}

			cname := C.CString(name)
			defer C.free(unsafe.Pointer(cname))
			// reuse an existing tensor with the same name (dedup)
			if tt := C.ggml_get_tensor(ctxs[bt], cname); tt != nil {
				return tt
			}

			kind := t.source.Kind
			if t.source.Kind == 4 {
				// transform raw mxfp4 stream to ggml mxfp4 format
				kind = 39
			} else if t.source.Kind == uint32(fsggml.TensorTypeBF16) && strings.HasSuffix(t.source.Name, "_exps.bias") {
				// transform "_exps.bias" from bf16 to fp32; add_ids only supports fp32 tensors
				kind = uint32(fsggml.TensorTypeF32)
			}

			tt := C.ggml_new_tensor(ctxs[bt], kind, C.int(len(t.source.Shape)), (*C.int64_t)(unsafe.Pointer(&t.source.Shape[0])))
			C.ggml_set_name(tt, cname)

			slog.Log(context.TODO(), logutil.LevelTrace, "created tensor", "name", name, "shape", t.source.Shape, "dtype", t.source.Kind, "buffer_type", C.GoString(C.ggml_backend_buft_name(bt)))

			size := pad(C.ggml_backend_buft_get_alloc_size(bt, tt), C.ggml_backend_buft_get_alignment(bt))
			if layer == -1 {
				// Assume that InputWeights can be allocated - they're always in system memory and can't be moved in any case
				requiredMemory.InputWeights.Status = ml.Allocated
				requiredMemory.InputWeights.Size += uint64(size)
			} else {
				btDeviceMemory[bt].Weights[layer].Size += uint64(size)
			}

			//nolint:staticcheck // TODO: check if buffer type supports this tensor
			return tt
		}

		return nil
	}

	// contains reports whether any of parts appears as a dot-separated
	// component of s.
	contains := func(s string, parts ...string) bool {
		split := strings.Split(s, ".")
		for _, part := range parts {
			if slices.Contains(split, part) {
				return true
			}
		}

		return false
	}

	for _, t := range meta.Tensors().Items() {
		switch {
		case contains(t.Name, "position_embd", "token_embd", "token_norm_embd", "token_types"):
			createTensor(tensor{source: t}, input.bts, -1)
			// models without a dedicated output tensor reuse token_embd as output
			if _, ok := meta.Tensors().GroupLayers()["output"]; !ok && t.Name == "token_embd.weight" {
				createTensor(tensor{source: t, target: "output.weight"}, output.bts, blocks)
			}
		case contains(t.Name, "cls", "output", "output_norm",
			"altup_proj", "altup_unembd_proj",
			"per_layer_token_embd", "per_layer_model_proj", "per_layer_proj_norm"):
			createTensor(tensor{source: t}, output.bts, blocks)
		case strings.HasPrefix(t.Name, "v.") || strings.HasPrefix(t.Name, "mm."):
			// TODO: assign vision tensors to the gpu if possible
			createTensor(tensor{source: t}, output.bts, blocks)
		case contains(t.Name, "rope_freqs", "rope_factors_long", "rope_factors_short"):
			// these tensors should be repeated per layer
			for i, layer := range layers {
				createTensor(tensor{
					source: t,
					target: "blk." + strconv.Itoa(i) + "." + t.Name,
				}, layer.bts, i)
			}
		default:
			// derive the layer index from the first number in the name, e.g. "blk.3..."
			layerIndex := -1
			if fields := strings.FieldsFunc(t.Name, func(r rune) bool { return !unicode.IsNumber(r) }); len(fields) > 0 {
				if i, err := strconv.Atoi(fields[0]); err == nil {
					layerIndex = i
				}
			}

			if layerIndex >= 0 {
				createTensor(tensor{source: t}, layers[layerIndex].bts, layerIndex)
			} else {
				// load all other tensors on the cpu
				createTensor(tensor{source: t}, input.bts, -1)
			}
		}
	}

	// allocate buffers for each context
	bbs := make(map[*C.struct_ggml_context]C.ggml_backend_buffer_t, len(ctxs))
	for bt, c := range ctxs {
		if C.ggml_get_first_tensor(c) == nil {
			continue
		}

		b := C.ggml_backend_alloc_ctx_tensors_from_buft(c, bt)
		// record per-layer allocation status for the memory report
		for i := range btDeviceMemory[bt].Weights {
			if btDeviceMemory[bt].Weights[i].Size != 0 {
				if b != nil {
					btDeviceMemory[bt].Weights[i].Status = ml.Allocated
				} else {
					btDeviceMemory[bt].Weights[i].Status = ml.Failed
				}
			}
		}

		if b == nil {
			// allocation failed: release everything allocated so far
			// before reporting the memory requirements to the caller
			for _, b := range bbs {
				C.ggml_backend_buffer_free(b)
			}

			for _, ctx := range ctxs {
				C.ggml_free(ctx)
			}

			panic(ml.ErrNoMem{BackendMemory: requiredMemory})
		}

		C.ggml_backend_buffer_set_usage(b, C.GGML_BACKEND_BUFFER_USAGE_WEIGHTS)
		bbs[c] = b
	}

	// Mimic llama runner logs summarizing layers and memory
	gpuLayers := 0
	for _, layer := range layers {
		if C.ggml_backend_dev_type(layer.d) == C.GGML_BACKEND_DEVICE_TYPE_GPU {
			gpuLayers++
		}
	}
	slog.Info(fmt.Sprintf("offloading %d repeating layers to GPU", gpuLayers))

	switch C.ggml_backend_dev_type(output.d) {
	case C.GGML_BACKEND_DEVICE_TYPE_CPU:
		slog.Info("offloading output layer to CPU")
	case C.GGML_BACKEND_DEVICE_TYPE_GPU:
		slog.Info("offloading output layer to GPU")
		gpuLayers++
	case C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
		slog.Info("offloading output layer to ACCEL")
	}
	slog.Info(fmt.Sprintf("offloaded %d/%d layers to GPU", gpuLayers, len(layers)+1))

	for bs := range maps.Values(bbs) {
		slog.Info("model weights", "buffer", C.GoString(C.ggml_backend_buffer_name(bs)), "size", format.HumanBytes2(uint64(C.ggml_backend_buffer_get_size(bs))))
	}

	// map tensor names to tensors for easy lookup later
	tensors := make(map[string]*C.struct_ggml_tensor)
	for _, c := range ctxs {
		for t := C.ggml_get_first_tensor(c); t != nil; t = C.ggml_get_next_tensor(c, t) {
			tensors[C.GoString(C.ggml_get_name(t))] = t
		}
	}

	// map devices to backend buffer types so new tensors can be assigned to the correct device
	deviceBufferTypes := make(map[C.ggml_backend_dev_t]C.ggml_backend_buffer_type_t)

	// create backends and buffer types used for the compute graph scheduler
	var schedBackends []C.ggml_backend_t
	var schedBufts []C.ggml_backend_buffer_type_t
	for _, d := range append(gpus, append(accels, cpus...)...) {
		b := backends[d]
		bt := C.ggml_backend_get_default_buffer_type(b)

		deviceBufferTypes[d] = bt

		schedBackends = append(schedBackends, b)
		schedBufts = append(schedBufts, bt)

		if C.ggml_backend_is_cpu(b) {
			// set number of threads for cpu backend
			C.ggml_backend_cpu_set_n_threads(b, C.int(Threads(params.NumThreads)))
		}
	}

	maxGraphNodes := max(8192, len(meta.Tensors().Items())*5)
	return &Backend{
		modelPath:         modelPath,
		flashAttention:    params.FlashAttention,
		meta:              meta,
		tensorLoadTargets: targets,
		tensors:           tensors,
		sched: C.ggml_backend_sched_new(
			(*C.ggml_backend_t)(unsafe.Pointer(&schedBackends[0])),
			(*C.ggml_backend_buffer_type_t)(unsafe.Pointer(&schedBufts[0])),
			C.int(len(schedBackends)),
			C.size_t(maxGraphNodes),
			C._Bool(false),
			C._Bool(false),
		),
		schedBackends: schedBackends,
		schedBufts:    schedBufts,
		input:         deviceBufferTypes[input.d],
		layers: func() map[int]C.ggml_backend_buffer_type_t {
			m := make(map[int]C.ggml_backend_buffer_type_t)
			for i, layer := range layers {
				m[i] = deviceBufferTypes[layer.d]
			}
			return m
		}(),
		requiredMemory: &requiredMemory,
		btDeviceMemory: btDeviceMemory,
		maxGraphNodes:  maxGraphNodes,
		weightBuffers:  bbs,
	}, nil
}

// init registers this package as the "ggml" backend implementation.
func init() {
	ml.RegisterBackend("ggml", New)
}

Jesse Gross's avatar
Jesse Gross committed
473
474
475
476
477
478
479
480
481
482
483
484
485
func (b *Backend) Close() {
	if b == nil {
		return
	}

	for ctx, b := range b.weightBuffers {
		C.ggml_backend_buffer_free(b)
		C.ggml_free(ctx)
	}

	C.ggml_backend_sched_free(b.sched)
}

486
// Load reads the model's tensor data from disk into the buffers allocated
// by New, converting on-disk formats to the ggml layout where they differ
// (raw mxfp4 and bf16 "_exps.bias" tensors). Tensors are loaded
// concurrently, bounded by GOMAXPROCS; progress, if non-nil, is called
// with the completed fraction of total tensor bytes.
func (b *Backend) Load(ctx context.Context, progress func(float32)) error {
	var doneBytes atomic.Uint64
	totalBytes := uint64(b.meta.Length) - b.meta.Tensors().Offset

	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(runtime.GOMAXPROCS(0))
	for _, t := range b.meta.Tensors().Items() {
		t := t // capture loop variable for the goroutine (pre-Go 1.22 semantics)
		g.Go(func() error {
			// Resolve all destination tensors up front; a file tensor may be
			// copied into multiple targets (see tensorLoadTargets in New).
			tts := make([]*C.struct_ggml_tensor, max(1, len(b.tensorLoadTargets[t.Name])))
			for i := range tts {
				target := b.tensorLoadTargets[t.Name][i]
				if target == "" {
					target = t.Name
				}

				tt, ok := b.tensors[target]
				if !ok {
					return fmt.Errorf("unassigned tensor: %s", t.Name)
				}

				tts[i] = tt
			}

			// Create a new FD for each goroutine so that each FD is read sequentially, rather than
			// seeking around within an FD shared between all goroutines.
			file, err := os.Open(b.modelPath)
			if err != nil {
				slog.Warn("file open error", "file", b.modelPath, "error", err)
				return err
			}
			defer file.Close()
			sr := io.NewSectionReader(file, int64(b.meta.Tensors().Offset+t.Offset), int64(t.Size()))

			if t.Kind == 4 && tts[0]._type == 39 {
				// source is mxfp4, target is ggml mxfp4

				const BS = 17                             // MXFP4 block size
				bts := make([]byte, 8*BS*format.KibiByte) // ~128k block aligned
				var s uint64
				for s < t.Size() {
					// Stop if either the parent context has been canceled or if any of the other tensors returned an error
					if err := ctx.Err(); err != nil {
						return err
					}
					n, err := io.ReadFull(sr, bts[:min(len(bts), int(t.Size()-s))])
					if err != nil {
						slog.Warn("file read error", "file", b.modelPath, "error", err)
						return err
					}
					// Rearrange each 17-byte block in place: byte 0 is left as-is
					// (presumably the shared scale — confirm against the mxfp4 spec);
					// the remaining 16 bytes of packed 4-bit values are reordered
					// into the interleaved nibble layout ggml expects.
					for j := range n / BS {
						for i := 1; i < BS; i++ {
							// swap nibbles
							t_lo := bts[j*BS+i] & 0x0F
							t_hi := bts[j*BS+i] & 0xF0
							bts[j*BS+i] = (t_lo << 4) | (t_hi >> 4)
						}
						// transform aaaa...bbbb... to abababab...
						oi := 0
						tmp := [16]byte{}
						for i := 1; i < 9; i++ {
							blk_a0 := bts[j*BS+i] & 0xF0
							blk_a1 := bts[j*BS+i] << 4
							blk_b0 := bts[j*BS+i+8] >> 4
							blk_b1 := bts[j*BS+i+8] & 0x0F
							// swap once more
							out0 := blk_a0 | blk_b0
							out1 := blk_a1 | blk_b1
							out_h0 := out0 & 0xF0
							out_l0 := out0 & 0x0F
							out_h1 := out1 & 0xF0
							out_l1 := out1 & 0x0F
							out0 = (out_h0 >> 4) | (out_l0 << 4)
							out1 = (out_h1 >> 4) | (out_l1 << 4)
							tmp[oi] = out0
							oi++
							tmp[oi] = out1
							oi++
						}
						for i := range tmp {
							bts[j*BS+i+1] = tmp[i]
						}
					}

					for _, tt := range tts {
						C.ggml_backend_tensor_set(tt, unsafe.Pointer(&bts[0]), C.size_t(s), C.size_t(n))
					}

					s += uint64(n)

					if progress != nil {
						done := doneBytes.Add(uint64(n))
						progress(float32(done) / float32(totalBytes))
					}
				}
				return nil
			} else if strings.HasSuffix(t.Name, "_exps.bias") && t.Kind == 30 && tts[0]._type == 0 {
				// source is bf16, target is ggml fp32

				// data is bf16 but we need to convert to fp32
				bts := make([]byte, 128*format.KibiByte)
				var e uint64
				for e < t.Elements() {
					// Stop if either the parent context has been canceled or if any of the other tensors returned an error
					if err := ctx.Err(); err != nil {
						return err
					}
					// bf16 is 2 bytes per element; read whole elements only
					n, err := io.ReadFull(sr, bts[:min(len(bts), int(t.Elements()-e)*2)])
					if err != nil {
						slog.Warn("file read error", "file", b.modelPath, "error", err)
						return err
					}
					fp32 := ConvertToF32(bts, uint32(fsggml.TensorTypeBF16), uint64(n/2))

					// offset e*4 and length n*2: n/2 elements at 4 bytes each
					for _, tt := range tts {
						C.ggml_backend_tensor_set(tt, unsafe.Pointer(&fp32[0]), C.size_t(e*4), C.size_t(n*2))
					}
					e += uint64(n / 2)
					if progress != nil {
						done := doneBytes.Add(uint64(n))
						progress(float32(done) / float32(totalBytes))
					}
				}
				return nil
			}

			// default path: copy raw bytes straight into the target tensor(s)
			bts := make([]byte, 128*format.KibiByte)

			var s uint64
			for s < t.Size() {
				// Stop if either the parent context has been canceled or if any of the other tensors returned an error
				if err := ctx.Err(); err != nil {
					return err
				}

				n, err := io.ReadFull(sr, bts[:min(len(bts), int(t.Size()-s))])
				if err != nil {
					slog.Warn("file read error", "file", b.modelPath, "error", err)
					return err
				}

				for _, tt := range tts {
					C.ggml_backend_tensor_set(tt, unsafe.Pointer(&bts[0]), C.size_t(s), C.size_t(n))
				}

				s += uint64(n)

				if progress != nil {
					done := doneBytes.Add(uint64(n))
					progress(float32(done) / float32(totalBytes))
				}
			}

			return nil
		})
	}

	if err := g.Wait(); err != nil {
		return err
	}

	return nil
}

650
651
652
653
// BackendMemory returns a snapshot of the memory requirements accumulated
// while building and reserving this backend.
func (b *Backend) BackendMemory() ml.BackendMemory {
	return *b.requiredMemory
}

654
// Config returns the model's key-value metadata.
func (b *Backend) Config() fs.Config {
	return b.meta.KV()
}

func (b *Backend) Get(name string) ml.Tensor {
659
660
	if t, ok := b.tensors[name]; ok {
		return &Tensor{b: b, t: t}
Michael Yang's avatar
Michael Yang committed
661
662
663
664
665
666
	}

	return nil
}

// NewContext creates a context sized to the backend's maximum graph node count.
func (b *Backend) NewContext() ml.Context {
	return b.NewContextSize(b.maxGraphNodes)
}

func (b *Backend) NewContextSize(n int) ml.Context {
Jesse Gross's avatar
Jesse Gross committed
671
672
673
674
	if n > b.maxGraphNodes {
		panic(fmt.Errorf("requested number of graph nodes (%v) for new context exceeds maximum (%v)", n, b.maxGraphNodes))
	}

675
	var allocatedBuffers []C.ggml_backend_buffer_t
676

Michael Yang's avatar
Michael Yang committed
677
	return &Context{
678
679
		b:             b,
		maxGraphNodes: n,
680
		ctx: C.ggml_init(C.struct_ggml_init_params{
681
			mem_size: C.size_t(n)*C.ggml_tensor_overhead() + C.ggml_graph_overhead_custom(C.size_t(n), false),
682
683
			no_alloc: true,
		}),
684
		allocatedBuffers: &allocatedBuffers,
685
		layer:            -1,
Michael Yang's avatar
Michael Yang committed
686
687
688
	}
}

689
func (b *Backend) CacheConfig() ml.CacheConfig {
690
691
692
693
694
	if b.flashAttention {
		return ml.CacheConfig{CachePadding: 256, MaskDType: ml.DTypeF16, MaskBatchPadding: C.GGML_KQ_MASK_PAD}
	} else {
		return ml.CacheConfig{CachePadding: 32, PermutedV: true}
	}
695
696
}

Michael Yang's avatar
Michael Yang committed
697
// Context builds and runs a single compute graph against a Backend.
// Contexts derived via Input or Layer share the same underlying ggml
// context and buffer list but target a different buffer type.
type Context struct {
	// b is the owning backend
	b *Backend

	// ctx is the ggml context holding this graph's tensor metadata;
	// graph is created lazily on the first Forward call
	ctx   *C.struct_ggml_context
	graph *C.struct_ggml_cgraph

	// buft is the buffer type used for new tensors
	buft C.ggml_backend_buffer_type_t

	// allocatedBuffers are buffers for tensors that we have allocated in this context
	// so that we can free them when we close the context
	allocatedBuffers *[]C.ggml_backend_buffer_t

	// maxGraphNodes is the maximum allowed number of graph nodes in this context
	maxGraphNodes int

	// layer is the graph layer that this context is allocating for - assumed to be cache
	layer int
}

717
func (c *Context) Input() ml.Context {
Michael Yang's avatar
Michael Yang committed
718
	if c.b.input != nil {
719
		return &Context{
720
721
722
723
724
			b:                c.b,
			ctx:              c.ctx,
			buft:             c.b.input,
			allocatedBuffers: c.allocatedBuffers,
			maxGraphNodes:    c.maxGraphNodes,
725
			layer:            -1,
726
727
728
		}
	}

729
	return c
730
731
}

732
func (c *Context) Layer(i int) ml.Context {
733
	if buft, ok := c.b.layers[i]; ok {
734
		return &Context{
735
736
737
738
739
			b:                c.b,
			ctx:              c.ctx,
			buft:             buft,
			allocatedBuffers: c.allocatedBuffers,
			maxGraphNodes:    c.maxGraphNodes,
740
			layer:            i,
741
742
743
		}
	}

744
	return c
745
746
}

747
func (c *Context) Forward(tensors ...ml.Tensor) ml.Context {
Michael Yang's avatar
Michael Yang committed
748
	if c.graph == nil {
749
		c.graph = C.ggml_new_graph_custom(c.ctx, C.size_t(c.maxGraphNodes), false)
Michael Yang's avatar
Michael Yang committed
750
751
	}

752
753
754
755
756
	for _, tensor := range tensors {
		C.ggml_build_forward_expand(c.graph, tensor.(*Tensor).t)
	}

	return c
Michael Yang's avatar
Michael Yang committed
757
758
}

759
// Compute launches the current graph asynchronously on the scheduler and
// attaches a lazy synchronization hook to each requested output tensor —
// presumably invoked when the tensor's data is first read (see Tensor);
// confirm against the Tensor implementation.
func (c *Context) Compute(tensors ...ml.Tensor) {
	if status := C.ggml_backend_sched_graph_compute_async(c.b.sched, c.graph); status != C.GGML_STATUS_SUCCESS {
		panic(fmt.Errorf("error computing ggml graph: %v", status))
	}
	// reset the scheduler so the next graph can be scheduled while this
	// one is still running
	C.ggml_backend_sched_reset(c.b.sched)

	// all requested tensors share one closure so the scheduler is
	// synchronized at most once, on the first access
	needSync := true
	sync := func() {
		if needSync {
			C.ggml_backend_sched_synchronize(c.b.sched)
			needSync = false
		}
	}

	for _, t := range tensors {
		// zero-size tensors carry no data worth waiting for
		if C.ggml_nbytes(t.(*Tensor).t) > 0 {
			t.(*Tensor).sync = sync
		}
	}
}

780
781
// Reserve performs a trial allocation of the current graph to measure
// per-backend buffer requirements without computing anything. The
// measured sizes are recorded in btDeviceMemory; if the reservation
// fails, it panics with ml.ErrNoMem carrying the full memory report.
func (c *Context) Reserve() {
	reserved := C.ggml_backend_sched_reserve(c.b.sched, c.graph)

	slog.Debug("compute graph", "nodes", C.ggml_graph_n_nodes(c.graph), "splits", C.ggml_backend_sched_get_n_splits(c.b.sched))

	// Reserve may get called multiple times for different graphs - we just want the last run, which will contain the max allocations
	for _, bt := range c.b.schedBufts {
		c.b.btDeviceMemory[bt].Graph = ml.Memory{}
	}

	for i := range c.b.schedBackends {
		bufferStatus := C.ggml_backend_sched_get_attempted_buffer_size(c.b.sched, c.b.schedBackends[i])

		graph := &c.b.btDeviceMemory[c.b.schedBufts[i]].Graph
		graph.Size += uint64(bufferStatus.size)
		// once any backend sharing this buffer type fails, the graph
		// memory stays marked Failed
		if bufferStatus.allocated && graph.Status != ml.Failed {
			graph.Status = ml.Allocated
		} else {
			graph.Status = ml.Failed
		}

		slog.Info("compute graph", "backend", C.GoString(C.ggml_backend_name(c.b.schedBackends[i])), "buffer_type", C.GoString(C.ggml_backend_buft_name(c.b.schedBufts[i])),
			"size", format.HumanBytes2(uint64(bufferStatus.size)))
	}

	if !reserved {
		panic(ml.ErrNoMem{BackendMemory: *c.b.requiredMemory})
	}
}

810
// MaxGraphNodes returns the maximum number of graph nodes this context supports.
func (c *Context) MaxGraphNodes() int {
	return c.maxGraphNodes
}

814
815
816
func shapeToGGML(shape []int) *C.int64_t {
	sh := make([]C.int64_t, len(shape))
	for i, s := range shape {
817
		sh[i] = C.int64_t(s)
818
819
820
821
822
	}

	return &sh[0]
}

823
824
825
826
func pad(length, pad C.size_t) C.size_t {
	return ((length + pad - 1) / pad) * pad
}

827
// newTensor allocates a tensor of the given dtype and shape in the context's
// current buffer type (set by Input or Layer), recording the allocation in
// the per-layer cache accounting. It panics on unsupported dtypes, invalid
// shapes, or allocation failure (ml.ErrNoMem).
func (c *Context) newTensor(dtype ml.DType, shape []int) ml.Tensor {
	if c.buft == nil {
		panic("set Input or Layer before creating tensors")
	}

	// Map the ml-level dtype onto the corresponding ggml type constant.
	var cdtype uint32
	switch dtype {
	case ml.DTypeF32:
		cdtype = C.GGML_TYPE_F32
	case ml.DTypeF16:
		cdtype = C.GGML_TYPE_F16
	case ml.DTypeQ80:
		cdtype = C.GGML_TYPE_Q8_0
	case ml.DTypeQ40:
		cdtype = C.GGML_TYPE_Q4_0
	case ml.DTypeI32:
		cdtype = C.GGML_TYPE_I32
	case ml.DTypeMXFP4:
		cdtype = C.GGML_TYPE_MXFP4
	default:
		panic("unsupported dtype")
	}

	if len(shape) < 1 || shape[0] == 0 {
		// Zero-element tensor: create a 1-D tensor of length 0 and skip
		// buffer allocation/accounting entirely.
		var shape C.int64_t = 0
		return &Tensor{b: c.b, t: C.ggml_new_tensor(c.ctx, cdtype, 1, &shape)}
	} else if len(shape) > 4 {
		panic("unsupported number of dimensions")
	}

	for _, dim := range shape {
		if dim < 1 {
			panic("invalid shape")
		}
	}

	t := C.ggml_new_tensor(c.ctx, cdtype, C.int(len(shape)), shapeToGGML(shape))
	// Round the allocation up to the buffer type's alignment.
	size := pad(C.ggml_backend_buft_get_alloc_size(c.buft, t), C.ggml_backend_buft_get_alignment(c.buft))

	b := C.ggml_backend_buft_alloc_buffer(c.buft, size)
	// Track the (attempted) allocation against this layer's cache memory,
	// even when it failed, so required-memory reporting stays accurate.
	if c.layer >= 0 {
		cache := &c.b.btDeviceMemory[c.buft].Cache[c.layer]

		cache.Size += uint64(size)
		if b != nil {
			cache.Status = ml.Allocated
		} else {
			cache.Status = ml.Failed
		}
	}

	if b == nil {
		panic(ml.ErrNoMem{BackendMemory: *c.b.requiredMemory})
	}

	// The buffer is owned by the context and freed in Close.
	*c.allocatedBuffers = append(*c.allocatedBuffers, b)
	C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b))
	return &Tensor{b: c.b, t: t}
}

887
// Empty allocates an uninitialized tensor of the given dtype and shape.
func (c *Context) Empty(dtype ml.DType, shape ...int) ml.Tensor {
	return c.newTensor(dtype, shape)
}

// Zeros allocates a tensor of the given dtype and shape and fills it with
// zeros via ggml_set_zero.
func (c *Context) Zeros(dtype ml.DType, shape ...int) ml.Tensor {
	t := c.newTensor(dtype, shape)
	C.ggml_set_zero(t.(*Tensor).t)
	return t
}

897
func checkShape[S ~[]E, E any](s S, shape ...int) {
Michael Yang's avatar
Michael Yang committed
898
	n := len(s)
Jesse Gross's avatar
Jesse Gross committed
899
900

	if n == 0 {
901
		return
Jesse Gross's avatar
Jesse Gross committed
902
903
	}

Michael Yang's avatar
Michael Yang committed
904
905
906
907
908
	for _, v := range shape {
		n /= v
	}

	if n != 1 {
909
		panic(fmt.Errorf("invalid shape: %v", shape))
Michael Yang's avatar
Michael Yang committed
910
911
912
	}
}

913
914
// FromFloatSlice creates an F32 tensor of the given shape and copies s into
// its backend buffer. The element count of s must match the shape product.
func (c *Context) FromFloatSlice(s []float32, shape ...int) ml.Tensor {
	checkShape(s, shape...)

	t := c.newTensor(ml.DTypeF32, shape)

	// Skip the copy for zero-element tensors; &s[0] would panic.
	if len(s) > 0 {
		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
	}

	return t
}

// FromIntSlice creates an I32 tensor of the given shape and copies s into
// its backend buffer. The element count of s must match the shape product.
func (c *Context) FromIntSlice(s []int32, shape ...int) ml.Tensor {
	checkShape(s, shape...)

	t := c.newTensor(ml.DTypeI32, shape)

	// Skip the copy for zero-element tensors; &s[0] would panic.
	if len(s) > 0 {
		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
	}

	return t
}

Michael Yang's avatar
arange  
Michael Yang committed
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
// Arange returns a 1-D tensor with values [start, stop) spaced by step.
// Only F32 (via ggml_arange) and I32 (materialized on the Go side) are
// supported; other dtypes panic.
func (c Context) Arange(start, stop, step float32, dtype ml.DType) ml.Tensor {
	switch dtype {
	case ml.DTypeF32:
		// ggml_arange creates a float32 tensor
		return &Tensor{
			b: c.b,
			t: C.ggml_arange(c.ctx, C.float(start), C.float(stop), C.float(step)),
		}
	case ml.DTypeI32:
		// ggml_cast does not support float32 to int32 conversion
		arange := make([]int32, 0, int((stop-start)/step))
		for i := start; i < stop; i += step {
			arange = append(arange, int32(i))
		}

		return c.Input().FromIntSlice(arange, len(arange))
	default:
		panic("unsupported dtype for arange")
	}
}

Michael Yang's avatar
Michael Yang committed
958
959
// Close releases all backend buffers allocated through this context and then
// frees the underlying ggml context. It is safe to call on a nil receiver.
func (c *Context) Close() {
	if c != nil {
		for _, b := range *c.allocatedBuffers {
			C.ggml_backend_buffer_free(b)
		}
		*c.allocatedBuffers = nil

		C.ggml_free(c.ctx)
	}
}

// Tensor wraps a ggml tensor together with the backend that owns it.
type Tensor struct {
	b *Backend
	// t is the underlying ggml tensor handle.
	t *C.struct_ggml_tensor
	// sync, when non-nil, flushes pending scheduler work so the tensor's
	// data can be read back (see Bytes and Floats).
	sync func()
}

// LogValue implements slog.LogValuer, exposing the tensor's name, ggml type
// name, and shape for structured logging.
func (t *Tensor) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("name", C.GoString(C.ggml_get_name(t.t))),
		slog.String("type", C.GoString(C.ggml_type_name(t.t._type))),
		slog.Any("shape", t.Shape()),
	)
}

983
984
// Dim returns the number of elements along dimension n.
func (t *Tensor) Dim(n int) int {
	return int(t.t.ne[n])
}

// Stride returns the stride, in bytes, of dimension n.
func (t *Tensor) Stride(n int) int {
	return int(t.t.nb[n])
}

// Shape returns the tensor's dimensions, one entry per ggml dimension.
func (t *Tensor) Shape() []int {
	shape := make([]int, C.ggml_n_dims(t.t))
	for i := range shape {
		shape[i] = t.Dim(i)
	}

	return shape
}

1000
1001
1002
1003
1004
1005
1006
1007
1008
// Bytes synchronizes pending computation and copies the tensor's raw bytes
// back from the backend. It returns nil when the tensor has no sync hook
// (i.e. it is not an output of a computed graph).
func (t *Tensor) Bytes() (data []byte) {
	if t.sync != nil {
		data = make([]byte, C.ggml_nbytes(t.t))

		t.sync()
		C.ggml_backend_tensor_get(t.t, unsafe.Pointer(&data[0]), 0, C.ggml_nbytes(t.t))
	}

	return
}

// Floats synchronizes pending computation and copies the tensor's elements
// back as float32 values. It returns nil when the tensor has no sync hook.
// NOTE(review): assumes the tensor's element type is F32 — the byte copy is
// reinterpreted directly into the float32 slice; confirm at call sites.
func (t *Tensor) Floats() (data []float32) {
	if t.sync != nil {
		data = make([]float32, C.ggml_nelements(t.t))

		t.sync()
		C.ggml_backend_tensor_get(t.t, unsafe.Pointer(&data[0]), 0, C.ggml_nbytes(t.t))
	}

	return
}

// DType maps the underlying ggml element type back to the ml-level DType,
// returning DTypeOther for types without an ml equivalent.
func (t *Tensor) DType() ml.DType {
	switch t.t._type {
	case C.GGML_TYPE_F32:
		return ml.DTypeF32
	case C.GGML_TYPE_F16:
		return ml.DTypeF16
	case C.GGML_TYPE_Q8_0:
		return ml.DTypeQ80
	case C.GGML_TYPE_Q4_0:
		return ml.DTypeQ40
	case C.GGML_TYPE_I32:
		return ml.DTypeI32
	case C.GGML_TYPE_MXFP4:
		return ml.DTypeMXFP4
	default:
		return ml.DTypeOther
	}
}

1041
1042
1043
1044
1045
1046
1047
func (t *Tensor) Neg(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_neg(ctx.(*Context).ctx, t.t),
	}
}

Michael Yang's avatar
Michael Yang committed
1048
1049
func (t *Tensor) Add(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
1050
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1051
1052
1053
1054
		t: C.ggml_add(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

Michael Yang's avatar
Michael Yang committed
1055
1056
1057
1058
1059
1060
1061
func (t *Tensor) Sub(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sub(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
// Repeat tiles t n times along dimension dim. It builds a template tensor of
// the target shape and uses ggml_repeat to broadcast t into it.
func (t *Tensor) Repeat(ctx ml.Context, dim, n int) ml.Tensor {
	if dim < 0 || dim >= C.GGML_MAX_DIMS {
		panic("invalid dimension")
	}

	// Target shape: same as t except the repeated dimension grows n-fold.
	shape := make([]C.int64_t, C.GGML_MAX_DIMS)
	for i := range C.GGML_MAX_DIMS {
		if i == dim {
			shape[i] = C.int64_t(t.Dim(i) * n)
		} else {
			shape[i] = C.int64_t(t.Dim(i))
		}
	}

	// tmpl only supplies the output shape; ggml_repeat fills the result.
	tmpl := C.ggml_new_tensor(ctx.(*Context).ctx, t.t._type, C.int(len(shape)), unsafe.SliceData(shape))
	return &Tensor{
		b: t.b,
		t: C.ggml_repeat(ctx.(*Context).ctx, t.t, tmpl),
	}
}

Michael Yang's avatar
Michael Yang committed
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
func (t *Tensor) Stack(ctx ml.Context, dim int, s ...ml.Tensor) ml.Tensor {
	if len(s) > 0 {
		return t.Concat(ctx, s[0].Stack(ctx, dim, s[1:]...), dim)
	}

	return t
}

func (t *Tensor) Concat(ctx ml.Context, t2 ml.Tensor, dim int) ml.Tensor {
	return &Tensor{
1093
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1094
1095
1096
1097
		t: C.ggml_concat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(dim)),
	}
}

Michael Yang's avatar
Michael Yang committed
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
// Contiguous returns a contiguous copy of t. With no shape arguments it
// preserves the current shape (ggml_cont); with 1-4 arguments it makes the
// result contiguous in the given shape via the ggml_cont_Nd variants.
func (t *Tensor) Contiguous(ctx ml.Context, shape ...int) ml.Tensor {
	switch len(shape) {
	case 0:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont(ctx.(*Context).ctx, t.t),
		}
	case 1:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0])),
		}
	case 2:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_2d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1])),
		}
	case 3:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_3d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2])),
		}
	case 4:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_4d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2]), C.int64_t(shape[3])),
		}
	default:
		panic("unsupported number of dimensions")
	}
}

func (t *Tensor) Mul(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
1132
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1133
1134
1135
1136
		t: C.ggml_mul(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

1137
1138
1139
1140
1141
1142
1143
func (t *Tensor) Div(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_div(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

Michael Yang's avatar
Michael Yang committed
1144
1145
// Mulmat builds a matrix-multiplication node (ggml_mul_mat) of t and t2.
func (t *Tensor) Mulmat(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_mul_mat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

// MulmatFullPrec is Mulmat with the node's precision forced to F32,
// set before the node is returned so the scheduler sees it.
func (t *Tensor) MulmatFullPrec(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	mul := C.ggml_mul_mat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t)
	C.ggml_mul_mat_set_prec(mul, C.GGML_PREC_F32)

	return &Tensor{
		b: t.b,
		t: mul,
	}
}

// MulmatID builds an expert-indexed matrix multiplication (ggml_mul_mat_id),
// selecting rows of t by ids before multiplying with t2.
func (t *Tensor) MulmatID(ctx ml.Context, t2, ids ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_mul_mat_id(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, ids.(*Tensor).t),
	}
}

// AddID builds an indexed add node (ggml_add_id), adding rows of t2 selected
// by ids to t.
func (t *Tensor) AddID(ctx ml.Context, t2, ids ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_add_id(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, ids.(*Tensor).t),
	}
}

Michael Yang's avatar
Michael Yang committed
1175
// LayerNorm normalizes t (ggml_norm with epsilon eps), then optionally
// applies elementwise weight w and bias b. Both w and b may be nil; b is
// only applied when w is also present.
func (t *Tensor) LayerNorm(ctx ml.Context, w, b ml.Tensor, eps float32) ml.Tensor {
	tt := C.ggml_norm(ctx.(*Context).ctx, t.t, C.float(eps))
	if w != nil {
		tt = C.ggml_mul(ctx.(*Context).ctx, tt, w.(*Tensor).t)
		if b != nil {
			tt = C.ggml_add(ctx.(*Context).ctx, tt, b.(*Tensor).t)
		}
	}

	return &Tensor{b: t.b, t: tt}
}

// RMSNorm applies RMS normalization (ggml_rms_norm with epsilon eps), then
// optionally scales by weight w (may be nil).
func (t *Tensor) RMSNorm(ctx ml.Context, w ml.Tensor, eps float32) ml.Tensor {
	tt := C.ggml_rms_norm(ctx.(*Context).ctx, t.t, C.float(eps))
	if w != nil {
		tt = C.ggml_mul(ctx.(*Context).ctx, tt, w.(*Tensor).t)
	}

	return &Tensor{b: t.b, t: tt}
}

1196
// Pad zero-pads t by the given per-dimension amounts (ggml_pad). Exactly
// four amounts are required and the fourth must be zero.
func (t *Tensor) Pad(ctx ml.Context, shape ...int) ml.Tensor {
	if len(shape) != 4 {
		panic("expected 4 dimensions")
	} else if shape[3] != 0 {
		// Guard kept from the original: 4-D padding is rejected here.
		panic("cuda does not support 4d tensors")
	}

	return &Tensor{
		b: t.b,
		t: C.ggml_pad(ctx.(*Context).ctx, t.t, C.int(shape[0]), C.int(shape[1]), C.int(shape[2]), C.int(shape[3])),
	}
}

// Permute reorders t's axes according to the four given axis indices
// (ggml_permute). The result is a view; use Contiguous to materialize it.
func (t *Tensor) Permute(ctx ml.Context, shape ...int) ml.Tensor {
	if len(shape) != 4 {
		panic("expected 4 dimensions")
	}

	return &Tensor{
		b: t.b,
		t: C.ggml_permute(ctx.(*Context).ctx, t.t, C.int(shape[0]), C.int(shape[1]), C.int(shape[2]), C.int(shape[3])),
	}
}

// Rows gathers rows of t selected by the indices in t2 (ggml_get_rows).
func (t *Tensor) Rows(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_get_rows(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

// Copy builds a node copying t's data into t2 (ggml_cpy), returning the
// resulting node.
func (t *Tensor) Copy(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_cpy(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

1234
// Reshape returns a view of t with the given 1-4 dimensional shape,
// dispatching to the matching ggml_reshape_Nd variant. The element count
// must match t's; other dimension counts panic.
func (t *Tensor) Reshape(ctx ml.Context, shape ...int) ml.Tensor {
	switch len(shape) {
	case 1:
		return &Tensor{
			b: t.b,
			t: C.ggml_reshape_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0])),
		}
	case 2:
		return &Tensor{
			b: t.b,
			t: C.ggml_reshape_2d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1])),
		}
	case 3:
		return &Tensor{
			b: t.b,
			t: C.ggml_reshape_3d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2])),
		}
	case 4:
		return &Tensor{
			b: t.b,
			t: C.ggml_reshape_4d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2]), C.int64_t(shape[3])),
		}
	default:
		panic("unsupported number of dimensions")
	}
}

func (t *Tensor) Scale(ctx ml.Context, s float64) ml.Tensor {
	return &Tensor{
1263
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1264
1265
1266
1267
		t: C.ggml_scale(ctx.(*Context).ctx, t.t, (C.float)(s)),
	}
}

1268
1269
1270
1271
1272
1273
1274
func (t *Tensor) SumRows(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sum_rows(ctx.(*Context).ctx, t.t),
	}
}

Michael Yang's avatar
Michael Yang committed
1275
1276
func (t *Tensor) Softmax(ctx ml.Context) ml.Tensor {
	return &Tensor{
1277
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1278
1279
1280
1281
		t: C.ggml_soft_max(ctx.(*Context).ctx, t.t),
	}
}

1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
// Sin builds an elementwise sine node (ggml_sin).
func (t *Tensor) Sin(ctx ml.Context) ml.Tensor {
	out := C.ggml_sin(ctx.(*Context).ctx, t.t)
	return &Tensor{b: t.b, t: out}
}

// Cos builds an elementwise cosine node (ggml_cos).
func (t *Tensor) Cos(ctx ml.Context) ml.Tensor {
	out := C.ggml_cos(ctx.(*Context).ctx, t.t)
	return &Tensor{b: t.b, t: out}
}

Michael Yang's avatar
Michael Yang committed
1296
1297
// Tanh builds an elementwise tanh node. NOTE(review): this uses the
// in-place ggml variant, so it assumes the caller no longer needs t's
// original values — confirm at call sites.
func (t *Tensor) Tanh(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_tanh_inplace(ctx.(*Context).ctx, t.t),
	}
}

// Sigmoid builds an elementwise sigmoid node. NOTE(review): in-place
// variant, same caveat as Tanh.
func (t *Tensor) Sigmoid(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sigmoid_inplace(ctx.(*Context).ctx, t.t),
	}
}

Michael Yang's avatar
Michael Yang committed
1310
1311
1312
1313
// View returns a strided view into t starting at the given byte offset.
// The variadic shape interleaves dimensions and byte strides:
//
//	1 arg:  dim0
//	3 args: dim0, nb1, dim1
//	5 args: dim0, nb1, dim1, nb2, dim2
//	7 args: dim0, nb1, dim1, nb2, dim2, nb3, dim3
//
// Other argument counts panic.
func (t *Tensor) View(ctx ml.Context, offset int, shape ...int) ml.Tensor {
	switch len(shape) {
	case 1:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.size_t(offset)),
		}
	case 3:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_2d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]),
				C.size_t(shape[1]),
				C.size_t(offset)),
		}
	case 5:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_3d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]), C.int64_t(shape[4]),
				C.size_t(shape[1]), C.size_t(shape[3]),
				C.size_t(offset)),
		}
	case 7:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_4d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]), C.int64_t(shape[4]), C.int64_t(shape[6]),
				C.size_t(shape[1]), C.size_t(shape[3]), C.size_t(shape[5]),
				C.size_t(offset)),
		}
	default:
		panic("unsupported number of dimensions")
	}
}

1346
// RoPE applies rotary position embedding (ggml_rope_ext) to t using the
// given positions, rotation dimension, frequency base, and scale. Optional
// rope.Options functions can override the defaults below (YaRN-style
// extrapolation/attention factors, original context length, rope type,
// and frequency factors).
func (t *Tensor) RoPE(ctx ml.Context, positions ml.Tensor, ropeDim int, ropeBase, ropeScale float32, options ...func(*rope.Options)) ml.Tensor {
	// Default options
	opts := rope.Options{
		Factors:               &Tensor{},
		OriginalContextLength: 131072,
		ExtrapolationFactor:   0.,
		AttentionFactor:       1.,
		BetaFast:              32.,
		BetaSlow:              1.,
	}

	// Apply any provided options
	for _, option := range options {
		option(&opts)
	}

	// ggml_rope_ext does not operate on quantized tensors, so cast to F32
	// first when needed.
	dequant := t.t
	if C.ggml_is_quantized(t.t._type) {
		dequant = C.ggml_cast(ctx.(*Context).ctx, t.t, C.GGML_TYPE_F32)
	}

	return &Tensor{
		b: t.b,
		t: C.ggml_rope_ext(
			ctx.(*Context).ctx,
			dequant,
			positions.(*Tensor).t,
			opts.Factors.(*Tensor).t,
			C.int(ropeDim),
			C.int(opts.Type),
			C.int(opts.OriginalContextLength),
			C.float(ropeBase),
			C.float(ropeScale),
			C.float(opts.ExtrapolationFactor),
			C.float(opts.AttentionFactor),
			C.float(opts.BetaFast),
			C.float(opts.BetaSlow),
		),
	}
}

1387
1388
1389
1390
1391
1392
1393
// IM2Col lowers a convolution into a matrix layout (ggml_im2col) with the
// given strides (s0, s1), padding (p0, p1), and dilation (d0, d1). The call
// hard-codes the 2-D case (is_2D=true) and an F32 result type.
func (t *Tensor) IM2Col(ctx ml.Context, t2 ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_im2col(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(s0), C.int(s1), C.int(p0), C.int(p1), C.int(d0), C.int(d1), true, C.GGML_TYPE_F32),
	}
}

Michael Yang's avatar
Michael Yang committed
1394
1395
// GELU builds a GELU activation node. NOTE(review): all activations in this
// group use in-place ggml variants, assuming the caller no longer needs t's
// original values — confirm at call sites.
func (t *Tensor) GELU(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_gelu_inplace(ctx.(*Context).ctx, t.t),
	}
}

// QuickGELU builds the faster approximate GELU variant (in-place).
func (t *Tensor) QuickGELU(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_gelu_quick_inplace(ctx.(*Context).ctx, t.t),
	}
}

// SILU builds a SiLU/swish activation node (in-place).
func (t *Tensor) SILU(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_silu_inplace(ctx.(*Context).ctx, t.t),
	}
}

// RELU builds a ReLU activation node (in-place).
func (t *Tensor) RELU(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_relu_inplace(ctx.(*Context).ctx, t.t),
	}
}

// SwiGLU builds the gated SwiGLU variant used by gpt-oss style models
// (ggml_swiglu_oai), combining the gate in t with the up projection, with
// the given alpha and clamping limit.
func (t *Tensor) SwiGLU(ctx ml.Context, up ml.Tensor, alpha, limit float32) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_swiglu_oai(ctx.(*Context).ctx, t.t, up.(*Tensor).t, C.float(alpha), C.float(limit)),
	}
}

Michael Yang's avatar
Michael Yang committed
1429
1430
// Conv2D builds a 2-D convolution node (ggml_conv_2d) using t as the kernel
// and t2 as the input, with strides (s0, s1), padding (p0, p1), and
// dilation (d0, d1).
func (t *Tensor) Conv2D(ctx ml.Context, t2 ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_conv_2d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(s0), C.int(s1), C.int(p0), C.int(p1), C.int(d0), C.int(d1)),
	}
}

// AvgPool2D builds a square average-pooling node (ggml_pool_2d) with kernel
// size k, stride s, and padding p applied symmetrically to both axes.
func (t *Tensor) AvgPool2D(ctx ml.Context, k, s int, p float32) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_pool_2d(ctx.(*Context).ctx, t.t, C.GGML_OP_POOL_AVG, C.int(k), C.int(k), C.int(s), C.int(s), C.float(p), C.float(p)),
	}
}

Michael Yang's avatar
Michael Yang committed
1443
1444
1445
1446
// Set writes t2 into t at the given byte offset. With no strides it uses
// ggml_set_1d; with one stride it uses ggml_set_2d (stride in bytes). More
// strides panic.
func (t *Tensor) Set(ctx ml.Context, t2 ml.Tensor, offset int, strides ...int) ml.Tensor {
	var tt *C.struct_ggml_tensor
	switch len(strides) {
	case 0:
		tt = C.ggml_set_1d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.size_t(offset))
	case 1:
		tt = C.ggml_set_2d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.size_t(offset), C.size_t(strides[0]))
	default:
		panic("unsupported number of dimensions")
	}

	return &Tensor{b: t.b, t: tt}
}

1457
// ScaledDotProductAttention computes attention with t as the query. mask and
// sinks may be nil. When the backend has flash attention enabled it uses
// ggml_flash_attn_ext (forced to F32 precision); otherwise it falls back to
// an explicit QK^T -> softmax -> V pipeline.
func (t *Tensor) ScaledDotProductAttention(ctx ml.Context, key, value, mask, sinks ml.Tensor, scale float64) ml.Tensor {
	var kqMask *C.struct_ggml_tensor
	if mask != nil {
		kqMask = mask.(*Tensor).t
	}

	// Move the head dimension into position for the attention kernels.
	query := t.Permute(ctx, 0, 2, 1, 3)
	key = key.Permute(ctx, 0, 2, 1, 3)

	if t.b.flashAttention {
		value = value.Permute(ctx, 0, 2, 1, 3)

		kqv := C.ggml_flash_attn_ext(ctx.(*Context).ctx, query.(*Tensor).t, key.(*Tensor).t, value.(*Tensor).t, kqMask, C.float(scale), 0, 0)
		if sinks != nil {
			C.ggml_flash_attn_ext_add_sinks(kqv, sinks.(*Tensor).t)
		}
		C.ggml_flash_attn_ext_set_prec(kqv, C.GGML_PREC_F32)
		return &Tensor{b: t.b, t: kqv}
	} else {
		// Fallback: scores at full precision, scale+mask fused into softmax.
		kq := key.MulmatFullPrec(ctx, query)
		kq = &Tensor{
			b: t.b,
			t: C.ggml_soft_max_ext(ctx.(*Context).ctx, kq.(*Tensor).t, kqMask, C.float(scale), 0),
		}
		if sinks != nil {
			C.ggml_soft_max_add_sinks(kq.(*Tensor).t, sinks.(*Tensor).t)
		}

		kqv := value.Mulmat(ctx, kq)
		// Undo the head permutation and materialize the result.
		return kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)
	}
}
1489
1490
1491
1492
1493
1494
1495

// Duplicate builds a node copying t into a fresh tensor (ggml_dup).
func (t *Tensor) Duplicate(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_dup(ctx.(*Context).ctx, t.t),
	}
}

// TopK builds a node selecting the k largest entries per row (ggml_top_k).
func (t *Tensor) TopK(ctx ml.Context, k int) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_top_k(ctx.(*Context).ctx, t.t, C.int(k)),
	}
}

// Argsort builds a node yielding the ascending sort order of each row
// (ggml_argsort with GGML_SORT_ORDER_ASC).
func (t *Tensor) Argsort(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_argsort(ctx.(*Context).ctx, t.t, C.GGML_SORT_ORDER_ASC),
	}
}
Michael Yang's avatar
Michael Yang committed
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548

// Mean builds a node averaging t along its first dimension (ggml_mean).
func (t *Tensor) Mean(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_mean(ctx.(*Context).ctx, t.t),
	}
}

// Variance computes the population variance along the first dimension:
// mean((t - mean(t))^2), expressed as existing graph ops.
func (t *Tensor) Variance(ctx ml.Context) ml.Tensor {
	return t.Add(ctx, t.Mean(ctx).Scale(ctx, -1)).
		Sqr(ctx).
		SumRows(ctx).
		Scale(ctx, 1/float64(t.Dim(0)))
}

// Stddev is the square root of Variance.
func (t *Tensor) Stddev(ctx ml.Context) ml.Tensor {
	return t.Variance(ctx).Sqrt(ctx)
}

// Sqr builds an elementwise square node (ggml_sqr).
func (t *Tensor) Sqr(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sqr(ctx.(*Context).ctx, t.t),
	}
}

// Sqrt builds an elementwise square-root node (ggml_sqrt).
func (t *Tensor) Sqrt(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sqrt(ctx.(*Context).ctx, t.t),
	}
}

// Clamp builds a node limiting every element of t to [min, max] (ggml_clamp).
func (t *Tensor) Clamp(ctx ml.Context, min, max float32) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_clamp(ctx.(*Context).ctx, t.t, C.float(min), C.float(max)),
	}
}
Michael Yang's avatar
Michael Yang committed
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558

// FromBytes creates a tensor of the given dtype and shape and copies the raw
// bytes in s into its backend buffer. Unlike FromFloatSlice/FromIntSlice the
// element count is not validated, so quantized dtypes (whose byte length is
// not a simple multiple of the shape) can be loaded.
func (c Context) FromBytes(dtype ml.DType, s []uint8, shape ...int) ml.Tensor {
	// Unchecked to handle quantized types
	t := c.newTensor(dtype, shape)
	if len(s) > 0 {
		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
	}

	return t
}