package ggml

// #cgo linux LDFLAGS: -lrt -lpthread -ldl -lstdc++ -lm
// #cgo windows LDFLAGS: -lpthread
// #cgo CPPFLAGS: -I${SRCDIR}/ggml/include
// #include <stdlib.h>
// #include <stdint.h>
// #include "ggml.h"
// #include "ggml-cpu.h"
// #include "ggml-backend.h"
import "C"

import (
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"maps"
	"os"
	"runtime"
	"slices"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"unicode"
	"unsafe"

	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/fs"
	fsggml "github.com/ollama/ollama/fs/ggml"
	"github.com/ollama/ollama/logutil"
	"github.com/ollama/ollama/ml"
	ggml "github.com/ollama/ollama/ml/backend/ggml/ggml/src"
	"github.com/ollama/ollama/ml/nn/rope"

	"golang.org/x/sync/errgroup"
)

var (
	cpus, accels, gpus []C.ggml_backend_dev_t
	backends           map[C.ggml_backend_dev_t]C.ggml_backend_t
)

var initDevices = sync.OnceFunc(func() {
	ggml.OnceLoad()

	backends = make(map[C.ggml_backend_dev_t]C.ggml_backend_t)
	for i := range C.ggml_backend_dev_count() {
		d := C.ggml_backend_dev_get(i)

		switch C.ggml_backend_dev_type(d) {
		case C.GGML_BACKEND_DEVICE_TYPE_CPU:
			if len(cpus) == 0 {
				// only the first cpu device should be used
				cpus = append(cpus, d)
			}
		case C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
			accels = append(accels, d)
		case C.GGML_BACKEND_DEVICE_TYPE_GPU,
			C.GGML_BACKEND_DEVICE_TYPE_IGPU:
			gpus = append(gpus, d)
		}

		backends[d] = C.ggml_backend_dev_init(d, nil)
	}
})

type layerDevice struct {
	d  C.ggml_backend_dev_t
	bt C.ggml_backend_buffer_type_t
}

type Backend struct {
	// modelPath is the location of the model data
	modelPath string

	meta *fsggml.GGML

	// allocMemory means that memory should be allocated for tensors and not
	// just a dry run
	allocMemory bool

	// tensorLoadTargets maps from the name of the tensor in the file
	// to the name that is used by the model definition
	tensorLoadTargets map[string][]string

	schedMu       sync.Mutex // Only one Compute can run at a time
	sched         C.ggml_backend_sched_t
	schedBackends []C.ggml_backend_t
	schedBufts    []C.ggml_backend_buffer_type_t

	tensors map[string]*C.struct_ggml_tensor

	// input is the backend buffer type used for inputs
	input C.ggml_backend_buffer_type_t

	// output is the backend device used for outputs
	output C.ggml_backend_dev_t

	// layers maps each repeating layer index to the device it is assigned to
	layers map[int]layerDevice

	// requiredMemory is the cumulative memory allocations needed by the backend
	requiredMemory *ml.BackendMemory

	// btDeviceMemory maps from a buffer type to the memory allocations associated with that device
	btDeviceMemory map[C.ggml_backend_buffer_type_t]*ml.DeviceMemory

	flashAttention bool

	// maxGraphNodes is the maximum allowed number of graph nodes in this scheduler
	maxGraphNodes int

	// weightBuffers are the GGML contexts and buffers for allocating weights
	weightBuffers map[*C.struct_ggml_context]C.ggml_backend_buffer_t
}

var once sync.Once

func New(modelPath string, params ml.BackendParams) (ml.Backend, error) {
	r, err := os.Open(modelPath)
	if err != nil {
		return nil, err
	}
	defer r.Close()

	meta, err := fsggml.Decode(r, -1)
	if err != nil {
		return nil, err
	}

	once.Do(func() {
		slog.Info(
			"",
			"architecture", meta.KV().Architecture(),
			"file_type", meta.KV().FileType(),
			"name", meta.KV().String("general.name"),
			"description", meta.KV().String("general.description"),
			"num_tensors", len(meta.Tensors().Items()),
			"num_key_values", len(meta.KV()),
		)
	})

	initDevices()

	var requiredMemory ml.BackendMemory
	btDeviceMemory := make(map[C.ggml_backend_buffer_type_t]*ml.DeviceMemory)

	type deviceBufferType struct {
		d   C.ggml_backend_dev_t
		bts []C.ggml_backend_buffer_type_t
	}

	blocks := int(meta.KV().BlockCount())

	// create list of buffer types for the cpu
	cpuDeviceBufferType := deviceBufferType{d: C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_CPU)}
	for _, d := range append(accels, append(gpus, cpus...)...) {
		switch C.ggml_backend_dev_type(d) {
		case C.GGML_BACKEND_DEVICE_TYPE_CPU,
			C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
			bt := C.ggml_backend_dev_buffer_type(d)
			cpuDeviceBufferType.bts = append(cpuDeviceBufferType.bts, bt)

			btDeviceMemory[C.ggml_backend_dev_buffer_type(d)] = &requiredMemory.CPU
		}
	}

	requiredMemory.CPU.Name = C.GoString(C.ggml_backend_dev_name(cpuDeviceBufferType.d))
	var props C.struct_ggml_backend_dev_props
	C.ggml_backend_dev_get_props(cpuDeviceBufferType.d, &props)
	requiredMemory.CPU.ID = C.GoString(props.id)
	requiredMemory.CPU.Library = C.GoString(props.library)
	requiredMemory.CPU.Weights = make([]uint64, blocks+1)
	requiredMemory.CPU.Cache = make([]uint64, blocks+1)

	// create list of buffer types for each gpu
	var gpuDeviceBufferTypes []deviceBufferType
	requiredMemory.GPUs = make([]ml.DeviceMemory, len(gpus))
	for i, d := range gpus {
		bt := C.ggml_backend_dev_buffer_type(d)
		gpuDeviceBufferTypes = append(gpuDeviceBufferTypes, deviceBufferType{
			d:   d,
			bts: append([]C.ggml_backend_buffer_type_t{bt}, cpuDeviceBufferType.bts...),
		})

		btDeviceMemory[bt] = &requiredMemory.GPUs[i]
		requiredMemory.GPUs[i].Name = C.GoString(C.ggml_backend_dev_name(d))
		var props C.struct_ggml_backend_dev_props
		C.ggml_backend_dev_get_props(d, &props)
		requiredMemory.GPUs[i].ID = C.GoString(props.id)
		requiredMemory.GPUs[i].Library = C.GoString(props.library)
		requiredMemory.GPUs[i].Weights = make([]uint64, blocks+1)
		requiredMemory.GPUs[i].Cache = make([]uint64, blocks+1)
	}

	// inputs always use cpu
	input := cpuDeviceBufferType

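	// assignLayer picks the buffer types for a layer index: if params.GPULayers
	// assigns the layer to a gpu, that gpu's buffer types are used; otherwise
	// the layer falls back to the cpu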
	assignLayer := func(layer int) deviceBufferType {
		for _, p := range params.GPULayers {
			for _, l := range p.Layers {
				if l == layer {
					for i := range requiredMemory.GPUs {
						if requiredMemory.GPUs[i].DeviceID == p.DeviceID {
							return gpuDeviceBufferTypes[i]
						}
					}

					return cpuDeviceBufferType
				}
			}
		}

		return cpuDeviceBufferType
	}

	// repeating layers are assigned to devices according to params.GPULayers; layers not listed there fall back to the cpu
	layers := make([]deviceBufferType, blocks)
	for i := range layers {
		layers[i] = assignLayer(i)
	}

	// the output layer is assigned in the same way, as one extra repeating layer (index == blocks)
	output := assignLayer(blocks)

	maxTensors := len(meta.Tensors().Items())
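	// reserve one extra slot since token_embd.weight may be duplicated as output.weight below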
	maxTensors += 1
	// each layer has at most 2 extra tensors for rope operations
	maxTensors += blocks * 2

	type tensor struct {
		source *fsggml.Tensor
		target string
	}

	// some tensors are mapped to different names so keep a list
	targets := make(map[string][]string)

	// contexts are shared by tensors of the same buffer type
	ctxs := make(map[C.ggml_backend_buffer_type_t]*C.struct_ggml_context)
	createTensor := func(t tensor, bts []C.ggml_backend_buffer_type_t, layer int) *C.struct_ggml_tensor {
		for _, bt := range bts {
			if _, ok := ctxs[bt]; !ok {
				ctxs[bt] = C.ggml_init(C.struct_ggml_init_params{
					mem_size: C.ggml_tensor_overhead() * C.size_t(maxTensors),
					no_alloc: true,
				})
			}

			targets[t.source.Name] = append(targets[t.source.Name], t.target)

			name := t.source.Name
			if t.target != "" {
				name = t.target
			}

			cname := C.CString(name)
			defer C.free(unsafe.Pointer(cname))
			if tt := C.ggml_get_tensor(ctxs[bt], cname); tt != nil {
				return tt
			}

			kind := t.source.Kind
			if t.source.Kind == 4 {
				// transform raw mxfp4 stream to ggml mxfp4 format
				kind = 39
			} else if t.source.Kind == uint32(fsggml.TensorTypeBF16) && strings.HasSuffix(t.source.Name, "_exps.bias") {
				// transform "_exps.bias" from bf16 to fp32; add_ids only supports fp32 tensors
				kind = uint32(fsggml.TensorTypeF32)
			}

			tt := C.ggml_new_tensor(ctxs[bt], kind, C.int(len(t.source.Shape)), (*C.int64_t)(unsafe.Pointer(&t.source.Shape[0])))
			C.ggml_set_name(tt, cname)

			logutil.Trace("created tensor", "name", name, "shape", t.source.Shape, "dtype", t.source.Kind, "buffer_type", C.GoString(C.ggml_backend_buft_name(bt)))

			size := pad(C.ggml_backend_buft_get_alloc_size(bt, tt), C.ggml_backend_buft_get_alignment(bt))
			if layer == -1 {
				requiredMemory.InputWeights += uint64(size)
			} else {
				btDeviceMemory[bt].Weights[layer] += uint64(size)
			}

			//nolint:staticcheck // TODO: check if buffer type supports this tensor
			return tt
		}

		return nil
	}

	contains := func(s string, parts ...string) bool {
		split := strings.Split(s, ".")
		for _, part := range parts {
			if slices.Contains(split, part) {
				return true
			}
		}

		return false
	}

	for _, t := range meta.Tensors().Items() {
		switch {
		case contains(t.Name, "position_embd", "token_embd", "token_norm_embd", "token_types"):
			createTensor(tensor{source: t}, input.bts, -1)
			if _, ok := meta.Tensors().GroupLayers()["output"]; !ok && t.Name == "token_embd.weight" {
				createTensor(tensor{source: t, target: "output.weight"}, output.bts, blocks)
			}
		case contains(t.Name, "cls", "output", "output_norm",
			"altup_proj", "altup_unembd_proj",
			"per_layer_token_embd", "per_layer_model_proj", "per_layer_proj_norm"):
			createTensor(tensor{source: t}, output.bts, blocks)
		case strings.HasPrefix(t.Name, "v.") || strings.HasPrefix(t.Name, "mm."):
			// TODO: assign vision tensors to the gpu if possible
			createTensor(tensor{source: t}, output.bts, blocks)
		case contains(t.Name, "rope_freqs", "rope_factors_long", "rope_factors_short"):
			// these tensors should be repeated per layer
			for i, layer := range layers {
				createTensor(tensor{
					source: t,
					target: "blk." + strconv.Itoa(i) + "." + t.Name,
				}, layer.bts, i)
			}
		default:
			layerIndex := -1
			if fields := strings.FieldsFunc(t.Name, func(r rune) bool { return !unicode.IsNumber(r) }); len(fields) > 0 {
				if i, err := strconv.Atoi(fields[0]); err == nil {
					layerIndex = i
				}
			}

			if layerIndex >= 0 {
				createTensor(tensor{source: t}, layers[layerIndex].bts, layerIndex)
			} else {
				// load all other tensors on the cpu
				createTensor(tensor{source: t}, input.bts, -1)
			}
		}
	}

	// map tensor names to tensors for easy lookup later
	tensors := make(map[string]*C.struct_ggml_tensor)
	for _, c := range ctxs {
		for t := C.ggml_get_first_tensor(c); t != nil; t = C.ggml_get_next_tensor(c, t) {
			tensors[C.GoString(C.ggml_get_name(t))] = t
		}
	}

	// map devices to backend buffer types so new tensors can be assigned to the correct device
	deviceBufferTypes := make(map[C.ggml_backend_dev_t]C.ggml_backend_buffer_type_t)

	// create backends and buffer types used for the compute graph scheduler
	var schedBackends []C.ggml_backend_t
	var schedBufts []C.ggml_backend_buffer_type_t
	for _, d := range append(gpus, append(accels, cpus...)...) {
		b := backends[d]
		bt := C.ggml_backend_get_default_buffer_type(b)

		// Always include the CPU as a fallback; otherwise, only use the devices where we assigned layers
		if !slices.Contains(cpuDeviceBufferType.bts, bt) {
			if c, ok := ctxs[bt]; !ok || C.ggml_get_first_tensor(c) == nil {
				continue
			}
		}

		deviceBufferTypes[d] = bt

		schedBackends = append(schedBackends, b)
		schedBufts = append(schedBufts, bt)

		if C.ggml_backend_is_cpu(b) {
			// set number of threads for cpu backend
			C.ggml_backend_cpu_set_n_threads(b, C.int(Threads(params.NumThreads)))
		}
	}

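	// size the scheduler for roughly five graph nodes per tensor, with a floor of 8192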
	maxGraphNodes := max(8192, len(meta.Tensors().Items())*5)

	sched := C.ggml_backend_sched_new_ext(
		(*C.ggml_backend_t)(unsafe.Pointer(&schedBackends[0])),
		(*C.ggml_backend_buffer_type_t)(unsafe.Pointer(&schedBufts[0])),
		C.int(len(schedBackends)),
		C.size_t(maxGraphNodes),
		C._Bool(false),
		C._Bool(false),
		C._Bool(params.AllocMemory),
	)

	// allocate buffers for each context
	bbs := make(map[*C.struct_ggml_context]C.ggml_backend_buffer_t, len(ctxs))
	for bt, c := range ctxs {
		if C.ggml_get_first_tensor(c) == nil {
			continue
		}

		b := C.ggml_backend_alloc_ctx_tensors_from_buft(c, bt)
		if b == nil {
			for _, b := range bbs {
				C.ggml_backend_buffer_free(b)
			}

			for _, ctx := range ctxs {
				C.ggml_free(ctx)
			}

			panic(ml.ErrNoMem{BackendMemory: requiredMemory})
		}

		C.ggml_backend_buffer_set_usage(b, C.GGML_BACKEND_BUFFER_USAGE_WEIGHTS)
		bbs[c] = b
	}

	for bs := range maps.Values(bbs) {
		logutil.Trace("model weights", "buffer", C.GoString(C.ggml_backend_buffer_name(bs)),
			"size", format.HumanBytes2(uint64(C.ggml_backend_buffer_get_size(bs))))
	}

	return &Backend{
		modelPath:         modelPath,
		allocMemory:       params.AllocMemory,
		flashAttention:    params.FlashAttention,
		meta:              meta,
		tensorLoadTargets: targets,
		tensors:           tensors,
		sched:             sched,
		schedBackends:     schedBackends,
		schedBufts:        schedBufts,
		input:             deviceBufferTypes[input.d],
		output:            output.d,
		layers: func() map[int]layerDevice {
			m := make(map[int]layerDevice)
			for i, layer := range layers {
				m[i] = layerDevice{
					d:  layer.d,
					bt: deviceBufferTypes[layer.d],
				}
			}
			return m
		}(),
		requiredMemory: &requiredMemory,
		btDeviceMemory: btDeviceMemory,
		maxGraphNodes:  maxGraphNodes,
		weightBuffers:  bbs,
	}, nil
}

func init() {
	ml.RegisterBackend("ggml", New)
}

func (b *Backend) Close() {
	if b == nil {
		return
	}

	for ctx, b := range b.weightBuffers {
		C.ggml_backend_buffer_free(b)
		C.ggml_free(ctx)
	}

	C.ggml_backend_sched_free(b.sched)
}

func (b *Backend) Load(ctx context.Context, progress func(float32)) error {
	if !b.allocMemory {
		return errors.New("cannot load model without memory allocation")
	}

	// Mimic llama runner logs summarizing layers and memory
	gpuLayers := 0
	for layer := range maps.Values(b.layers) {
		switch C.ggml_backend_dev_type(layer.d) {
		case C.GGML_BACKEND_DEVICE_TYPE_GPU,
			C.GGML_BACKEND_DEVICE_TYPE_IGPU:
			gpuLayers++
		}
	}
	slog.Info(fmt.Sprintf("offloading %d repeating layers to GPU", gpuLayers))

	switch C.ggml_backend_dev_type(b.output) {
	case C.GGML_BACKEND_DEVICE_TYPE_CPU:
		slog.Info("offloading output layer to CPU")
	case C.GGML_BACKEND_DEVICE_TYPE_GPU,
		C.GGML_BACKEND_DEVICE_TYPE_IGPU:
		slog.Info("offloading output layer to GPU")
		gpuLayers++
	case C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
		slog.Info("offloading output layer to ACCEL")
	}
	slog.Info(fmt.Sprintf("offloaded %d/%d layers to GPU", gpuLayers, len(b.layers)+1))

	var doneBytes atomic.Uint64
	totalBytes := uint64(b.meta.Length) - b.meta.Tensors().Offset

	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(runtime.GOMAXPROCS(0))
	for _, t := range b.meta.Tensors().Items() {
		t := t
		g.Go(func() error {
			tts := make([]*C.struct_ggml_tensor, max(1, len(b.tensorLoadTargets[t.Name])))
			for i := range tts {
				target := b.tensorLoadTargets[t.Name][i]
				if target == "" {
					target = t.Name
				}

				tt, ok := b.tensors[target]
				if !ok {
					return fmt.Errorf("unassigned tensor: %s", t.Name)
				}

				tts[i] = tt
			}

			// Create a new FD for each goroutine so that each FD is read sequentially, rather than
			// seeking around within an FD shared between all goroutines.
			file, err := os.Open(b.modelPath)
			if err != nil {
				slog.Warn("file open error", "file", b.modelPath, "error", err)
				return err
			}
			defer file.Close()
			sr := io.NewSectionReader(file, int64(b.meta.Tensors().Offset+t.Offset), int64(t.Size()))

			if t.Kind == 4 && tts[0]._type == 39 {
				// source is mxfp4, target is ggml mxfp4

				const BS = 17                             // MXFP4 block size
				bts := make([]byte, 8*BS*format.KibiByte) // ~136KiB, a whole number of blocks
				var s uint64
				var tmp [16]byte
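				// Each 17-byte block is one scale byte followed by 16 bytes of
				// packed 4-bit values. The inner loop below pairs payload bytes
				// 8 apart and regroups their nibbles as (lo(a), lo(b)) then
				// (hi(a), hi(b)), the interleaved layout ggml's mxfp4 type expects.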
				for s < t.Size() {
					// Stop if either the parent context has been canceled or if any of the other tensors returned an error
					if err := ctx.Err(); err != nil {
						return err
					}
					n, err := io.ReadFull(sr, bts[:min(len(bts), int(t.Size()-s))])
					if err != nil {
						slog.Warn("file read error", "file", b.modelPath, "error", err)
						return err
					}
					for j := range n / BS {
						for i := 1; i < 9; i++ {
							// transform a1b2c3 ... x7y8z9 -> 71xa82yb93zc
							a, b := bts[j*BS+i], bts[j*BS+i+8]
							tmp[2*(i-1)] = (a & 0x0F) | (b << 4)
							tmp[2*(i-1)+1] = (a >> 4) | (b & 0xF0)
						}
						copy(bts[j*BS+1:j*BS+17], tmp[:])
					}

					for _, tt := range tts {
						C.ggml_backend_tensor_set(tt, unsafe.Pointer(&bts[0]), C.size_t(s), C.size_t(n))
					}

					s += uint64(n)

					if progress != nil {
						done := doneBytes.Add(uint64(n))
						progress(float32(done) / float32(totalBytes))
					}
				}
				return nil
			} else if strings.HasSuffix(t.Name, "_exps.bias") && t.Kind == 30 && tts[0]._type == 0 {
				// source is bf16, target is ggml fp32

				// data is bf16 but we need to convert to fp32
				bts := make([]byte, 128*format.KibiByte)
				var e uint64
				for e < t.Elements() {
					// Stop if either the parent context has been canceled or if any of the other tensors returned an error
					if err := ctx.Err(); err != nil {
						return err
					}
					n, err := io.ReadFull(sr, bts[:min(len(bts), int(t.Elements()-e)*2)])
					if err != nil {
						slog.Warn("file read error", "file", b.modelPath, "error", err)
						return err
					}
					fp32 := ConvertToF32(bts, uint32(fsggml.TensorTypeBF16), uint64(n/2))

					for _, tt := range tts {
						C.ggml_backend_tensor_set(tt, unsafe.Pointer(&fp32[0]), C.size_t(e*4), C.size_t(n*2))
					}
					e += uint64(n / 2)
					if progress != nil {
						done := doneBytes.Add(uint64(n))
						progress(float32(done) / float32(totalBytes))
					}
				}
				return nil
			}

			bts := make([]byte, 128*format.KibiByte)

			var s uint64
			for s < t.Size() {
				// Stop if either the parent context has been canceled or if any of the other tensors returned an error
				if err := ctx.Err(); err != nil {
					return err
				}

				n, err := io.ReadFull(sr, bts[:min(len(bts), int(t.Size()-s))])
				if err != nil {
					slog.Warn("file read error", "file", b.modelPath, "error", err)
					return err
				}

				for _, tt := range tts {
					C.ggml_backend_tensor_set(tt, unsafe.Pointer(&bts[0]), C.size_t(s), C.size_t(n))
				}

				s += uint64(n)

				if progress != nil {
					done := doneBytes.Add(uint64(n))
					progress(float32(done) / float32(totalBytes))
				}
			}

			return nil
		})
	}

	// Cleanup any backend state from devices that we didn't end up using
nextDevice:
	for _, d := range append(gpus, append(accels, cpus...)...) {
		for _, backend := range b.schedBackends {
			if d == C.ggml_backend_get_device(backend) {
				continue nextDevice
			}
		}

		C.ggml_backend_dev_reset(d)
	}

	if err := g.Wait(); err != nil {
		return err
	}

	return nil
}

func (b *Backend) BackendMemory() ml.BackendMemory {
	return *b.requiredMemory
}

func (b *Backend) Config() fs.Config {
	return b.meta.KV()
}

func (b *Backend) Get(name string) ml.Tensor {
	if t, ok := b.tensors[name]; ok {
		return &Tensor{b: b, t: t}
	}

	return nil
}

func (b *Backend) NewContext() ml.Context {
	return b.NewContextSize(b.maxGraphNodes)
}

func (b *Backend) NewContextSize(n int) ml.Context {
	if n > b.maxGraphNodes {
		panic(fmt.Errorf("requested number of graph nodes (%v) for new context exceeds maximum (%v)", n, b.maxGraphNodes))
	}

	var allocatedBuffers []C.ggml_backend_buffer_t

	return &Context{
		b:             b,
		maxGraphNodes: n,
		ctx: C.ggml_init(C.struct_ggml_init_params{
			mem_size: C.size_t(n)*C.ggml_tensor_overhead() + C.ggml_graph_overhead_custom(C.size_t(n), false),
			no_alloc: true,
		}),
		allocatedBuffers: &allocatedBuffers,
		layer:            -1,
	}
}

func (b *Backend) CacheConfig() ml.CacheConfig {
	if b.flashAttention {
		return ml.CacheConfig{CachePadding: 256, MaskDType: ml.DTypeF16, MaskBatchPadding: C.GGML_KQ_MASK_PAD}
	} else {
		return ml.CacheConfig{CachePadding: 32, PermutedV: true}
	}
}

func (b *Backend) BackendDevices() []ml.DeviceInfo {
	deviceInfos := []ml.DeviceInfo{}
	for _, dev := range gpus {
		// If we have a model loaded, and it's only loaded on a subset of the devices
		// skip idle/unused devices to avoid initializing them and causing VRAM allocations
		if b.allocMemory {
			idleDev := true
			for _, backend := range b.schedBackends {
				if dev == C.ggml_backend_get_device(backend) {
					idleDev = false
					break
				}
			}
			if idleDev {
				slog.Debug("skipping unused backend device", "description", C.GoString(C.ggml_backend_dev_description(dev)))
				continue
			}
		}

		info := ml.DeviceInfo{}
		props := C.struct_ggml_backend_dev_props{}
		C.ggml_backend_dev_get_props(dev, &props)
		info.Name = C.GoString(props.name)
		info.Description = C.GoString(props.description)
		info.ID = C.GoString(props.id)
		info.Library = C.GoString(props.library)
		info.ComputeMajor = (int)(props.compute_major)
		info.ComputeMinor = (int)(props.compute_minor)
		info.DriverMajor = (int)(props.driver_major)
		info.DriverMinor = (int)(props.driver_minor)
		info.Integrated = props.integrated != 0
		if props.device_id != nil {
			info.PCIID = C.GoString(props.device_id)
		}
		info.LibraryPath = ggml.LibPaths()
		if props.numeric_id != nil {
			info.FilteredID = C.GoString(props.numeric_id)
		}

		C.ggml_backend_dev_memory(dev, &props.memory_free, &props.memory_total)
		info.TotalMemory = (uint64)(props.memory_total)
		info.FreeMemory = (uint64)(props.memory_free)

		deviceInfos = append(deviceInfos, info)
	}
	return deviceInfos
}

type Context struct {
	b *Backend

	ctx   *C.struct_ggml_context
	graph *C.struct_ggml_cgraph

	// buft is the buffer type used for new tensors
	buft C.ggml_backend_buffer_type_t

	// allocatedBuffers are buffers for tensors that we have allocated in this context
	// so that we can free them when we close the context
	allocatedBuffers *[]C.ggml_backend_buffer_t

	// maxGraphNodes is the maximum allowed number of graph nodes in this context
	maxGraphNodes int

	// layer is the graph layer that this context is allocating for - assumed to be cache
	layer int
}

func (c *Context) Input() ml.Context {
	if c.b.input != nil {
		return &Context{
			b:                c.b,
			ctx:              c.ctx,
			buft:             c.b.input,
			allocatedBuffers: c.allocatedBuffers,
			maxGraphNodes:    c.maxGraphNodes,
			layer:            -1,
		}
	}

	return c
}

func (c *Context) Layer(i int) ml.Context {
	if layer, ok := c.b.layers[i]; ok {
		return &Context{
			b:                c.b,
			ctx:              c.ctx,
			buft:             layer.bt,
			allocatedBuffers: c.allocatedBuffers,
			maxGraphNodes:    c.maxGraphNodes,
			layer:            i,
		}
	}

	return c
}

func (c *Context) Forward(tensors ...ml.Tensor) ml.Context {
	if c.graph == nil {
		c.graph = C.ggml_new_graph_custom(c.ctx, C.size_t(c.maxGraphNodes), false)
	}

	for _, tensor := range tensors {
		C.ggml_build_forward_expand(c.graph, tensor.(*Tensor).t)
	}

	return c
}

func (c *Context) Compute(tensors ...ml.Tensor) {
	c.ComputeWithNotify(nil, tensors...)
}

func (c *Context) ComputeWithNotify(cb func(), tensors ...ml.Tensor) {
	c.b.schedMu.Lock()
	defer c.b.schedMu.Unlock()
	if cb != nil {
		go cb()
	}
	if status := C.ggml_backend_sched_graph_compute_async(c.b.sched, c.graph); status != C.GGML_STATUS_SUCCESS {
		panic(fmt.Errorf("error computing ggml graph: %v", status))
	}
	C.ggml_backend_sched_reset(c.b.sched)

	needSync := true
	sync := func() {
		if needSync {
			C.ggml_backend_sched_synchronize(c.b.sched)
			needSync = false
		}
	}

	for _, t := range tensors {
		if C.ggml_nbytes(t.(*Tensor).t) > 0 {
			t.(*Tensor).sync = sync
		}
	}
}

func (c *Context) Reserve() {
	reserved := C.ggml_backend_sched_reserve(c.b.sched, c.graph)

	slog.Debug("compute graph", "nodes", C.ggml_graph_n_nodes(c.graph), "splits", C.ggml_backend_sched_get_n_splits(c.b.sched))

	// Reserve may get called multiple times for different graphs - we just want the last run, which will contain the max allocations
	for _, bt := range c.b.schedBufts {
		c.b.btDeviceMemory[bt].Graph = 0
	}

	for i := range c.b.schedBackends {
		bufferSize := C.ggml_backend_sched_get_attempted_buffer_size(c.b.sched, c.b.schedBackends[i])
		c.b.btDeviceMemory[c.b.schedBufts[i]].Graph += uint64(bufferSize)

		logutil.Trace("compute graph", "backend", C.GoString(C.ggml_backend_name(c.b.schedBackends[i])),
			"buffer_type", C.GoString(C.ggml_backend_buft_name(c.b.schedBufts[i])), "size", format.HumanBytes2(uint64(bufferSize)))
	}

	if !reserved {
		panic(ml.ErrNoMem{BackendMemory: *c.b.requiredMemory})
	}
}

func (c *Context) MaxGraphNodes() int {
	return c.maxGraphNodes
}

func shapeToGGML(shape []int) *C.int64_t {
	sh := make([]C.int64_t, len(shape))
	for i, s := range shape {
		sh[i] = C.int64_t(s)
	}

	return &sh[0]
}

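// pad rounds length up to the next multiple of pad,
// e.g. pad(100, 32) == 128 and pad(128, 32) == 128.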
func pad(length, pad C.size_t) C.size_t {
	return ((length + pad - 1) / pad) * pad
}

func (c *Context) newTensor(dtype ml.DType, shape []int) *Tensor {
	if c.buft == nil {
		panic("set Input or Layer before creating tensors")
	}

	cdtype := ggmlDType(dtype)

	if len(shape) < 1 || shape[0] == 0 {
		var shape C.int64_t = 0
		return &Tensor{b: c.b, t: C.ggml_new_tensor(c.ctx, cdtype, 1, &shape)}
	} else if len(shape) > 4 {
		panic("unsupported number of dimensions")
	}

	for _, dim := range shape {
		if dim < 1 {
			panic("invalid shape")
		}
	}

	t := C.ggml_new_tensor(c.ctx, cdtype, C.int(len(shape)), shapeToGGML(shape))
	size := pad(C.ggml_backend_buft_get_alloc_size(c.buft, t), C.ggml_backend_buft_get_alignment(c.buft))

	b := C.ggml_backend_buft_alloc_buffer(c.buft, size)
	if c.layer >= 0 {
		c.b.btDeviceMemory[c.buft].Cache[c.layer] += uint64(size)
	}

	if b == nil {
		panic(ml.ErrNoMem{BackendMemory: *c.b.requiredMemory})
	}

	*c.allocatedBuffers = append(*c.allocatedBuffers, b)
	C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b))
	return &Tensor{b: c.b, t: t}
}

func (c *Context) Empty(dtype ml.DType, shape ...int) ml.Tensor {
	return c.newTensor(dtype, shape)
}

func (c *Context) Zeros(dtype ml.DType, shape ...int) ml.Tensor {
	t := c.newTensor(dtype, shape)
	if c.b.allocMemory {
		C.ggml_set_zero(t.t)
	}
	return t
}

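// checkShape panics unless the product of shape equals len(s): 6 elements
// reshape cleanly to [2, 3] but not to [4, 2]. An empty s is always allowed.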
func checkShape[S ~[]E, E any](s S, shape ...int) {
	n := len(s)

	if n == 0 {
		return
	}

	for _, v := range shape {
		n /= v
	}

	if n != 1 {
		panic(fmt.Errorf("invalid shape: %v", shape))
	}
}

func (c Context) FromBytes(dtype ml.DType, s []uint8, shape ...int) ml.Tensor {
	// Unchecked to handle quantized types
	t := c.newTensor(dtype, shape)
	if c.b.allocMemory {
		t.FromBytes(s)
	}

	return t
}

func (c *Context) FromFloats(s []float32, shape ...int) ml.Tensor {
	checkShape(s, shape...)

	t := c.newTensor(ml.DTypeF32, shape)

	if c.b.allocMemory {
		t.FromFloats(s)
	}

	return t
}

func (c *Context) FromInts(s []int32, shape ...int) ml.Tensor {
	checkShape(s, shape...)

	t := c.newTensor(ml.DTypeI32, shape)
	if c.b.allocMemory {
		t.FromInts(s)
	}

	return t
}

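// Arange returns a 1D tensor of evenly spaced values over the half-open
// interval [start, stop), e.g. Arange(0, 5, 1, ml.DTypeI32) yields [0 1 2 3 4].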
func (c Context) Arange(start, stop, step float32, dtype ml.DType) ml.Tensor {
	switch dtype {
	case ml.DTypeF32:
		// ggml_arange creates a float32 tensor
		return &Tensor{
			b: c.b,
			t: C.ggml_arange(c.ctx, C.float(start), C.float(stop), C.float(step)),
		}
	case ml.DTypeI32:
		// ggml_cast does not support float32 to int32 conversion
		arange := make([]int32, 0, int((stop-start)/step))
		for i := start; i < stop; i += step {
			arange = append(arange, int32(i))
		}

		return c.Input().FromInts(arange, len(arange))
	default:
		panic("unsupported dtype for arange")
	}
}

func (c *Context) Close() {
	if c != nil {
		for _, b := range *c.allocatedBuffers {
			C.ggml_backend_buffer_free(b)
		}
		*c.allocatedBuffers = nil

		C.ggml_free(c.ctx)
	}
}

type Tensor struct {
	b    *Backend
	t    *C.struct_ggml_tensor
	sync func()
}

func (t *Tensor) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("name", C.GoString(C.ggml_get_name(t.t))),
		slog.String("type", C.GoString(C.ggml_type_name(t.t._type))),
		slog.Any("shape", t.Shape()),
	)
}

func (t *Tensor) Dim(n int) int {
	return int(t.t.ne[n])
}

func (t *Tensor) Stride(n int) int {
	return int(t.t.nb[n])
}

func (t *Tensor) Shape() []int {
	shape := make([]int, C.ggml_n_dims(t.t))
	for i := range shape {
		shape[i] = t.Dim(i)
	}

	return shape
}

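// Bytes waits for any pending computation via t.sync, then copies the raw
// tensor data back to host memory. It returns nil for tensors that were
// never part of a computed graph.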
func (t *Tensor) Bytes() (data []byte) {
	if t.sync != nil {
		data = make([]byte, C.ggml_nbytes(t.t))

		t.sync()
		C.ggml_backend_tensor_get(t.t, unsafe.Pointer(&data[0]), 0, C.ggml_nbytes(t.t))
	}

	return
}

func (t *Tensor) Floats() (data []float32) {
	if t.sync != nil {
		data = make([]float32, C.ggml_nelements(t.t))

		t.sync()
		C.ggml_backend_tensor_get(t.t, unsafe.Pointer(&data[0]), 0, C.ggml_nbytes(t.t))
	}

	return
}

func tensorSet[S ~[]E, E byte | float32 | int32](t *Tensor, s S) {
	if len(s) == 0 {
		return
	}
	if int(C.ggml_nbytes(t.t)) != len(s)*binary.Size(s[0]) {
		panic("data size does not match tensor size")
	}
	C.ggml_backend_tensor_set(t.t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.t))
}

func (t *Tensor) FromBytes(s []byte) {
	tensorSet(t, s)
}

func (t *Tensor) FromFloats(s []float32) {
	tensorSet(t, s)
}

func (t *Tensor) FromInts(s []int32) {
	tensorSet(t, s)
}

func (t *Tensor) DType() ml.DType {
	switch t.t._type {
	case C.GGML_TYPE_F32:
		return ml.DTypeF32
	case C.GGML_TYPE_F16:
		return ml.DTypeF16
	case C.GGML_TYPE_Q8_0:
		return ml.DTypeQ80
	case C.GGML_TYPE_Q4_0:
		return ml.DTypeQ40
	case C.GGML_TYPE_I32:
		return ml.DTypeI32
	case C.GGML_TYPE_MXFP4:
		return ml.DTypeMXFP4
	default:
		return ml.DTypeOther
	}
}

func ggmlDType(dtype ml.DType) uint32 {
	switch dtype {
	case ml.DTypeF32:
		return C.GGML_TYPE_F32
	case ml.DTypeF16:
		return C.GGML_TYPE_F16
	case ml.DTypeQ80:
		return C.GGML_TYPE_Q8_0
	case ml.DTypeQ40:
		return C.GGML_TYPE_Q4_0
	case ml.DTypeI32:
		return C.GGML_TYPE_I32
	case ml.DTypeMXFP4:
		return C.GGML_TYPE_MXFP4
	default:
		panic("unsupported dtype")
	}
}

func (t *Tensor) Cast(ctx ml.Context, dtype ml.DType) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_cast(ctx.(*Context).ctx, t.t, ggmlDType(dtype)),
	}
}

func (t *Tensor) Neg(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_neg(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Add(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_add(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Sub(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sub(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Repeat(ctx ml.Context, dim, n int) ml.Tensor {
	if dim < 0 || dim >= C.GGML_MAX_DIMS {
		panic("invalid dimension")
	}

	shape := make([]C.int64_t, C.GGML_MAX_DIMS)
	for i := range C.GGML_MAX_DIMS {
		if i == dim {
			shape[i] = C.int64_t(t.Dim(i) * n)
		} else {
			shape[i] = C.int64_t(t.Dim(i))
		}
	}

	tmpl := C.ggml_new_tensor(ctx.(*Context).ctx, t.t._type, C.int(len(shape)), unsafe.SliceData(shape))
	return &Tensor{
		b: t.b,
		t: C.ggml_repeat(ctx.(*Context).ctx, t.t, tmpl),
	}
}

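// Stack concatenates t with s along dim by folding Concat, e.g.
// a.Stack(ctx, 2, b, c) is equivalent to a.Concat(ctx, b.Concat(ctx, c, 2), 2).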
func (t *Tensor) Stack(ctx ml.Context, dim int, s ...ml.Tensor) ml.Tensor {
	if len(s) > 0 {
		return t.Concat(ctx, s[0].Stack(ctx, dim, s[1:]...), dim)
	}

	return t
}

func (t *Tensor) Concat(ctx ml.Context, t2 ml.Tensor, dim int) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_concat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(dim)),
	}
}

func (t *Tensor) Contiguous(ctx ml.Context, shape ...int) ml.Tensor {
	if slices.Contains(shape, -1) {
		inferShape(t, shape)
	}

	switch len(shape) {
	case 0:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont(ctx.(*Context).ctx, t.t),
		}
	case 1:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0])),
		}
	case 2:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_2d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1])),
		}
	case 3:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_3d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2])),
		}
	case 4:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_4d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2]), C.int64_t(shape[3])),
		}
	default:
		panic("unsupported number of dimensions")
	}
}

func (t *Tensor) Mul(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_mul(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Div(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_div(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

// Mulmat performs matrix multiplication between two tensors.
// If t has shape [m, p, ...] and t2 has shape [m, n, ...],
// Mulmat returns a new Tensor with shape [p, n, ...].
//
// Note: this is similar to matmul(t2, t.transpose(-1, -2)) in other libraries.
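// For example, t with shape [4, 3] and t2 with shape [4, 2] produce a result
// with shape [3, 2].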
func (t *Tensor) Mulmat(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_mul_mat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) MulmatFullPrec(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	mul := C.ggml_mul_mat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t)
	C.ggml_mul_mat_set_prec(mul, C.GGML_PREC_F32)

	return &Tensor{
		b: t.b,
		t: mul,
	}
}

func (t *Tensor) MulmatID(ctx ml.Context, t2, ids ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_mul_mat_id(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, ids.(*Tensor).t),
	}
}

func (t *Tensor) AddID(ctx ml.Context, t2, ids ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_add_id(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, ids.(*Tensor).t),
	}
}

func (t *Tensor) L2Norm(ctx ml.Context, eps float32) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_l2_norm(ctx.(*Context).ctx, t.t, C.float(eps)),
	}
}

func (t *Tensor) LayerNorm(ctx ml.Context, w, b ml.Tensor, eps float32) ml.Tensor {
	tt := C.ggml_norm(ctx.(*Context).ctx, t.t, C.float(eps))
	if w != nil {
		tt = C.ggml_mul(ctx.(*Context).ctx, tt, w.(*Tensor).t)
		if b != nil {
			tt = C.ggml_add(ctx.(*Context).ctx, tt, b.(*Tensor).t)
		}
	}

	return &Tensor{b: t.b, t: tt}
}

func (t *Tensor) RMSNorm(ctx ml.Context, w ml.Tensor, eps float32) ml.Tensor {
	tt := C.ggml_rms_norm(ctx.(*Context).ctx, t.t, C.float(eps))
	if w != nil {
		tt = C.ggml_mul(ctx.(*Context).ctx, tt, w.(*Tensor).t)
	}

	return &Tensor{b: t.b, t: tt}
}

func (t *Tensor) Pad(ctx ml.Context, shape ...int) ml.Tensor {
	if len(shape) != 4 {
		panic("expected 4 dimensions")
	} else if shape[3] != 0 {
		panic("cuda does not support 4d tensors")
	}

	return &Tensor{
		b: t.b,
		t: C.ggml_pad(ctx.(*Context).ctx, t.t, C.int(shape[0]), C.int(shape[1]), C.int(shape[2]), C.int(shape[3])),
	}
}

// Permute permutes t according to order. Permute panics if the number of dimensions
// in order does not match the number of dimensions in t.
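// For example, Permute(ctx, 0, 2, 1, 3) swaps the two middle dimensions.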
func (t *Tensor) Permute(ctx ml.Context, order ...int) ml.Tensor {
	if len(order) != len(t.Shape()) && len(order) != 4 {
		panic("invalid number of dimensions for permute")
	}

	// ggml_permute requires 4 dimensions so fill in the rest
	for i := len(order); i < 4; i++ {
		order = append(order, i)

	return &Tensor{
		b: t.b,
		t: C.ggml_permute(ctx.(*Context).ctx, t.t, C.int(order[0]), C.int(order[1]), C.int(order[2]), C.int(order[3])),
	}
}

func (t *Tensor) Rows(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_get_rows(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Copy(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_cpy(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

// inferShape updates shape in place to automatically set a single -1 dimension
// based on the input tensor and the other dimensions
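// For example, a tensor holding 24 elements reshaped with [-1, 4] infers [6, 4].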
func inferShape(t *Tensor, shape []int) {
	total := 1
	for _, dim := range t.Shape() {
		total *= dim
	}

	dim := -1
	for i := range shape {
		switch shape[i] {
		case -1:
			if dim != -1 {
				panic("only one dimension can be inferred")
			}
			dim = i
		case 0:
			panic("dimension cannot be zero")
		default:
			if total%shape[i] != 0 {
				panic("cannot infer dimension")
			}

			total /= shape[i]
		}
	}

	if dim != -1 {
		shape[dim] = total
	}
}

func (t *Tensor) Reshape(ctx ml.Context, shape ...int) ml.Tensor {
	if slices.Contains(shape, -1) {
		inferShape(t, shape)
	}

	switch len(shape) {
	case 1:
		return &Tensor{
			b: t.b,
			t: C.ggml_reshape_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0])),
		}
	case 2:
		return &Tensor{
			b: t.b,
			t: C.ggml_reshape_2d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1])),
		}
	case 3:
		return &Tensor{
			b: t.b,
			t: C.ggml_reshape_3d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2])),
		}
	case 4:
		return &Tensor{
			b: t.b,
			t: C.ggml_reshape_4d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2]), C.int64_t(shape[3])),
		}
	default:
		panic("unsupported number of dimensions")
	}
}

func (t *Tensor) Scale(ctx ml.Context, s float64) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_scale(ctx.(*Context).ctx, t.t, (C.float)(s)),
	}
}

func (t *Tensor) SumRows(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sum_rows(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Softmax(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_soft_max(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Sin(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sin(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Cos(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_cos(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Tanh(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_tanh_inplace(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Sigmoid(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sigmoid_inplace(ctx.(*Context).ctx, t.t),
	}
}

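// View returns a strided view into t starting at a byte offset. The variadic
// shape interleaves sizes and byte strides: [ne0], [ne0, nb1, ne1],
// [ne0, nb1, ne1, nb2, ne2], or [ne0, nb1, ne1, nb2, ne2, nb3, ne3], which is
// why only 1, 3, 5, or 7 arguments are accepted below.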
func (t *Tensor) View(ctx ml.Context, offset int, shape ...int) ml.Tensor {
	switch len(shape) {
	case 1:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.size_t(offset)),
		}
	case 3:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_2d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]),
				C.size_t(shape[1]),
				C.size_t(offset)),
		}
	case 5:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_3d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]), C.int64_t(shape[4]),
				C.size_t(shape[1]), C.size_t(shape[3]),
				C.size_t(offset)),
		}
	case 7:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_4d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]), C.int64_t(shape[4]), C.int64_t(shape[6]),
				C.size_t(shape[1]), C.size_t(shape[3]), C.size_t(shape[5]),
				C.size_t(offset)),
		}
	default:
		panic("unsupported number of dimensions")
	}
}

func (t *Tensor) RoPE(ctx ml.Context, positions ml.Tensor, ropeDim int, ropeBase, ropeScale float32, options ...func(*rope.Options)) ml.Tensor {
	// Default options
	opts := rope.Options{
		Factors:               &Tensor{},
		OriginalContextLength: 131072,
		ExtrapolationFactor:   0.,
		AttentionFactor:       1.,
		BetaFast:              32.,
		BetaSlow:              1.,
	}

	// Apply any provided options
	for _, option := range options {
		option(&opts)
	}

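	// rope operates on float data, so quantized tensors are cast to f32 first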
	dequant := t.t
	if C.ggml_is_quantized(t.t._type) {
		dequant = C.ggml_cast(ctx.(*Context).ctx, t.t, C.GGML_TYPE_F32)
	}

	return &Tensor{
		b: t.b,
		t: C.ggml_rope_ext(
			ctx.(*Context).ctx,
			dequant,
			positions.(*Tensor).t,
			opts.Factors.(*Tensor).t,
			C.int(ropeDim),
			C.int(opts.Type),
			C.int(opts.OriginalContextLength),
			C.float(ropeBase),
			C.float(ropeScale),
			C.float(opts.ExtrapolationFactor),
			C.float(opts.AttentionFactor),
			C.float(opts.BetaFast),
			C.float(opts.BetaSlow),
		),
	}
}

func (t *Tensor) IM2Col(ctx ml.Context, t2 ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_im2col(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(s0), C.int(s1), C.int(p0), C.int(p1), C.int(d0), C.int(d1), true, C.GGML_TYPE_F32),
	}
}

func (t *Tensor) GELU(ctx ml.Context, t2 ...ml.Tensor) ml.Tensor {
	if len(t2) > 0 {
		return &Tensor{
			b: t.b,
			t: C.ggml_geglu_split(ctx.(*Context).ctx, t.t, t2[0].(*Tensor).t),
		}
	}
	return &Tensor{
		b: t.b,
		t: C.ggml_gelu_inplace(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) SILU(ctx ml.Context, t2 ...ml.Tensor) ml.Tensor {
	if len(t2) > 0 {
		return &Tensor{
			b: t.b,
			t: C.ggml_swiglu_split(ctx.(*Context).ctx, t.t, t2[0].(*Tensor).t),
		}
	}
	return &Tensor{
		b: t.b,
		t: C.ggml_silu_inplace(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) RELU(ctx ml.Context, t2 ...ml.Tensor) ml.Tensor {
	if len(t2) > 0 {
		return &Tensor{
			b: t.b,
			t: C.ggml_reglu_split(ctx.(*Context).ctx, t.t, t2[0].(*Tensor).t),
		}
	}
	return &Tensor{
		b: t.b,
		t: C.ggml_relu_inplace(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) SILUAlphaLimit(ctx ml.Context, up ml.Tensor, alpha, limit float32) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_swiglu_oai(ctx.(*Context).ctx, t.t, up.(*Tensor).t, C.float(alpha), C.float(limit)),
	}
}

func (t *Tensor) Conv2D(ctx ml.Context, t2 ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_conv_2d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(s0), C.int(s1), C.int(p0), C.int(p1), C.int(d0), C.int(d1)),
	}
}

func (t *Tensor) Conv3D(ctx ml.Context, t2 ml.Tensor, c, s0, s1, s2, p0, p1, p2, d0, d1, d2 int) ml.Tensor {
	var tt ml.Tensor = &Tensor{
		b: t.b,
		t: C.ggml_conv_3d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int64_t(c), C.int(s0), C.int(s1), C.int(s2), C.int(p0), C.int(p1), C.int(p2), C.int(d0), C.int(d1), C.int(d2)),
	}

	tt = tt.Reshape(ctx, t.Dim(3)/c, t2.Dim(3)/c)
	return tt
}

func (t *Tensor) AvgPool2D(ctx ml.Context, k, s int, p float32) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_pool_2d(ctx.(*Context).ctx, t.t, C.GGML_OP_POOL_AVG, C.int(k), C.int(k), C.int(s), C.int(s), C.float(p), C.float(p)),
	}
}

func (t *Tensor) Set(ctx ml.Context, t2 ml.Tensor, offset int, strides ...int) ml.Tensor {
	var tt *C.struct_ggml_tensor
	switch len(strides) {
	case 0:
		tt = C.ggml_set_1d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.size_t(offset))
	case 1:
		tt = C.ggml_set_2d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.size_t(offset), C.size_t(strides[0]))
	default:
		panic("unsupported number of dimensions")
	}

	return &Tensor{b: t.b, t: tt}
}

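// ScaledDotProductAttention computes softmax(scale*q*k^T + mask)*v, fused via
// ggml's flash attention kernel when enabled and as explicit matmul/softmax
// ops otherwise. A non-nil sinks tensor is applied with ggml's attention-sink
// variants of those ops.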
func (t *Tensor) ScaledDotProductAttention(ctx ml.Context, key, value, mask, sinks ml.Tensor, scale float64) ml.Tensor {
	var kqMask *C.struct_ggml_tensor
	if mask != nil {
		kqMask = mask.(*Tensor).t
	}

	query := t.Permute(ctx, 0, 2, 1, 3)
	key = key.Permute(ctx, 0, 2, 1, 3)

	if t.b.flashAttention {
		value = value.Permute(ctx, 0, 2, 1, 3)

		kqv := C.ggml_flash_attn_ext(ctx.(*Context).ctx, query.(*Tensor).t, key.(*Tensor).t, value.(*Tensor).t, kqMask, C.float(scale), 0, 0)
		if sinks != nil {
			C.ggml_flash_attn_ext_add_sinks(kqv, sinks.(*Tensor).t)
		}
		C.ggml_flash_attn_ext_set_prec(kqv, C.GGML_PREC_F32)
		return &Tensor{b: t.b, t: kqv}
	} else {
		kq := key.MulmatFullPrec(ctx, query)
		kq = &Tensor{
			b: t.b,
			t: C.ggml_soft_max_ext(ctx.(*Context).ctx, kq.(*Tensor).t, kqMask, C.float(scale), 0),
		}
		if sinks != nil {
			C.ggml_soft_max_add_sinks(kq.(*Tensor).t, sinks.(*Tensor).t)
		}

		kqv := value.Mulmat(ctx, kq)
		return kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)
	}
}

func (t *Tensor) Duplicate(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_dup(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) TopK(ctx ml.Context, k int) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_top_k(ctx.(*Context).ctx, t.t, C.int(k)),
	}
}

func (t *Tensor) Argsort(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_argsort(ctx.(*Context).ctx, t.t, C.GGML_SORT_ORDER_ASC),
	}
}

func (t *Tensor) Mean(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_mean(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Variance(ctx ml.Context) ml.Tensor {
	return t.Add(ctx, t.Mean(ctx).Scale(ctx, -1)).
		Sqr(ctx).
		SumRows(ctx).
		Scale(ctx, 1/float64(t.Dim(0)))
}

func (t *Tensor) Stddev(ctx ml.Context) ml.Tensor {
	return t.Variance(ctx).Sqrt(ctx)
}

func (t *Tensor) Sqr(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sqr(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Sqrt(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sqrt(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Clamp(ctx ml.Context, min, max float32) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_clamp(ctx.(*Context).ctx, t.t, C.float(min), C.float(max)),
	}
}