package ggml

// #cgo linux LDFLAGS: -lrt -lpthread -ldl -lstdc++ -lm
// #cgo windows LDFLAGS: -lpthread
// #cgo CPPFLAGS: -I${SRCDIR}/ggml/include
// #include <stdlib.h>
// #include <stdint.h>
// #include "ggml.h"
// #include "ggml-cpu.h"
// #include "ggml-backend.h"
import "C"

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"maps"
	"os"
	"runtime"
	"slices"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"unicode"
	"unsafe"

	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/fs"
	fsggml "github.com/ollama/ollama/fs/ggml"
	"github.com/ollama/ollama/logutil"
	"github.com/ollama/ollama/ml"
	ggml "github.com/ollama/ollama/ml/backend/ggml/ggml/src"
	"github.com/ollama/ollama/ml/nn/rope"

	"golang.org/x/sync/errgroup"
)

var (
	cpus, accels, gpus []C.ggml_backend_dev_t
	backends           map[C.ggml_backend_dev_t]C.ggml_backend_t
)

var initDevices = sync.OnceFunc(func() {
	ggml.OnceLoad()

	backends = make(map[C.ggml_backend_dev_t]C.ggml_backend_t)
	for i := range C.ggml_backend_dev_count() {
		d := C.ggml_backend_dev_get(i)

		switch C.ggml_backend_dev_type(d) {
		case C.GGML_BACKEND_DEVICE_TYPE_CPU:
			if len(cpus) == 0 {
				// only the first cpu device should be used
				cpus = append(cpus, d)
			}
		case C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
			accels = append(accels, d)
		case C.GGML_BACKEND_DEVICE_TYPE_GPU,
			C.GGML_BACKEND_DEVICE_TYPE_IGPU:
			gpus = append(gpus, d)
		}

		backends[d] = C.ggml_backend_dev_init(d, nil)
	}
})

type layerDevice struct {
	d  C.ggml_backend_dev_t
	bt C.ggml_backend_buffer_type_t
}

type Backend struct {
	// modelPath is the location of the model data
	modelPath string

	meta *fsggml.GGML

	// allocMemory means that memory should be allocated for tensors and not
	// just a dry run
	allocMemory bool

	// tensorLoadTargets maps from the name of the tensor in the file
	// to the name that is used by the model definition
	tensorLoadTargets map[string][]string

	schedMu       sync.Mutex // Only one Compute can run at a time
	sched         C.ggml_backend_sched_t
	schedBackends []C.ggml_backend_t
	schedBufts    []C.ggml_backend_buffer_type_t

	tensors map[string]*C.struct_ggml_tensor

	// input is the backend buffer type used for inputs
	input C.ggml_backend_buffer_type_t

	// output is the backend device used for outputs
	output C.ggml_backend_dev_t

	// layers maps each repeating layer to its assigned backend device and buffer type
	layers map[int]layerDevice

	// requiredMemory is the cumulative memory allocations needed by the backend
	requiredMemory *ml.BackendMemory

	// btDeviceMemory maps from a buffer type to the memory allocations associated with that device
	btDeviceMemory map[C.ggml_backend_buffer_type_t]*ml.DeviceMemory

	flashAttention bool

	// maxGraphNodes is the maximum allowed number of graph nodes in this scheduler
	maxGraphNodes int

	// weightBuffers are the GGML contexts and buffers for allocating weights
	weightBuffers map[*C.struct_ggml_context]C.ggml_backend_buffer_t
}

var once sync.Once

// New decodes the gguf model at modelPath and creates a backend for it,
// assigning tensors to devices and setting up the compute graph scheduler.
func New(modelPath string, params ml.BackendParams) (ml.Backend, error) {
	r, err := os.Open(modelPath)
	if err != nil {
		return nil, err
	}
	defer r.Close()

	meta, err := fsggml.Decode(r, -1)
	if err != nil {
		return nil, err
	}

	once.Do(func() {
		slog.Info(
			"",
			"architecture", meta.KV().Architecture(),
			"file_type", meta.KV().FileType(),
			"name", meta.KV().String("general.name"),
			"description", meta.KV().String("general.description"),
			"num_tensors", len(meta.Tensors().Items()),
			"num_key_values", len(meta.KV()),
		)
	})

	initDevices()

	var requiredMemory ml.BackendMemory
	btDeviceMemory := make(map[C.ggml_backend_buffer_type_t]*ml.DeviceMemory)

	type deviceBufferType struct {
		d   C.ggml_backend_dev_t
		bts []C.ggml_backend_buffer_type_t
	}

	blocks := int(meta.KV().BlockCount())

	// create list of buffer types for the cpu
	cpuDeviceBufferType := deviceBufferType{d: C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_CPU)}
	for _, d := range append(accels, append(gpus, cpus...)...) {
		switch C.ggml_backend_dev_type(d) {
		case C.GGML_BACKEND_DEVICE_TYPE_CPU,
			C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
			bt := C.ggml_backend_dev_buffer_type(d)
			cpuDeviceBufferType.bts = append(cpuDeviceBufferType.bts, bt)

			btDeviceMemory[C.ggml_backend_dev_buffer_type(d)] = &requiredMemory.CPU
		}
	}

	requiredMemory.CPU.Name = C.GoString(C.ggml_backend_dev_name(cpuDeviceBufferType.d))
	var props C.struct_ggml_backend_dev_props
	C.ggml_backend_dev_get_props(cpuDeviceBufferType.d, &props)
	requiredMemory.CPU.ID = C.GoString(props.id)
	requiredMemory.CPU.Library = C.GoString(props.library)
	requiredMemory.CPU.Weights = make([]uint64, blocks+1)
	requiredMemory.CPU.Cache = make([]uint64, blocks+1)

	// create list of buffer types for each gpu
	var gpuDeviceBufferTypes []deviceBufferType
	requiredMemory.GPUs = make([]ml.DeviceMemory, len(gpus))
	for i, d := range gpus {
		bt := C.ggml_backend_dev_buffer_type(d)
		gpuDeviceBufferTypes = append(gpuDeviceBufferTypes, deviceBufferType{
			d:   d,
			bts: append([]C.ggml_backend_buffer_type_t{bt}, cpuDeviceBufferType.bts...),
		})

		btDeviceMemory[bt] = &requiredMemory.GPUs[i]
		requiredMemory.GPUs[i].Name = C.GoString(C.ggml_backend_dev_name(d))
		var props C.struct_ggml_backend_dev_props
		C.ggml_backend_dev_get_props(d, &props)
		requiredMemory.GPUs[i].ID = C.GoString(props.id)
		requiredMemory.GPUs[i].Library = C.GoString(props.library)
		requiredMemory.GPUs[i].Weights = make([]uint64, blocks+1)
		requiredMemory.GPUs[i].Cache = make([]uint64, blocks+1)
	}

	// inputs always use cpu
	input := cpuDeviceBufferType

	assignLayer := func(layer int) deviceBufferType {
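		// a layer is placed on the gpu that params.GPULayers assigns it
		// to; unassigned layers fall back to the cpu buffer types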
		for _, p := range params.GPULayers {
			for _, l := range p.Layers {
				if l == layer {
					for i := range requiredMemory.GPUs {
						if requiredMemory.GPUs[i].DeviceID == p.DeviceID {
							return gpuDeviceBufferTypes[i]
						}
					}

					return cpuDeviceBufferType
				}
			}
		}

		return cpuDeviceBufferType
	}

	// repeating layers are assigned to devices according to the requested
	// gpu layers, with unassigned layers falling back to the cpu
	layers := make([]deviceBufferType, blocks)
	for i := range layers {
		layers[i] = assignLayer(i)
	}

	// the output layer (index == blocks) is assigned the same way
	output := assignLayer(blocks)

	maxTensors := len(meta.Tensors().Items())
	maxTensors += 1
	// each layer has at most 2 extra tensors for rope operations
	maxTensors += blocks * 2

	type tensor struct {
		source *fsggml.Tensor
		target string
	}

	// some tensors are mapped to different names so keep a list
	targets := make(map[string][]string)

	// contexts are shared by tensors of the same buffer type
	ctxs := make(map[C.ggml_backend_buffer_type_t]*C.struct_ggml_context)
	createTensor := func(t tensor, bts []C.ggml_backend_buffer_type_t, layer int) *C.struct_ggml_tensor {
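		// the tensor is placed in the first buffer type in bts; if a
		// tensor with the same (possibly remapped) name was already
		// created, it is reused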
		for _, bt := range bts {
			if _, ok := ctxs[bt]; !ok {
				ctxs[bt] = C.ggml_init(C.struct_ggml_init_params{
					mem_size: C.ggml_tensor_overhead() * C.size_t(maxTensors),
					no_alloc: true,
				})
			}

			targets[t.source.Name] = append(targets[t.source.Name], t.target)

			name := t.source.Name
			if t.target != "" {
				name = t.target
			}

			cname := C.CString(name)
			defer C.free(unsafe.Pointer(cname))
			if tt := C.ggml_get_tensor(ctxs[bt], cname); tt != nil {
				return tt
			}

			kind := t.source.Kind
			if t.source.Kind == 4 {
				// transform raw mxfp4 stream to ggml mxfp4 format
				kind = 39
			} else if t.source.Kind == uint32(fsggml.TensorTypeBF16) && strings.HasSuffix(t.source.Name, "_exps.bias") {
				// transform "_exps.bias" from bf16 to fp32; add_ids only supports fp32 tensors
				kind = uint32(fsggml.TensorTypeF32)
			}

			tt := C.ggml_new_tensor(ctxs[bt], kind, C.int(len(t.source.Shape)), (*C.int64_t)(unsafe.Pointer(&t.source.Shape[0])))
			C.ggml_set_name(tt, cname)

			logutil.Trace("created tensor", "name", name, "shape", t.source.Shape, "dtype", t.source.Kind, "buffer_type", C.GoString(C.ggml_backend_buft_name(bt)))

			size := pad(C.ggml_backend_buft_get_alloc_size(bt, tt), C.ggml_backend_buft_get_alignment(bt))
			if layer == -1 {
				requiredMemory.InputWeights += uint64(size)
			} else {
				btDeviceMemory[bt].Weights[layer] += uint64(size)
			}

			//nolint:staticcheck // TODO: check if buffer type supports this tensor
			return tt
		}

		return nil
	}

	contains := func(s string, parts ...string) bool {
		split := strings.Split(s, ".")
		for _, part := range parts {
			if slices.Contains(split, part) {
				return true
			}
		}

		return false
	}

	for _, t := range meta.Tensors().Items() {
		switch {
		case contains(t.Name, "position_embd", "token_embd", "token_norm_embd", "token_types"):
			createTensor(tensor{source: t}, input.bts, -1)
			if _, ok := meta.Tensors().GroupLayers()["output"]; !ok && t.Name == "token_embd.weight" {
				createTensor(tensor{source: t, target: "output.weight"}, output.bts, blocks)
			}
		case contains(t.Name, "cls", "output", "output_norm",
			"altup_proj", "altup_unembd_proj",
			"per_layer_token_embd", "per_layer_model_proj", "per_layer_proj_norm"):
			createTensor(tensor{source: t}, output.bts, blocks)
		case strings.HasPrefix(t.Name, "v.") || strings.HasPrefix(t.Name, "mm."):
			// TODO: assign vision tensors to the gpu if possible
			createTensor(tensor{source: t}, output.bts, blocks)
		case contains(t.Name, "rope_freqs", "rope_factors_long", "rope_factors_short"):
			// these tensors should be repeated per layer
			for i, layer := range layers {
				createTensor(tensor{
					source: t,
					target: "blk." + strconv.Itoa(i) + "." + t.Name,
				}, layer.bts, i)
			}
		default:
			layerIndex := -1
			if fields := strings.FieldsFunc(t.Name, func(r rune) bool { return !unicode.IsNumber(r) }); len(fields) > 0 {
				if i, err := strconv.Atoi(fields[0]); err == nil {
					layerIndex = i
				}
			}

			if layerIndex >= 0 {
				createTensor(tensor{source: t}, layers[layerIndex].bts, layerIndex)
			} else {
				// load all other tensors on the cpu
				createTensor(tensor{source: t}, input.bts, -1)
			}
		}
	}

	// map tensor names to tensors for easy lookup later
	tensors := make(map[string]*C.struct_ggml_tensor)
	for _, c := range ctxs {
		for t := C.ggml_get_first_tensor(c); t != nil; t = C.ggml_get_next_tensor(c, t) {
			tensors[C.GoString(C.ggml_get_name(t))] = t
		}
	}

	// map devices to backend buffer types so new tensors can be assigned to the correct device
	deviceBufferTypes := make(map[C.ggml_backend_dev_t]C.ggml_backend_buffer_type_t)

	// create backends and buffer types used for the compute graph scheduler
	var schedBackends []C.ggml_backend_t
	var schedBufts []C.ggml_backend_buffer_type_t
	for _, d := range append(gpus, append(accels, cpus...)...) {
		b := backends[d]
		bt := C.ggml_backend_get_default_buffer_type(b)

		// always include the cpu as a fallback; otherwise only use the
		// devices where we assigned layers
		if !slices.Contains(cpuDeviceBufferType.bts, bt) {
			if c, ok := ctxs[bt]; !ok || C.ggml_get_first_tensor(c) == nil {
				continue
			}
		}

		deviceBufferTypes[d] = bt

		schedBackends = append(schedBackends, b)
		schedBufts = append(schedBufts, bt)

		if C.ggml_backend_is_cpu(b) {
			// set number of threads for cpu backend
			C.ggml_backend_cpu_set_n_threads(b, C.int(Threads(params.NumThreads)))
		}
	}

	maxGraphNodes := max(8192, len(meta.Tensors().Items())*5)

	sched := C.ggml_backend_sched_new_ext(
		(*C.ggml_backend_t)(unsafe.Pointer(&schedBackends[0])),
		(*C.ggml_backend_buffer_type_t)(unsafe.Pointer(&schedBufts[0])),
		C.int(len(schedBackends)),
		C.size_t(maxGraphNodes),
		C._Bool(false),
		C._Bool(false),
		C._Bool(params.AllocMemory),
	)

	// allocate buffers for each context
	bbs := make(map[*C.struct_ggml_context]C.ggml_backend_buffer_t, len(ctxs))
	for bt, c := range ctxs {
		if C.ggml_get_first_tensor(c) == nil {
			continue
		}

		b := C.ggml_backend_alloc_ctx_tensors_from_buft(c, bt)
		if b == nil {
			for _, b := range bbs {
				C.ggml_backend_buffer_free(b)
			}

			for _, ctx := range ctxs {
				C.ggml_free(ctx)
			}

			panic(ml.ErrNoMem{BackendMemory: requiredMemory})
		}

		C.ggml_backend_buffer_set_usage(b, C.GGML_BACKEND_BUFFER_USAGE_WEIGHTS)
		bbs[c] = b
	}

	for bs := range maps.Values(bbs) {
		logutil.Trace("model weights", "buffer", C.GoString(C.ggml_backend_buffer_name(bs)),
			"size", format.HumanBytes2(uint64(C.ggml_backend_buffer_get_size(bs))))
	}

	return &Backend{
		modelPath:         modelPath,
		allocMemory:       params.AllocMemory,
		flashAttention:    params.FlashAttention,
		meta:              meta,
		tensorLoadTargets: targets,
		tensors:           tensors,
		sched:             sched,
		schedBackends:     schedBackends,
		schedBufts:        schedBufts,
		input:             deviceBufferTypes[input.d],
		output:            output.d,
		layers: func() map[int]layerDevice {
			m := make(map[int]layerDevice)
			for i, layer := range layers {
				m[i] = layerDevice{
					d:  layer.d,
					bt: deviceBufferTypes[layer.d],
				}
			}
			return m
		}(),
		requiredMemory: &requiredMemory,
		btDeviceMemory: btDeviceMemory,
		maxGraphNodes:  maxGraphNodes,
		weightBuffers:  bbs,
	}, nil
}

func init() {
	ml.RegisterBackend("ggml", New)
}
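
// The backend registered above is normally obtained through the ml
// package's backend registry rather than by calling New directly; a
// minimal usage sketch (assuming an ml.NewBackend helper that resolves
// registered backends by name) would be:
//
//	backend, err := ml.NewBackend(modelPath, params)
//	if err != nil { /* handle error */ }
//	defer backend.Close()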

func (b *Backend) Close() {
	if b == nil {
		return
	}

	for ctx, b := range b.weightBuffers {
		C.ggml_backend_buffer_free(b)
		C.ggml_free(ctx)
	}

	C.ggml_backend_sched_free(b.sched)
}

// Load reads tensor data from the model file into the allocated backend
// buffers, reporting progress as a fraction of the total bytes loaded.
func (b *Backend) Load(ctx context.Context, progress func(float32)) error {
	if !b.allocMemory {
		return errors.New("cannot load model without memory allocation")
	}

	// Mimic llama runner logs summarizing layers and memory
	gpuLayers := 0
	for layer := range maps.Values(b.layers) {
		switch C.ggml_backend_dev_type(layer.d) {
		case C.GGML_BACKEND_DEVICE_TYPE_GPU,
			C.GGML_BACKEND_DEVICE_TYPE_IGPU:
			gpuLayers++
		}
	}
	slog.Info(fmt.Sprintf("offloading %d repeating layers to GPU", gpuLayers))

	switch C.ggml_backend_dev_type(b.output) {
	case C.GGML_BACKEND_DEVICE_TYPE_CPU:
		slog.Info("offloading output layer to CPU")
	case C.GGML_BACKEND_DEVICE_TYPE_GPU,
		C.GGML_BACKEND_DEVICE_TYPE_IGPU:
		slog.Info("offloading output layer to GPU")
		gpuLayers++
	case C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
		slog.Info("offloading output layer to ACCEL")
	}
	slog.Info(fmt.Sprintf("offloaded %d/%d layers to GPU", gpuLayers, len(b.layers)+1))

	var doneBytes atomic.Uint64
	totalBytes := uint64(b.meta.Length) - b.meta.Tensors().Offset

	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(runtime.GOMAXPROCS(0))
	for _, t := range b.meta.Tensors().Items() {
		t := t
		g.Go(func() error {
			tts := make([]*C.struct_ggml_tensor, max(1, len(b.tensorLoadTargets[t.Name])))
			for i := range tts {
				target := b.tensorLoadTargets[t.Name][i]
				if target == "" {
					target = t.Name
				}

				tt, ok := b.tensors[target]
				if !ok {
					return fmt.Errorf("unassigned tensor: %s", t.Name)
				}

				tts[i] = tt
			}

			// Create a new FD for each goroutine so that each FD is read sequentially, rather than
			// seeking around within an FD shared between all goroutines.
			file, err := os.Open(b.modelPath)
			if err != nil {
				slog.Warn("file open error", "file", b.modelPath, "error", err)
				return err
			}
			defer file.Close()
			sr := io.NewSectionReader(file, int64(b.meta.Tensors().Offset+t.Offset), int64(t.Size()))

			if t.Kind == 4 && tts[0]._type == 39 {
				// source is mxfp4, target is ggml mxfp4

				const BS = 17                             // MXFP4 block size
				bts := make([]byte, 8*BS*format.KibiByte) // ~128k block aligned
				var s uint64
				var tmp [16]byte
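				// each 17-byte block is a scale byte followed by 16 bytes of
				// packed 4-bit values; the loop below interleaves nibbles from
				// byte i of the first half with byte i+8 of the second half:
				// (a, b) -> (a&0x0F)|(b<<4), (a>>4)|(b&0xF0)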
				for s < t.Size() {
					// Stop if either the parent context has been canceled or if any of the other tensors returned an error
					if err := ctx.Err(); err != nil {
						return err
					}
					n, err := io.ReadFull(sr, bts[:min(len(bts), int(t.Size()-s))])
					if err != nil {
						slog.Warn("file read error", "file", b.modelPath, "error", err)
						return err
					}
					for j := range n / BS {
						for i := 1; i < 9; i++ {
							// transform a1b2c3 ... x7y8z9 -> 71xa82yb93zc
							a, b := bts[j*BS+i], bts[j*BS+i+8]
							tmp[2*(i-1)] = (a & 0x0F) | (b << 4)
							tmp[2*(i-1)+1] = (a >> 4) | (b & 0xF0)
						}
						copy(bts[j*BS+1:j*BS+17], tmp[:])
					}

					for _, tt := range tts {
						C.ggml_backend_tensor_set(tt, unsafe.Pointer(&bts[0]), C.size_t(s), C.size_t(n))
					}

					s += uint64(n)

					if progress != nil {
						done := doneBytes.Add(uint64(n))
						progress(float32(done) / float32(totalBytes))
					}
				}
				return nil
			} else if strings.HasSuffix(t.Name, "_exps.bias") && t.Kind == 30 && tts[0]._type == 0 {
				// source is bf16, target is ggml fp32

				// data is bf16 but we need to convert to fp32
				bts := make([]byte, 128*format.KibiByte)
				var e uint64
				for e < t.Elements() {
					// Stop if either the parent context has been canceled or if any of the other tensors returned an error
					if err := ctx.Err(); err != nil {
						return err
					}
					n, err := io.ReadFull(sr, bts[:min(len(bts), int(t.Elements()-e)*2)])
					if err != nil {
						slog.Warn("file read error", "file", b.modelPath, "error", err)
						return err
					}
					fp32 := ConvertToF32(bts, uint32(fsggml.TensorTypeBF16), uint64(n/2))

					for _, tt := range tts {
						C.ggml_backend_tensor_set(tt, unsafe.Pointer(&fp32[0]), C.size_t(e*4), C.size_t(n*2))
					}
					e += uint64(n / 2)
					if progress != nil {
						done := doneBytes.Add(uint64(n))
						progress(float32(done) / float32(totalBytes))
					}
				}
				return nil
			}

			bts := make([]byte, 128*format.KibiByte)

			var s uint64
			for s < t.Size() {
				// Stop if either the parent context has been canceled or if any of the other tensors returned an error
				if err := ctx.Err(); err != nil {
					return err
				}

				n, err := io.ReadFull(sr, bts[:min(len(bts), int(t.Size()-s))])
				if err != nil {
					slog.Warn("file read error", "file", b.modelPath, "error", err)
					return err
				}

				for _, tt := range tts {
					C.ggml_backend_tensor_set(tt, unsafe.Pointer(&bts[0]), C.size_t(s), C.size_t(n))
				}

				s += uint64(n)

				if progress != nil {
					done := doneBytes.Add(uint64(n))
					progress(float32(done) / float32(totalBytes))
				}
			}

			return nil
		})
	}

	// Cleanup any backend state from devices that we didn't end up using
nextDevice:
	for _, d := range append(gpus, append(accels, cpus...)...) {
		for _, backend := range b.schedBackends {
			if d == C.ggml_backend_get_device(backend) {
				continue nextDevice
			}
		}

		C.ggml_backend_dev_reset(d)
	}

	if err := g.Wait(); err != nil {
		return err
	}

	return nil
}

func (b *Backend) BackendMemory() ml.BackendMemory {
	return *b.requiredMemory
}

func (b *Backend) Config() fs.Config {
	return b.meta.KV()
}

func (b *Backend) Get(name string) ml.Tensor {
	if t, ok := b.tensors[name]; ok {
		return &Tensor{b: b, t: t}
	}

	return nil
}

func (b *Backend) NewContext() ml.Context {
	return b.NewContextSize(b.maxGraphNodes)
}

func (b *Backend) NewContextSize(n int) ml.Context {
	if n > b.maxGraphNodes {
		panic(fmt.Errorf("requested number of graph nodes (%v) for new context exceeds maximum (%v)", n, b.maxGraphNodes))
	}

	var allocatedBuffers []C.ggml_backend_buffer_t

	return &Context{
		b:             b,
		maxGraphNodes: n,
		ctx: C.ggml_init(C.struct_ggml_init_params{
			mem_size: C.size_t(n)*C.ggml_tensor_overhead() + C.ggml_graph_overhead_custom(C.size_t(n), false),
			no_alloc: true,
		}),
		allocatedBuffers: &allocatedBuffers,
		layer:            -1,
	}
}

func (b *Backend) CacheConfig() ml.CacheConfig {
	if b.flashAttention {
		return ml.CacheConfig{CachePadding: 256, MaskDType: ml.DTypeF16, MaskBatchPadding: C.GGML_KQ_MASK_PAD}
	} else {
		return ml.CacheConfig{CachePadding: 32, PermutedV: true}
	}
}

func (b *Backend) BackendDevices() []ml.DeviceInfo {
	deviceInfos := []ml.DeviceInfo{}
	for _, dev := range gpus {
		// If we have a model loaded, and it's only loaded on a subset of the devices
		// skip idle/unused devices to avoid initializing them and causing VRAM allocations
		if b.allocMemory {
			idleDev := true
			for _, backend := range b.schedBackends {
				if dev == C.ggml_backend_get_device(backend) {
					idleDev = false
					break
				}
			}
			if idleDev {
				slog.Debug("skipping unused backend device", "description", C.GoString(C.ggml_backend_dev_description(dev)))
				continue
			}
		}

		info := ml.DeviceInfo{}
		props := C.struct_ggml_backend_dev_props{}
		C.ggml_backend_dev_get_props(dev, &props)
		info.Name = C.GoString(props.name)
		info.Description = C.GoString(props.description)
		info.ID = C.GoString(props.id)
		info.Library = C.GoString(props.library)
		info.ComputeMajor = (int)(props.compute_major)
		info.ComputeMinor = (int)(props.compute_minor)
		info.DriverMajor = (int)(props.driver_major)
		info.DriverMinor = (int)(props.driver_minor)
		info.Integrated = props.integrated != 0
		info.PCIID = fmt.Sprintf("%02x:%02x.%x", props.pci_bus_id, props.pci_device_id, props.pci_domain_id)
		info.LibraryPath = ggml.LibPaths()
		if props.numeric_id != nil {
			info.FilteredID = C.GoString(props.numeric_id)
		}

		C.ggml_backend_dev_memory(dev, &props.memory_free, &props.memory_total)
		info.TotalMemory = (uint64)(props.memory_total)
		info.FreeMemory = (uint64)(props.memory_free)

		deviceInfos = append(deviceInfos, info)
	}
	return deviceInfos
}

type Context struct {
	b *Backend

	ctx   *C.struct_ggml_context
	graph *C.struct_ggml_cgraph

	// buft is the buffer type used for new tensors
	buft C.ggml_backend_buffer_type_t

	// allocatedBuffers are buffers for tensors that we have allocated in this context
	// so that we can free them when we close the context
	allocatedBuffers *[]C.ggml_backend_buffer_t

	// maxGraphNodes is the maximum allowed number of graph nodes in this context
	maxGraphNodes int

	// layer is the graph layer that this context is allocating for - assumed to be cache
	layer int
}

func (c *Context) Input() ml.Context {
	if c.b.input != nil {
		return &Context{
			b:                c.b,
			ctx:              c.ctx,
			buft:             c.b.input,
			allocatedBuffers: c.allocatedBuffers,
			maxGraphNodes:    c.maxGraphNodes,
			layer:            -1,
		}
	}

	return c
}

func (c *Context) Layer(i int) ml.Context {
	if layer, ok := c.b.layers[i]; ok {
		return &Context{
			b:                c.b,
			ctx:              c.ctx,
			buft:             layer.bt,
			allocatedBuffers: c.allocatedBuffers,
			maxGraphNodes:    c.maxGraphNodes,
			layer:            i,
		}
	}

	return c
}

func (c *Context) Forward(tensors ...ml.Tensor) ml.Context {
	if c.graph == nil {
		c.graph = C.ggml_new_graph_custom(c.ctx, C.size_t(c.maxGraphNodes), false)
	}

	for _, tensor := range tensors {
		C.ggml_build_forward_expand(c.graph, tensor.(*Tensor).t)
	}

	return c
}

func (c *Context) Compute(tensors ...ml.Tensor) {
	c.ComputeWithNotify(nil, tensors...)
}

func (c *Context) ComputeWithNotify(cb func(), tensors ...ml.Tensor) {
	c.b.schedMu.Lock()
	defer c.b.schedMu.Unlock()
	if cb != nil {
		go cb()
	}
	if status := C.ggml_backend_sched_graph_compute_async(c.b.sched, c.graph); status != C.GGML_STATUS_SUCCESS {
		panic(fmt.Errorf("error computing ggml graph: %v", status))
	}
	C.ggml_backend_sched_reset(c.b.sched)

	// output tensors synchronize with the scheduler lazily, on first read
	needSync := true
	sync := func() {
		if needSync {
			C.ggml_backend_sched_synchronize(c.b.sched)
			needSync = false
		}
	}

	for _, t := range tensors {
		if C.ggml_nbytes(t.(*Tensor).t) > 0 {
			t.(*Tensor).sync = sync
		}
	}
}

func (c *Context) Reserve() {
	reserved := C.ggml_backend_sched_reserve(c.b.sched, c.graph)

	slog.Debug("compute graph", "nodes", C.ggml_graph_n_nodes(c.graph), "splits", C.ggml_backend_sched_get_n_splits(c.b.sched))

	// Reserve may get called multiple times for different graphs - we just want the last run, which will contain the max allocations
	for _, bt := range c.b.schedBufts {
		c.b.btDeviceMemory[bt].Graph = 0
	}

	for i := range c.b.schedBackends {
		bufferSize := C.ggml_backend_sched_get_attempted_buffer_size(c.b.sched, c.b.schedBackends[i])
		c.b.btDeviceMemory[c.b.schedBufts[i]].Graph += uint64(bufferSize)

		logutil.Trace("compute graph", "backend", C.GoString(C.ggml_backend_name(c.b.schedBackends[i])),
			"buffer_type", C.GoString(C.ggml_backend_buft_name(c.b.schedBufts[i])), "size", format.HumanBytes2(uint64(bufferSize)))
	}

	if !reserved {
		panic(ml.ErrNoMem{BackendMemory: *c.b.requiredMemory})
	}
}

func (c *Context) MaxGraphNodes() int {
	return c.maxGraphNodes
}

// shapeToGGML converts a Go int shape to ggml's int64 dims; the returned
// pointer references the slice's backing array.
func shapeToGGML(shape []int) *C.int64_t {
	sh := make([]C.int64_t, len(shape))
	for i, s := range shape {
		sh[i] = C.int64_t(s)
	}

	return &sh[0]
}

func pad(length, pad C.size_t) C.size_t {
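	// round length up to the next multiple of pad, e.g. pad(100, 64) == 128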
	return ((length + pad - 1) / pad) * pad
}

func (c *Context) newTensor(dtype ml.DType, shape []int) ml.Tensor {
	if c.buft == nil {
		panic("set Input or Layer before creating tensors")
	}

	cdtype := ggmlDType(dtype)

	if len(shape) < 1 || shape[0] == 0 {
		var shape C.int64_t = 0
		return &Tensor{b: c.b, t: C.ggml_new_tensor(c.ctx, cdtype, 1, &shape)}
	} else if len(shape) > 4 {
		panic("unsupported number of dimensions")
	}

	for _, dim := range shape {
		if dim < 1 {
			panic("invalid shape")
		}
	}

	t := C.ggml_new_tensor(c.ctx, cdtype, C.int(len(shape)), shapeToGGML(shape))
	size := pad(C.ggml_backend_buft_get_alloc_size(c.buft, t), C.ggml_backend_buft_get_alignment(c.buft))

	b := C.ggml_backend_buft_alloc_buffer(c.buft, size)
	if c.layer >= 0 {
		c.b.btDeviceMemory[c.buft].Cache[c.layer] += uint64(size)
	}

	if b == nil {
		panic(ml.ErrNoMem{BackendMemory: *c.b.requiredMemory})
	}

	*c.allocatedBuffers = append(*c.allocatedBuffers, b)
	C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b))
	return &Tensor{b: c.b, t: t}
}

func (c *Context) Empty(dtype ml.DType, shape ...int) ml.Tensor {
	return c.newTensor(dtype, shape)
}

func (c *Context) Zeros(dtype ml.DType, shape ...int) ml.Tensor {
	t := c.newTensor(dtype, shape)
	if c.b.allocMemory {
		C.ggml_set_zero(t.(*Tensor).t)
	}
	return t
}

func checkShape[S ~[]E, E any](s S, shape ...int) {
	n := len(s)

	if n == 0 {
		return
	}

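	// divide n by each dimension; a matching shape leaves exactly 1,
	// e.g. a slice of length 6 passes for shape (3, 2) but panics for (4, 3)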
	for _, v := range shape {
		n /= v
	}

	if n != 1 {
		panic(fmt.Errorf("invalid shape: %v", shape))
	}
}

func (c *Context) FromFloatSlice(s []float32, shape ...int) ml.Tensor {
	checkShape(s, shape...)

	t := c.newTensor(ml.DTypeF32, shape)

	if c.b.allocMemory && len(s) > 0 {
		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
	}

	return t
}

func (c *Context) FromIntSlice(s []int32, shape ...int) ml.Tensor {
	checkShape(s, shape...)

	t := c.newTensor(ml.DTypeI32, shape)

	if c.b.allocMemory && len(s) > 0 {
		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
	}

	return t
}

func (c Context) Arange(start, stop, step float32, dtype ml.DType) ml.Tensor {
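	// e.g. Arange(0, 4, 1, ml.DTypeI32) produces a 1-D tensor holding [0 1 2 3]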
	switch dtype {
	case ml.DTypeF32:
		// ggml_arange creates a float32 tensor
		return &Tensor{
			b: c.b,
			t: C.ggml_arange(c.ctx, C.float(start), C.float(stop), C.float(step)),
		}
	case ml.DTypeI32:
		// ggml_cast does not support float32 to int32 conversion
		arange := make([]int32, 0, int((stop-start)/step))
		for i := start; i < stop; i += step {
			arange = append(arange, int32(i))
		}

		return c.Input().FromIntSlice(arange, len(arange))
	default:
		panic("unsupported dtype for arange")
	}
}

func (c *Context) Close() {
	if c != nil {
		for _, b := range *c.allocatedBuffers {
			C.ggml_backend_buffer_free(b)
		}
		*c.allocatedBuffers = nil

		C.ggml_free(c.ctx)
	}
}

type Tensor struct {
	b    *Backend
	t    *C.struct_ggml_tensor
	sync func()
}

func (t *Tensor) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("name", C.GoString(C.ggml_get_name(t.t))),
		slog.String("type", C.GoString(C.ggml_type_name(t.t._type))),
		slog.Any("shape", t.Shape()),
	)
}

func (t *Tensor) Dim(n int) int {
	return int(t.t.ne[n])
}

func (t *Tensor) Stride(n int) int {
	return int(t.t.nb[n])
}

func (t *Tensor) Shape() []int {
	shape := make([]int, C.ggml_n_dims(t.t))
	for i := range shape {
		shape[i] = t.Dim(i)
	}

	return shape
}

func (t *Tensor) Bytes() (data []byte) {
	if t.sync != nil {
		data = make([]byte, C.ggml_nbytes(t.t))

		t.sync()
		C.ggml_backend_tensor_get(t.t, unsafe.Pointer(&data[0]), 0, C.ggml_nbytes(t.t))
	}

	return
}

func (t *Tensor) Floats() (data []float32) {
	if t.sync != nil {
		data = make([]float32, C.ggml_nelements(t.t))

		t.sync()
		C.ggml_backend_tensor_get(t.t, unsafe.Pointer(&data[0]), 0, C.ggml_nbytes(t.t))
	}

	return
}

func (t *Tensor) SetValueFromIntSlice(s []int32) {
	if len(s) > 0 {
		C.ggml_backend_tensor_set(t.t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.t))
	}
}

func (t *Tensor) DType() ml.DType {
	switch t.t._type {
	case C.GGML_TYPE_F32:
		return ml.DTypeF32
	case C.GGML_TYPE_F16:
		return ml.DTypeF16
	case C.GGML_TYPE_Q8_0:
		return ml.DTypeQ80
	case C.GGML_TYPE_Q4_0:
		return ml.DTypeQ40
	case C.GGML_TYPE_I32:
		return ml.DTypeI32
	case C.GGML_TYPE_MXFP4:
		return ml.DTypeMXFP4
	default:
		return ml.DTypeOther
	}
}

func ggmlDType(dtype ml.DType) uint32 {
	switch dtype {
	case ml.DTypeF32:
		return C.GGML_TYPE_F32
	case ml.DTypeF16:
		return C.GGML_TYPE_F16
	case ml.DTypeQ80:
		return C.GGML_TYPE_Q8_0
	case ml.DTypeQ40:
		return C.GGML_TYPE_Q4_0
	case ml.DTypeI32:
		return C.GGML_TYPE_I32
	case ml.DTypeMXFP4:
		return C.GGML_TYPE_MXFP4
	default:
		panic("unsupported dtype")
	}
}

func (t *Tensor) Cast(ctx ml.Context, dtype ml.DType) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_cast(ctx.(*Context).ctx, t.t, ggmlDType(dtype)),
	}
}

func (t *Tensor) Neg(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_neg(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Add(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_add(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Sub(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sub(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Repeat(ctx ml.Context, dim, n int) ml.Tensor {
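	// e.g. repeating a 2x3 tensor along dim 0 with n=2 yields a 4x3 tensor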
	if dim < 0 || dim >= C.GGML_MAX_DIMS {
		panic("invalid dimension")
	}

	shape := make([]C.int64_t, C.GGML_MAX_DIMS)
	for i := range C.GGML_MAX_DIMS {
		if i == dim {
			shape[i] = C.int64_t(t.Dim(i) * n)
		} else {
			shape[i] = C.int64_t(t.Dim(i))
		}
	}

	tmpl := C.ggml_new_tensor(ctx.(*Context).ctx, t.t._type, C.int(len(shape)), unsafe.SliceData(shape))
	return &Tensor{
		b: t.b,
		t: C.ggml_repeat(ctx.(*Context).ctx, t.t, tmpl),
	}
}

func (t *Tensor) Stack(ctx ml.Context, dim int, s ...ml.Tensor) ml.Tensor {
	if len(s) > 0 {
		return t.Concat(ctx, s[0].Stack(ctx, dim, s[1:]...), dim)
	}

	return t
}

func (t *Tensor) Concat(ctx ml.Context, t2 ml.Tensor, dim int) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_concat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(dim)),
	}
}

func (t *Tensor) Contiguous(ctx ml.Context, shape ...int) ml.Tensor {
	switch len(shape) {
	case 0:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont(ctx.(*Context).ctx, t.t),
		}
	case 1:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0])),
		}
	case 2:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_2d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1])),
		}
	case 3:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_3d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2])),
		}
	case 4:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_4d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2]), C.int64_t(shape[3])),
		}
	default:
		panic("unsupported number of dimensions")
}

func (t *Tensor) Mul(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
1190
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1191
1192
1193
1194
		t: C.ggml_mul(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Div(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_div(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Mulmat(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_mul_mat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) MulmatFullPrec(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	mul := C.ggml_mul_mat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t)
	C.ggml_mul_mat_set_prec(mul, C.GGML_PREC_F32)

	return &Tensor{
		b: t.b,
		t: mul,
	}
}

func (t *Tensor) MulmatID(ctx ml.Context, t2, ids ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_mul_mat_id(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, ids.(*Tensor).t),
	}
}

func (t *Tensor) AddID(ctx ml.Context, t2, ids ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_add_id(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, ids.(*Tensor).t),
	}
}

func (t *Tensor) L2Norm(ctx ml.Context, eps float32) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_l2_norm(ctx.(*Context).ctx, t.t, C.float(eps)),
	}
}

func (t *Tensor) LayerNorm(ctx ml.Context, w, b ml.Tensor, eps float32) ml.Tensor {
	tt := C.ggml_norm(ctx.(*Context).ctx, t.t, C.float(eps))
	if w != nil {
		tt = C.ggml_mul(ctx.(*Context).ctx, tt, w.(*Tensor).t)
		if b != nil {
			tt = C.ggml_add(ctx.(*Context).ctx, tt, b.(*Tensor).t)
		}
	}

	return &Tensor{b: t.b, t: tt}
}

func (t *Tensor) RMSNorm(ctx ml.Context, w ml.Tensor, eps float32) ml.Tensor {
	tt := C.ggml_rms_norm(ctx.(*Context).ctx, t.t, C.float(eps))
	if w != nil {
		tt = C.ggml_mul(ctx.(*Context).ctx, tt, w.(*Tensor).t)
	}

	return &Tensor{b: t.b, t: tt}
}

func (t *Tensor) Pad(ctx ml.Context, shape ...int) ml.Tensor {
	if len(shape) != 4 {
		panic("expected 4 dimensions")
	} else if shape[3] != 0 {
		panic("cuda does not support 4d tensors")
	}

	return &Tensor{
		b: t.b,
		t: C.ggml_pad(ctx.(*Context).ctx, t.t, C.int(shape[0]), C.int(shape[1]), C.int(shape[2]), C.int(shape[3])),
	}
}

func (t *Tensor) Permute(ctx ml.Context, shape ...int) ml.Tensor {
	if len(shape) != 4 {
		panic("expected 4 dimensions")
	}

	return &Tensor{
		b: t.b,
		t: C.ggml_permute(ctx.(*Context).ctx, t.t, C.int(shape[0]), C.int(shape[1]), C.int(shape[2]), C.int(shape[3])),
	}
}

func (t *Tensor) Rows(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_get_rows(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Copy(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_cpy(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Reshape(ctx ml.Context, shape ...int) ml.Tensor {
	switch len(shape) {
	case 1:
		return &Tensor{
			b: t.b,
			t: C.ggml_reshape_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0])),
		}
	case 2:
		return &Tensor{
			b: t.b,
			t: C.ggml_reshape_2d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1])),
		}
	case 3:
		return &Tensor{
			b: t.b,
			t: C.ggml_reshape_3d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2])),
		}
	case 4:
		return &Tensor{
			b: t.b,
			t: C.ggml_reshape_4d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2]), C.int64_t(shape[3])),
		}
	default:
		panic("unsupported number of dimensions")
	}
}

func (t *Tensor) Scale(ctx ml.Context, s float64) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_scale(ctx.(*Context).ctx, t.t, (C.float)(s)),
	}
}

func (t *Tensor) SumRows(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sum_rows(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Softmax(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_soft_max(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Sin(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sin(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Cos(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_cos(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Tanh(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_tanh_inplace(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Sigmoid(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sigmoid_inplace(ctx.(*Context).ctx, t.t),
	}
}

// View creates a view of t; the variadic arguments alternate sizes and
// byte strides: (size0), (size0, stride1, size1), (size0, stride1, size1,
// stride2, size2), and so on, which is why only odd argument counts are
// accepted.
func (t *Tensor) View(ctx ml.Context, offset int, shape ...int) ml.Tensor {
	switch len(shape) {
	case 1:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.size_t(offset)),
		}
	case 3:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_2d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]),
				C.size_t(shape[1]),
				C.size_t(offset)),
		}
	case 5:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_3d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]), C.int64_t(shape[4]),
				C.size_t(shape[1]), C.size_t(shape[3]),
				C.size_t(offset)),
		}
	case 7:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_4d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]), C.int64_t(shape[4]), C.int64_t(shape[6]),
				C.size_t(shape[1]), C.size_t(shape[3]), C.size_t(shape[5]),
				C.size_t(offset)),
		}
	default:
		panic("unsupported number of dimensions")
	}
}

func (t *Tensor) RoPE(ctx ml.Context, positions ml.Tensor, ropeDim int, ropeBase, ropeScale float32, options ...func(*rope.Options)) ml.Tensor {
	// Default options
	opts := rope.Options{
		Factors:               &Tensor{},
		OriginalContextLength: 131072,
		ExtrapolationFactor:   0.,
		AttentionFactor:       1.,
		BetaFast:              32.,
		BetaSlow:              1.,
	}

	// Apply any provided options
	for _, option := range options {
		option(&opts)
	}

	dequant := t.t
	if C.ggml_is_quantized(t.t._type) {
		dequant = C.ggml_cast(ctx.(*Context).ctx, t.t, C.GGML_TYPE_F32)
	}

	return &Tensor{
		b: t.b,
		t: C.ggml_rope_ext(
			ctx.(*Context).ctx,
			dequant,
			positions.(*Tensor).t,
			opts.Factors.(*Tensor).t,
			C.int(ropeDim),
			C.int(opts.Type),
			C.int(opts.OriginalContextLength),
			C.float(ropeBase),
			C.float(ropeScale),
			C.float(opts.ExtrapolationFactor),
			C.float(opts.AttentionFactor),
			C.float(opts.BetaFast),
			C.float(opts.BetaSlow),
		),
	}
}

func (t *Tensor) IM2Col(ctx ml.Context, t2 ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_im2col(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(s0), C.int(s1), C.int(p0), C.int(p1), C.int(d0), C.int(d1), true, C.GGML_TYPE_F32),
	}
}

func (t *Tensor) GELU(ctx ml.Context, t2 ...ml.Tensor) ml.Tensor {
	if len(t2) > 0 {
		return &Tensor{
			b: t.b,
			t: C.ggml_geglu_split(ctx.(*Context).ctx, t.t, t2[0].(*Tensor).t),
		}
	}
	return &Tensor{
		b: t.b,
		t: C.ggml_gelu_inplace(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) SILU(ctx ml.Context, t2 ...ml.Tensor) ml.Tensor {
	if len(t2) > 0 {
		return &Tensor{
			b: t.b,
			t: C.ggml_swiglu_split(ctx.(*Context).ctx, t.t, t2[0].(*Tensor).t),
		}
	}
	return &Tensor{
		b: t.b,
		t: C.ggml_silu_inplace(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) RELU(ctx ml.Context, t2 ...ml.Tensor) ml.Tensor {
	if len(t2) > 0 {
		return &Tensor{
			b: t.b,
			t: C.ggml_reglu_split(ctx.(*Context).ctx, t.t, t2[0].(*Tensor).t),
		}
	}
	return &Tensor{
		b: t.b,
		t: C.ggml_relu_inplace(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) SILUAlphaLimit(ctx ml.Context, up ml.Tensor, alpha, limit float32) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_swiglu_oai(ctx.(*Context).ctx, t.t, up.(*Tensor).t, C.float(alpha), C.float(limit)),
	}
}

func (t *Tensor) Conv2D(ctx ml.Context, t2 ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_conv_2d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(s0), C.int(s1), C.int(p0), C.int(p1), C.int(d0), C.int(d1)),
	}
}

func (t *Tensor) AvgPool2D(ctx ml.Context, k, s int, p float32) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_pool_2d(ctx.(*Context).ctx, t.t, C.GGML_OP_POOL_AVG, C.int(k), C.int(k), C.int(s), C.int(s), C.float(p), C.float(p)),
	}
}

func (t *Tensor) Set(ctx ml.Context, t2 ml.Tensor, offset int, strides ...int) ml.Tensor {
	var tt *C.struct_ggml_tensor
	switch len(strides) {
	case 0:
		tt = C.ggml_set_1d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.size_t(offset))
	case 1:
		tt = C.ggml_set_2d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.size_t(offset), C.size_t(strides[0]))
	default:
		panic("unsupported number of dimensions")
	}

	return &Tensor{b: t.b, t: tt}
}

// ScaledDotProductAttention computes softmax(Q·Kᵀ·scale + mask)·V with
// optional attention sinks; the flash attention path fuses these steps
// into a single operation.
func (t *Tensor) ScaledDotProductAttention(ctx ml.Context, key, value, mask, sinks ml.Tensor, scale float64) ml.Tensor {
	var kqMask *C.struct_ggml_tensor
	if mask != nil {
		kqMask = mask.(*Tensor).t
	}

	query := t.Permute(ctx, 0, 2, 1, 3)
	key = key.Permute(ctx, 0, 2, 1, 3)

	if t.b.flashAttention {
		value = value.Permute(ctx, 0, 2, 1, 3)

		kqv := C.ggml_flash_attn_ext(ctx.(*Context).ctx, query.(*Tensor).t, key.(*Tensor).t, value.(*Tensor).t, kqMask, C.float(scale), 0, 0)
		if sinks != nil {
			C.ggml_flash_attn_ext_add_sinks(kqv, sinks.(*Tensor).t)
		}
		C.ggml_flash_attn_ext_set_prec(kqv, C.GGML_PREC_F32)
		return &Tensor{b: t.b, t: kqv}
	} else {
		kq := key.MulmatFullPrec(ctx, query)
		kq = &Tensor{
			b: t.b,
			t: C.ggml_soft_max_ext(ctx.(*Context).ctx, kq.(*Tensor).t, kqMask, C.float(scale), 0),
		}
		if sinks != nil {
			C.ggml_soft_max_add_sinks(kq.(*Tensor).t, sinks.(*Tensor).t)
		}

		kqv := value.Mulmat(ctx, kq)
		return kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)
	}
}

func (t *Tensor) Duplicate(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_dup(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) TopK(ctx ml.Context, k int) ml.Tensor {
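	// ggml_top_k returns the indices of the k largest values along the first dimension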
	return &Tensor{
		b: t.b,
		t: C.ggml_top_k(ctx.(*Context).ctx, t.t, C.int(k)),
	}
}

func (t *Tensor) Argsort(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_argsort(ctx.(*Context).ctx, t.t, C.GGML_SORT_ORDER_ASC),
	}
}

func (t *Tensor) Mean(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_mean(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Variance(ctx ml.Context) ml.Tensor {
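	// Var(x) = sum((x - mean)^2) / n over the first dimension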
	return t.Add(ctx, t.Mean(ctx).Scale(ctx, -1)).
		Sqr(ctx).
		SumRows(ctx).
		Scale(ctx, 1/float64(t.Dim(0)))
}

func (t *Tensor) Stddev(ctx ml.Context) ml.Tensor {
	return t.Variance(ctx).Sqrt(ctx)
}

func (t *Tensor) Sqr(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sqr(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Sqrt(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sqrt(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Clamp(ctx ml.Context, min, max float32) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_clamp(ctx.(*Context).ctx, t.t, C.float(min), C.float(max)),
	}
}

func (c Context) FromBytes(dtype ml.DType, s []uint8, shape ...int) ml.Tensor {
	// Unchecked to handle quantized types
	t := c.newTensor(dtype, shape)
	if c.b.allocMemory && len(s) > 0 {
		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
	}

	return t
}