package ggml

// #cgo linux LDFLAGS: -lrt -lpthread -ldl -lstdc++ -lm
// #cgo windows LDFLAGS: -lpthread
// #cgo CPPFLAGS: -I${SRCDIR}/ggml/include
// #include <stdlib.h>
// #include <stdint.h>
// #include "ggml.h"
// #include "ggml-cpu.h"
// #include "ggml-backend.h"
import "C"

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"maps"
	"os"
	"runtime"
	"slices"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"unicode"
	"unsafe"

	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/fs"
	fsggml "github.com/ollama/ollama/fs/ggml"
	"github.com/ollama/ollama/logutil"
	"github.com/ollama/ollama/ml"
	ggml "github.com/ollama/ollama/ml/backend/ggml/ggml/src"
	"github.com/ollama/ollama/ml/nn/rope"

	"golang.org/x/sync/errgroup"
)

var (
	cpus, accels, gpus []C.ggml_backend_dev_t
	backends           map[C.ggml_backend_dev_t]C.ggml_backend_t
)

var initDevices = sync.OnceFunc(func() {
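	// Enumerate every available backend device exactly once, bucketing devices
	// into the cpu, accel, and gpu lists and eagerly creating a backend for each.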
	ggml.OnceLoad()

	backends = make(map[C.ggml_backend_dev_t]C.ggml_backend_t)
	for i := range C.ggml_backend_dev_count() {
		d := C.ggml_backend_dev_get(i)

		switch C.ggml_backend_dev_type(d) {
		case C.GGML_BACKEND_DEVICE_TYPE_CPU:
			if len(cpus) == 0 {
				// only the first cpu device should be used
				cpus = append(cpus, d)
			}
		case C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
			accels = append(accels, d)
		case C.GGML_BACKEND_DEVICE_TYPE_GPU:
			gpus = append(gpus, d)
		}

		backends[d] = C.ggml_backend_dev_init(d, nil)
	}
})

type layerDevice struct {
	d  C.ggml_backend_dev_t
	bt C.ggml_backend_buffer_type_t
}

type Backend struct {
	// modelPath is the location of the model data
	modelPath string

	meta *fsggml.GGML

	// allocMemory means that memory should be allocated for tensors and not
	// just a dry run
	allocMemory bool

	// tensorLoadTargets maps from the name of the tensor in the file
	// to the name that is used by the model definition
	tensorLoadTargets map[string][]string

	schedMu       sync.Mutex // Only one Compute can run at a time
	sched         C.ggml_backend_sched_t
	schedBackends []C.ggml_backend_t
	schedBufts    []C.ggml_backend_buffer_type_t

	tensors map[string]*C.struct_ggml_tensor

	// input is the backend buffer type used for inputs
	input C.ggml_backend_buffer_type_t

	// output is the backend device used for outputs
	output C.ggml_backend_dev_t

	// layers maps each repeating layer to its assigned backend device and buffer type
	layers map[int]layerDevice

	// requiredMemory is the cumulative memory allocations needed by the backend
	requiredMemory *ml.BackendMemory

	// btDeviceMemory maps from a buffer type to the memory allocations associated with that device
	btDeviceMemory map[C.ggml_backend_buffer_type_t]*ml.DeviceMemory

	flashAttention bool

	// maxGraphNodes is the maximum allowed number of graph nodes in this scheduler
	maxGraphNodes int

	// weightBuffers are the GGML contexts and buffers for allocating weights
	weightBuffers map[*C.struct_ggml_context]C.ggml_backend_buffer_t
}

var once sync.Once

func New(modelPath string, params ml.BackendParams) (ml.Backend, error) {
	r, err := os.Open(modelPath)
	if err != nil {
		return nil, err
	}
	defer r.Close()

	meta, err := fsggml.Decode(r, -1)
	if err != nil {
		return nil, err
	}

	once.Do(func() {
		slog.Info(
			"",
			"architecture", meta.KV().Architecture(),
			"file_type", meta.KV().FileType(),
			"name", meta.KV().String("general.name"),
			"description", meta.KV().String("general.description"),
			"num_tensors", len(meta.Tensors().Items()),
			"num_key_values", len(meta.KV()),
		)
	})

	initDevices()

	var requiredMemory ml.BackendMemory
	btDeviceMemory := make(map[C.ggml_backend_buffer_type_t]*ml.DeviceMemory)

	type deviceBufferType struct {
		d   C.ggml_backend_dev_t
		bts []C.ggml_backend_buffer_type_t
	}

	blocks := int(meta.KV().BlockCount())

	// create list of buffer types for the cpu
	cpuDeviceBufferType := deviceBufferType{d: C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_CPU)}
	for _, d := range append(accels, append(gpus, cpus...)...) {
		switch C.ggml_backend_dev_type(d) {
		case C.GGML_BACKEND_DEVICE_TYPE_CPU,
			C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
			bt := C.ggml_backend_dev_buffer_type(d)
			cpuDeviceBufferType.bts = append(cpuDeviceBufferType.bts, bt)

			btDeviceMemory[C.ggml_backend_dev_buffer_type(d)] = &requiredMemory.CPU
		}
	}

	requiredMemory.CPU.Name = C.GoString(C.ggml_backend_dev_name(cpuDeviceBufferType.d))
	var props C.struct_ggml_backend_dev_props
	C.ggml_backend_dev_get_props(cpuDeviceBufferType.d, &props)
	requiredMemory.CPU.ID = C.GoString(props.id)
	requiredMemory.CPU.Library = C.GoString(props.library)
	requiredMemory.CPU.Weights = make([]uint64, blocks+1)
	requiredMemory.CPU.Cache = make([]uint64, blocks+1)

	// create list of buffer types for each gpu
	var gpuDeviceBufferTypes []deviceBufferType
	requiredMemory.GPUs = make([]ml.DeviceMemory, len(gpus))
	for i, d := range gpus {
		bt := C.ggml_backend_dev_buffer_type(d)
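		// candidate buffer types in order of preference: the GPU's own buffer
		// type first, then the CPU buffer types as a fallback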
		gpuDeviceBufferTypes = append(gpuDeviceBufferTypes, deviceBufferType{
			d:   d,
			bts: append([]C.ggml_backend_buffer_type_t{bt}, cpuDeviceBufferType.bts...),
		})

		btDeviceMemory[bt] = &requiredMemory.GPUs[i]
		requiredMemory.GPUs[i].Name = C.GoString(C.ggml_backend_dev_name(d))
		var props C.struct_ggml_backend_dev_props
		C.ggml_backend_dev_get_props(d, &props)
		requiredMemory.GPUs[i].ID = C.GoString(props.id)
		requiredMemory.GPUs[i].Library = C.GoString(props.library)
		requiredMemory.GPUs[i].Weights = make([]uint64, blocks+1)
		requiredMemory.GPUs[i].Cache = make([]uint64, blocks+1)
	}

	// inputs always use cpu
	input := cpuDeviceBufferType

	assignLayer := func(layer int) deviceBufferType {
		for _, p := range params.GPULayers {
			for _, l := range p.Layers {
				if l == layer {
					for i := range requiredMemory.GPUs {
						if requiredMemory.GPUs[i].DeviceID == p.DeviceID {
							return gpuDeviceBufferTypes[i]
						}
					}

					return cpuDeviceBufferType
				}
			}
		}

		return cpuDeviceBufferType
	}
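	// e.g. if params.GPULayers places layers 0-2 on a device, those layers use
	// that GPU's buffer types and every other layer falls back to the CPU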

	// repeating layers are assigned to devices according to params.GPULayers
	layers := make([]deviceBufferType, blocks)
	for i := range layers {
		layers[i] = assignLayer(i)
	}

	// the output layer is assigned the same way, using the index one past the last repeating layer
	output := assignLayer(blocks)

	maxTensors := len(meta.Tensors().Items())
	maxTensors += 1
	// each layer has at most 2 extra tensors for rope operations
	maxTensors += blocks * 2

	type tensor struct {
		source *fsggml.Tensor
		target string
	}

	// some tensors are mapped to different names so keep a list
	targets := make(map[string][]string)

	// contexts are shared by tensors of the same buffer type
	ctxs := make(map[C.ggml_backend_buffer_type_t]*C.struct_ggml_context)
	createTensor := func(t tensor, bts []C.ggml_backend_buffer_type_t, layer int) *C.struct_ggml_tensor {
		for _, bt := range bts {
			if _, ok := ctxs[bt]; !ok {
				ctxs[bt] = C.ggml_init(C.struct_ggml_init_params{
					mem_size: C.ggml_tensor_overhead() * C.size_t(maxTensors),
					no_alloc: true,
				})
			}

			targets[t.source.Name] = append(targets[t.source.Name], t.target)

			name := t.source.Name
			if t.target != "" {
				name = t.target
			}

			cname := C.CString(name)
			defer C.free(unsafe.Pointer(cname))
			if tt := C.ggml_get_tensor(ctxs[bt], cname); tt != nil {
				return tt
			}

			kind := t.source.Kind
			if t.source.Kind == 4 {
				// transform raw mxfp4 stream to ggml mxfp4 format
				kind = 39
			} else if t.source.Kind == uint32(fsggml.TensorTypeBF16) && strings.HasSuffix(t.source.Name, "_exps.bias") {
				// transform "_exps.bias" from bf16 to fp32; add_ids only supports fp32 tensors
				kind = uint32(fsggml.TensorTypeF32)
			}

			tt := C.ggml_new_tensor(ctxs[bt], kind, C.int(len(t.source.Shape)), (*C.int64_t)(unsafe.Pointer(&t.source.Shape[0])))
			C.ggml_set_name(tt, cname)

			logutil.Trace("created tensor", "name", name, "shape", t.source.Shape, "dtype", t.source.Kind, "buffer_type", C.GoString(C.ggml_backend_buft_name(bt)))

			size := pad(C.ggml_backend_buft_get_alloc_size(bt, tt), C.ggml_backend_buft_get_alignment(bt))
			if layer == -1 {
				requiredMemory.InputWeights += uint64(size)
			} else {
				btDeviceMemory[bt].Weights[layer] += uint64(size)
			}

			//nolint:staticcheck // TODO: check if buffer type supports this tensor
			return tt
		}

		return nil
	}

	contains := func(s string, parts ...string) bool {
		split := strings.Split(s, ".")
		for _, part := range parts {
			if slices.Contains(split, part) {
				return true
			}
		}

		return false
	}

	for _, t := range meta.Tensors().Items() {
		switch {
		case contains(t.Name, "position_embd", "token_embd", "token_norm_embd", "token_types"):
			createTensor(tensor{source: t}, input.bts, -1)
			if _, ok := meta.Tensors().GroupLayers()["output"]; !ok && t.Name == "token_embd.weight" {
				createTensor(tensor{source: t, target: "output.weight"}, output.bts, blocks)
			}
		case contains(t.Name, "cls", "output", "output_norm",
			"altup_proj", "altup_unembd_proj",
			"per_layer_token_embd", "per_layer_model_proj", "per_layer_proj_norm"):
			createTensor(tensor{source: t}, output.bts, blocks)
		case strings.HasPrefix(t.Name, "v.") || strings.HasPrefix(t.Name, "mm."):
			// TODO: assign vision tensors to the gpu if possible
			createTensor(tensor{source: t}, output.bts, blocks)
		case contains(t.Name, "rope_freqs", "rope_factors_long", "rope_factors_short"):
			// these tensors should be repeated per layer
			for i, layer := range layers {
				createTensor(tensor{
					source: t,
					target: "blk." + strconv.Itoa(i) + "." + t.Name,
				}, layer.bts, i)
			}
		default:
			layerIndex := -1
			if fields := strings.FieldsFunc(t.Name, func(r rune) bool { return !unicode.IsNumber(r) }); len(fields) > 0 {
				if i, err := strconv.Atoi(fields[0]); err == nil {
					layerIndex = i
				}
			}

			if layerIndex >= 0 {
				createTensor(tensor{source: t}, layers[layerIndex].bts, layerIndex)
			} else {
				// load all other tensors on the cpu
				createTensor(tensor{source: t}, input.bts, -1)
			}
		}
	}

	// map tensor names to tensors for easy lookup later
	tensors := make(map[string]*C.struct_ggml_tensor)
	for _, c := range ctxs {
		for t := C.ggml_get_first_tensor(c); t != nil; t = C.ggml_get_next_tensor(c, t) {
			tensors[C.GoString(C.ggml_get_name(t))] = t
		}
	}

	// map devices to backend buffer types so new tensors can be assigned to the correct device
	deviceBufferTypes := make(map[C.ggml_backend_dev_t]C.ggml_backend_buffer_type_t)

	// create backends and buffer types used for the compute graph scheduler
	var schedBackends []C.ggml_backend_t
	var schedBufts []C.ggml_backend_buffer_type_t
	for _, d := range append(gpus, append(accels, cpus...)...) {
		b := backends[d]
		bt := C.ggml_backend_get_default_buffer_type(b)

		// always include the CPU as a fallback; otherwise only use devices where layers were assigned
		if !slices.Contains(cpuDeviceBufferType.bts, bt) {
			if c, ok := ctxs[bt]; !ok || C.ggml_get_first_tensor(c) == nil {
				continue
			}
		}

		deviceBufferTypes[d] = bt

		schedBackends = append(schedBackends, b)
		schedBufts = append(schedBufts, bt)

		if C.ggml_backend_is_cpu(b) {
			// set number of threads for cpu backend
			C.ggml_backend_cpu_set_n_threads(b, C.int(Threads(params.NumThreads)))
		}
	}

	maxGraphNodes := max(8192, len(meta.Tensors().Items())*5)

	sched := C.ggml_backend_sched_new_ext(
		(*C.ggml_backend_t)(unsafe.Pointer(&schedBackends[0])),
		(*C.ggml_backend_buffer_type_t)(unsafe.Pointer(&schedBufts[0])),
		C.int(len(schedBackends)),
		C.size_t(maxGraphNodes),
		C._Bool(false),
		C._Bool(false),
		C._Bool(params.AllocMemory),
	)

	// allocate buffers for each context
	bbs := make(map[*C.struct_ggml_context]C.ggml_backend_buffer_t, len(ctxs))
	for bt, c := range ctxs {
		if C.ggml_get_first_tensor(c) == nil {
			continue
		}

		b := C.ggml_backend_alloc_ctx_tensors_from_buft(c, bt)
		if b == nil {
			for _, b := range bbs {
				C.ggml_backend_buffer_free(b)
			}

			for _, ctx := range ctxs {
				C.ggml_free(ctx)
			}

			panic(ml.ErrNoMem{BackendMemory: requiredMemory})
		}

		C.ggml_backend_buffer_set_usage(b, C.GGML_BACKEND_BUFFER_USAGE_WEIGHTS)
		bbs[c] = b
	}

	for bs := range maps.Values(bbs) {
		logutil.Trace("model weights", "buffer", C.GoString(C.ggml_backend_buffer_name(bs)),
			"size", format.HumanBytes2(uint64(C.ggml_backend_buffer_get_size(bs))))
	}

	return &Backend{
		modelPath:         modelPath,
		allocMemory:       params.AllocMemory,
		flashAttention:    params.FlashAttention,
		meta:              meta,
		tensorLoadTargets: targets,
		tensors:           tensors,
		sched:             sched,
		schedBackends:     schedBackends,
		schedBufts:        schedBufts,
		input:             deviceBufferTypes[input.d],
		output:            output.d,
		layers: func() map[int]layerDevice {
			m := make(map[int]layerDevice)
			for i, layer := range layers {
				m[i] = layerDevice{
					d:  layer.d,
					bt: deviceBufferTypes[layer.d],
				}
			}
			return m
		}(),
		requiredMemory: &requiredMemory,
		btDeviceMemory: btDeviceMemory,
		maxGraphNodes:  maxGraphNodes,
		weightBuffers:  bbs,
	}, nil
}

func init() {
	ml.RegisterBackend("ggml", New)
}
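
// A typical lifecycle, sketched assuming the caller goes through the
// ml.Backend interface (runner wiring and error handling omitted):
//
//	b, err := New(modelPath, ml.BackendParams{AllocMemory: true})
//	if err != nil { ... }
//	defer b.Close()
//	err = b.Load(ctx, func(progress float32) { ... })
//	c := b.NewContext()
//	defer c.Close()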

func (b *Backend) Close() {
	if b == nil {
		return
	}

	for ctx, b := range b.weightBuffers {
		C.ggml_backend_buffer_free(b)
		C.ggml_free(ctx)
	}

	C.ggml_backend_sched_free(b.sched)
}

func (b *Backend) Load(ctx context.Context, progress func(float32)) error {
	if !b.allocMemory {
		return errors.New("cannot load model without memory allocation")
	}

	// Mimic llama runner logs summarizing layers and memory
	gpuLayers := 0
	for layer := range maps.Values(b.layers) {
		if C.ggml_backend_dev_type(layer.d) == C.GGML_BACKEND_DEVICE_TYPE_GPU {
			gpuLayers++
		}
	}
	slog.Info(fmt.Sprintf("offloading %d repeating layers to GPU", gpuLayers))

	switch C.ggml_backend_dev_type(b.output) {
	case C.GGML_BACKEND_DEVICE_TYPE_CPU:
		slog.Info("offloading output layer to CPU")
	case C.GGML_BACKEND_DEVICE_TYPE_GPU:
		slog.Info("offloading output layer to GPU")
		gpuLayers++
	case C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
		slog.Info("offloading output layer to ACCEL")
	}
	slog.Info(fmt.Sprintf("offloaded %d/%d layers to GPU", gpuLayers, len(b.layers)+1))

	var doneBytes atomic.Uint64
	totalBytes := uint64(b.meta.Length) - b.meta.Tensors().Offset

	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(runtime.GOMAXPROCS(0))
	for _, t := range b.meta.Tensors().Items() {
		t := t
		g.Go(func() error {
			tts := make([]*C.struct_ggml_tensor, max(1, len(b.tensorLoadTargets[t.Name])))
			for i := range tts {
				target := b.tensorLoadTargets[t.Name][i]
				if target == "" {
					target = t.Name
				}

				tt, ok := b.tensors[target]
				if !ok {
					return fmt.Errorf("unassigned tensor: %s", t.Name)
				}

				tts[i] = tt
			}

			// Create a new FD for each goroutine so that each FD is read sequentially, rather than
			// seeking around within an FD shared between all goroutines.
			file, err := os.Open(b.modelPath)
			if err != nil {
				slog.Warn("file open error", "file", b.modelPath, "error", err)
				return err
			}
			defer file.Close()
			sr := io.NewSectionReader(file, int64(b.meta.Tensors().Offset+t.Offset), int64(t.Size()))

			if t.Kind == 4 && tts[0]._type == 39 {
				// source is mxfp4, target is ggml mxfp4

				const BS = 17                             // MXFP4 block size
				bts := make([]byte, 8*BS*format.KibiByte) // ~128k block aligned
				var s uint64
				var tmp [16]byte
				for s < t.Size() {
					// Stop if the parent context has been canceled or another tensor load returned an error
					if err := ctx.Err(); err != nil {
						return err
					}
					n, err := io.ReadFull(sr, bts[:min(len(bts), int(t.Size()-s))])
					if err != nil {
						slog.Warn("file read error", "file", b.modelPath, "error", err)
						return err
					}
					for j := range n / BS {
						for i := 1; i < 9; i++ {
							// transform a1b2c3 ... x7y8z9 -> 71xa82yb93zc
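							// i.e. bytes i and i+8 of each block are split into nibbles and
							// re-paired: the two low nibbles form one output byte and the two
							// high nibbles the next, which is the layout the ggml mxfp4 type expects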
							a, b := bts[j*BS+i], bts[j*BS+i+8]
							tmp[2*(i-1)] = (a & 0x0F) | (b << 4)
							tmp[2*(i-1)+1] = (a >> 4) | (b & 0xF0)
						}
						copy(bts[j*BS+1:j*BS+17], tmp[:])
					}

					for _, tt := range tts {
						C.ggml_backend_tensor_set(tt, unsafe.Pointer(&bts[0]), C.size_t(s), C.size_t(n))
					}

					s += uint64(n)

					if progress != nil {
						done := doneBytes.Add(uint64(n))
						progress(float32(done) / float32(totalBytes))
					}
				}
				return nil
			} else if strings.HasSuffix(t.Name, "_exps.bias") && t.Kind == 30 && tts[0]._type == 0 {
				// source is bf16, target is ggml fp32; convert the data while copying
				bts := make([]byte, 128*format.KibiByte)
				var e uint64
				for e < t.Elements() {
					// Stop if the parent context has been canceled or another tensor load returned an error
					if err := ctx.Err(); err != nil {
						return err
					}
					n, err := io.ReadFull(sr, bts[:min(len(bts), int(t.Elements()-e)*2)])
					if err != nil {
						slog.Warn("file read error", "file", b.modelPath, "error", err)
						return err
					}
					fp32 := ConvertToF32(bts, uint32(fsggml.TensorTypeBF16), uint64(n/2))

					for _, tt := range tts {
						C.ggml_backend_tensor_set(tt, unsafe.Pointer(&fp32[0]), C.size_t(e*4), C.size_t(n*2))
					}
					e += uint64(n / 2)
					if progress != nil {
						done := doneBytes.Add(uint64(n))
						progress(float32(done) / float32(totalBytes))
					}
				}
				return nil
			}

			bts := make([]byte, 128*format.KibiByte)

			var s uint64
			for s < t.Size() {
				// Stop if the parent context has been canceled or another tensor load returned an error
				if err := ctx.Err(); err != nil {
					return err
				}

				n, err := io.ReadFull(sr, bts[:min(len(bts), int(t.Size()-s))])
				if err != nil {
					slog.Warn("file read error", "file", b.modelPath, "error", err)
					return err
				}

				for _, tt := range tts {
					C.ggml_backend_tensor_set(tt, unsafe.Pointer(&bts[0]), C.size_t(s), C.size_t(n))
				}

				s += uint64(n)

				if progress != nil {
					done := doneBytes.Add(uint64(n))
					progress(float32(done) / float32(totalBytes))
				}
			}

			return nil
		})
	}

	// Cleanup any backend state from devices that we didn't end up using
nextDevice:
	for _, d := range append(gpus, append(accels, cpus...)...) {
		for _, backend := range b.schedBackends {
			if d == C.ggml_backend_get_device(backend) {
				continue nextDevice
			}
		}

		C.ggml_backend_dev_reset(d)
	}

	if err := g.Wait(); err != nil {
		return err
	}

	return nil
}

func (b *Backend) BackendMemory() ml.BackendMemory {
	return *b.requiredMemory
}

func (b *Backend) Config() fs.Config {
	return b.meta.KV()
}

func (b *Backend) Get(name string) ml.Tensor {
	if t, ok := b.tensors[name]; ok {
		return &Tensor{b: b, t: t}
	}

	return nil
}

func (b *Backend) NewContext() ml.Context {
	return b.NewContextSize(b.maxGraphNodes)
}

func (b *Backend) NewContextSize(n int) ml.Context {
	if n > b.maxGraphNodes {
		panic(fmt.Errorf("requested number of graph nodes (%v) for new context exceeds maximum (%v)", n, b.maxGraphNodes))
	}

	var allocatedBuffers []C.ggml_backend_buffer_t

	return &Context{
		b:             b,
		maxGraphNodes: n,
		ctx: C.ggml_init(C.struct_ggml_init_params{
			mem_size: C.size_t(n)*C.ggml_tensor_overhead() + C.ggml_graph_overhead_custom(C.size_t(n), false),
			no_alloc: true,
		}),
		allocatedBuffers: &allocatedBuffers,
		layer:            -1,
	}
}

func (b *Backend) CacheConfig() ml.CacheConfig {
	if b.flashAttention {
		return ml.CacheConfig{CachePadding: 256, MaskDType: ml.DTypeF16, MaskBatchPadding: C.GGML_KQ_MASK_PAD}
	} else {
		return ml.CacheConfig{CachePadding: 32, PermutedV: true}
	}
}

func (b *Backend) BackendDevices() []ml.DeviceInfo {
	deviceInfos := []ml.DeviceInfo{}
	for _, dev := range gpus {
		// If we have a model loaded, and it's only loaded on a subset of the devices
		// skip idle/unused devices to avoid initializing them and causing VRAM allocations
		if b.allocMemory {
			idleDev := true
			for _, backend := range b.schedBackends {
				if dev == C.ggml_backend_get_device(backend) {
					idleDev = false
					break
				}
			}
			if idleDev {
				slog.Debug("skipping unused backend device", "description", C.GoString(C.ggml_backend_dev_description(dev)))
				continue
			}
		}

		info := ml.DeviceInfo{}
		props := C.struct_ggml_backend_dev_props{}
		C.ggml_backend_dev_get_props(dev, &props)
		info.Name = C.GoString(props.name)
		info.Description = C.GoString(props.description)
		info.ID = C.GoString(props.id)
		info.Library = C.GoString(props.library)
		info.ComputeMajor = (int)(props.compute_major)
		info.ComputeMinor = (int)(props.compute_minor)
		info.DriverMajor = (int)(props.driver_major)
		info.DriverMinor = (int)(props.driver_minor)
		info.Integrated = props.integrated != 0
		if props.library != nil {
			info.Library = C.GoString(props.library)
		}
		info.PCIID = fmt.Sprintf("%02x:%02x.%x", props.pci_bus_id, props.pci_device_id, props.pci_domain_id)
		info.LibraryPath = ggml.LibPaths()

		C.ggml_backend_dev_memory(dev, &props.memory_free, &props.memory_total)
		info.TotalMemory = (uint64)(props.memory_total)
		info.FreeMemory = (uint64)(props.memory_free)

		deviceInfos = append(deviceInfos, info)
	}
	return deviceInfos
}

type Context struct {
	b *Backend

	ctx   *C.struct_ggml_context
	graph *C.struct_ggml_cgraph

	// buft is the buffer type used for new tensors
	buft C.ggml_backend_buffer_type_t

	// allocatedBuffers are buffers for tensors that we have allocated in this context
	// so that we can free them when we close the context
	allocatedBuffers *[]C.ggml_backend_buffer_t

	// maxGraphNodes is the maximum allowed number of graph nodes in this context
	maxGraphNodes int

	// layer is the graph layer that this context is allocating for - assumed to be cache
	layer int
}

func (c *Context) Input() ml.Context {
	if c.b.input != nil {
		return &Context{
			b:                c.b,
			ctx:              c.ctx,
			buft:             c.b.input,
			allocatedBuffers: c.allocatedBuffers,
			maxGraphNodes:    c.maxGraphNodes,
			layer:            -1,
		}
	}

	return c
}

func (c *Context) Layer(i int) ml.Context {
	if layer, ok := c.b.layers[i]; ok {
		return &Context{
			b:                c.b,
			ctx:              c.ctx,
			buft:             layer.bt,
			allocatedBuffers: c.allocatedBuffers,
			maxGraphNodes:    c.maxGraphNodes,
			layer:            i,
		}
	}

	return c
}

func (c *Context) Forward(tensors ...ml.Tensor) ml.Context {
	if c.graph == nil {
		c.graph = C.ggml_new_graph_custom(c.ctx, C.size_t(c.maxGraphNodes), false)
	}

	for _, tensor := range tensors {
		C.ggml_build_forward_expand(c.graph, tensor.(*Tensor).t)
	}

	return c
}

func (c *Context) Compute(tensors ...ml.Tensor) {
	c.ComputeWithNotify(nil, tensors...)
}

func (c *Context) ComputeWithNotify(cb func(), tensors ...ml.Tensor) {
	c.b.schedMu.Lock()
	defer c.b.schedMu.Unlock()
	if cb != nil {
		go cb()
	}
	if status := C.ggml_backend_sched_graph_compute_async(c.b.sched, c.graph); status != C.GGML_STATUS_SUCCESS {
		panic(fmt.Errorf("error computing ggml graph: %v", status))
	}
	C.ggml_backend_sched_reset(c.b.sched)

	needSync := true
	sync := func() {
		if needSync {
			C.ggml_backend_sched_synchronize(c.b.sched)
			needSync = false
		}
	}

	for _, t := range tensors {
		if C.ggml_nbytes(t.(*Tensor).t) > 0 {
			t.(*Tensor).sync = sync
		}
	}
}

func (c *Context) Reserve() {
	reserved := C.ggml_backend_sched_reserve(c.b.sched, c.graph)

	slog.Debug("compute graph", "nodes", C.ggml_graph_n_nodes(c.graph), "splits", C.ggml_backend_sched_get_n_splits(c.b.sched))

	// Reserve may get called multiple times for different graphs - we just want the last run, which will contain the max allocations
	for _, bt := range c.b.schedBufts {
		c.b.btDeviceMemory[bt].Graph = 0
	}

	for i := range c.b.schedBackends {
		bufferSize := C.ggml_backend_sched_get_attempted_buffer_size(c.b.sched, c.b.schedBackends[i])
		c.b.btDeviceMemory[c.b.schedBufts[i]].Graph += uint64(bufferSize)

		logutil.Trace("compute graph", "backend", C.GoString(C.ggml_backend_name(c.b.schedBackends[i])),
			"buffer_type", C.GoString(C.ggml_backend_buft_name(c.b.schedBufts[i])), "size", format.HumanBytes2(uint64(bufferSize)))
	}

	if !reserved {
		panic(ml.ErrNoMem{BackendMemory: *c.b.requiredMemory})
	}
}

func (c *Context) MaxGraphNodes() int {
	return c.maxGraphNodes
}

func shapeToGGML(shape []int) *C.int64_t {
	sh := make([]C.int64_t, len(shape))
	for i, s := range shape {
		sh[i] = C.int64_t(s)
	}

	return &sh[0]
}

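// pad rounds length up to the next multiple of pad, e.g. pad(10, 8) == 16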
func pad(length, pad C.size_t) C.size_t {
	return ((length + pad - 1) / pad) * pad
}

func (c *Context) newTensor(dtype ml.DType, shape []int) ml.Tensor {
	if c.buft == nil {
		panic("set Input or Layer before creating tensors")
	}

	cdtype := ggmlDType(dtype)

	if len(shape) < 1 || shape[0] == 0 {
		var shape C.int64_t = 0
		return &Tensor{b: c.b, t: C.ggml_new_tensor(c.ctx, cdtype, 1, &shape)}
	} else if len(shape) > 4 {
		panic("unsupported number of dimensions")
	}

	for _, dim := range shape {
		if dim < 1 {
			panic("invalid shape")
		}
	}

	t := C.ggml_new_tensor(c.ctx, cdtype, C.int(len(shape)), shapeToGGML(shape))
	size := pad(C.ggml_backend_buft_get_alloc_size(c.buft, t), C.ggml_backend_buft_get_alignment(c.buft))

	b := C.ggml_backend_buft_alloc_buffer(c.buft, size)
	if c.layer >= 0 {
		c.b.btDeviceMemory[c.buft].Cache[c.layer] += uint64(size)
	}

	if b == nil {
		panic(ml.ErrNoMem{BackendMemory: *c.b.requiredMemory})
	}

	*c.allocatedBuffers = append(*c.allocatedBuffers, b)
	C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b))
	return &Tensor{b: c.b, t: t}
}

func (c *Context) Empty(dtype ml.DType, shape ...int) ml.Tensor {
	return c.newTensor(dtype, shape)
}

func (c *Context) Zeros(dtype ml.DType, shape ...int) ml.Tensor {
	t := c.newTensor(dtype, shape)
	if c.b.allocMemory {
		C.ggml_set_zero(t.(*Tensor).t)
	}
	return t
}

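// checkShape panics unless the product of shape equals len(s); an empty
// slice is accepted so tensors can be created without backing data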
func checkShape[S ~[]E, E any](s S, shape ...int) {
	n := len(s)

	if n == 0 {
		return
	}

	for _, v := range shape {
		n /= v
	}

	if n != 1 {
		panic(fmt.Errorf("invalid shape: %v", shape))
	}
}

func (c *Context) FromFloatSlice(s []float32, shape ...int) ml.Tensor {
	checkShape(s, shape...)

	t := c.newTensor(ml.DTypeF32, shape)

	if c.b.allocMemory && len(s) > 0 {
		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
	}

	return t
}

func (c *Context) FromIntSlice(s []int32, shape ...int) ml.Tensor {
	checkShape(s, shape...)

	t := c.newTensor(ml.DTypeI32, shape)

	if c.b.allocMemory && len(s) > 0 {
		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
	}

	return t
}

func (c Context) Arange(start, stop, step float32, dtype ml.DType) ml.Tensor {
	switch dtype {
	case ml.DTypeF32:
		// ggml_arange creates a float32 tensor
		return &Tensor{
			b: c.b,
			t: C.ggml_arange(c.ctx, C.float(start), C.float(stop), C.float(step)),
		}
	case ml.DTypeI32:
		// ggml_cast does not support float32 to int32 conversion
		arange := make([]int32, 0, int((stop-start)/step))
		for i := start; i < stop; i += step {
			arange = append(arange, int32(i))
		}

		return c.Input().FromIntSlice(arange, len(arange))
	default:
		panic("unsupported dtype for arange")
	}
}

func (c *Context) Close() {
	if c != nil {
		for _, b := range *c.allocatedBuffers {
			C.ggml_backend_buffer_free(b)
		}
		*c.allocatedBuffers = nil

		C.ggml_free(c.ctx)
	}
}

type Tensor struct {
	b    *Backend
	t    *C.struct_ggml_tensor
	sync func()
}

func (t *Tensor) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("name", C.GoString(C.ggml_get_name(t.t))),
		slog.String("type", C.GoString(C.ggml_type_name(t.t._type))),
		slog.Any("shape", t.Shape()),
	)
}

func (t *Tensor) Dim(n int) int {
	return int(t.t.ne[n])
}

func (t *Tensor) Stride(n int) int {
	return int(t.t.nb[n])
}

func (t *Tensor) Shape() []int {
	shape := make([]int, C.ggml_n_dims(t.t))
	for i := range shape {
		shape[i] = t.Dim(i)
	}

	return shape
}

func (t *Tensor) Bytes() (data []byte) {
	if t.sync != nil {
		data = make([]byte, C.ggml_nbytes(t.t))

		t.sync()
		C.ggml_backend_tensor_get(t.t, unsafe.Pointer(&data[0]), 0, C.ggml_nbytes(t.t))
	}

	return
}

func (t *Tensor) Floats() (data []float32) {
	if t.sync != nil {
		data = make([]float32, C.ggml_nelements(t.t))

		t.sync()
		C.ggml_backend_tensor_get(t.t, unsafe.Pointer(&data[0]), 0, C.ggml_nbytes(t.t))
	}

	return
}

func (t *Tensor) SetValueFromIntSlice(s []int32) {
	if len(s) > 0 {
		C.ggml_backend_tensor_set(t.t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.t))
	}
}

func (t *Tensor) DType() ml.DType {
	switch t.t._type {
	case C.GGML_TYPE_F32:
		return ml.DTypeF32
	case C.GGML_TYPE_F16:
		return ml.DTypeF16
	case C.GGML_TYPE_Q8_0:
		return ml.DTypeQ80
	case C.GGML_TYPE_Q4_0:
		return ml.DTypeQ40
	case C.GGML_TYPE_I32:
		return ml.DTypeI32
	case C.GGML_TYPE_MXFP4:
		return ml.DTypeMXFP4
	default:
		return ml.DTypeOther
	}
}

func ggmlDType(dtype ml.DType) uint32 {
	switch dtype {
	case ml.DTypeF32:
		return C.GGML_TYPE_F32
	case ml.DTypeF16:
		return C.GGML_TYPE_F16
	case ml.DTypeQ80:
		return C.GGML_TYPE_Q8_0
	case ml.DTypeQ40:
		return C.GGML_TYPE_Q4_0
	case ml.DTypeI32:
		return C.GGML_TYPE_I32
	case ml.DTypeMXFP4:
		return C.GGML_TYPE_MXFP4
	default:
		panic("unsupported dtype")
	}
}

func (t *Tensor) Cast(ctx ml.Context, dtype ml.DType) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_cast(ctx.(*Context).ctx, t.t, ggmlDType(dtype)),
	}
}

func (t *Tensor) Neg(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_neg(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Add(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_add(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Sub(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sub(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Repeat(ctx ml.Context, dim, n int) ml.Tensor {
	if dim < 0 || dim >= C.GGML_MAX_DIMS {
		panic("invalid dimension")
	}

	shape := make([]C.int64_t, C.GGML_MAX_DIMS)
	for i := range C.GGML_MAX_DIMS {
		if i == dim {
			shape[i] = C.int64_t(t.Dim(i) * n)
		} else {
			shape[i] = C.int64_t(t.Dim(i))
		}
	}

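	// ggml_repeat broadcasts t into the shape of a second tensor, so build a
	// throwaway template tensor with the expanded shape to describe the result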
	tmpl := C.ggml_new_tensor(ctx.(*Context).ctx, t.t._type, C.int(len(shape)), unsafe.SliceData(shape))
	return &Tensor{
		b: t.b,
		t: C.ggml_repeat(ctx.(*Context).ctx, t.t, tmpl),
	}
}

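// Stack folds Concat recursively along dim, e.g. a.Stack(ctx, 2, b, c) is
// equivalent to a.Concat(ctx, b.Concat(ctx, c, 2), 2)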
func (t *Tensor) Stack(ctx ml.Context, dim int, s ...ml.Tensor) ml.Tensor {
	if len(s) > 0 {
		return t.Concat(ctx, s[0].Stack(ctx, dim, s[1:]...), dim)
	}

	return t
}

func (t *Tensor) Concat(ctx ml.Context, t2 ml.Tensor, dim int) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_concat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(dim)),
	}
}

func (t *Tensor) Contiguous(ctx ml.Context, shape ...int) ml.Tensor {
	switch len(shape) {
	case 0:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont(ctx.(*Context).ctx, t.t),
		}
	case 1:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0])),
		}
	case 2:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_2d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1])),
		}
	case 3:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_3d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2])),
		}
	case 4:
		return &Tensor{
			b: t.b,
			t: C.ggml_cont_4d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2]), C.int64_t(shape[3])),
		}
	default:
		panic("unsupported number of dimensions")
	}
}

func (t *Tensor) Mul(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_mul(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Div(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_div(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Mulmat(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_mul_mat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) MulmatFullPrec(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	mul := C.ggml_mul_mat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t)
	C.ggml_mul_mat_set_prec(mul, C.GGML_PREC_F32)

	return &Tensor{
		b: t.b,
		t: mul,
	}
}

func (t *Tensor) MulmatID(ctx ml.Context, t2, ids ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_mul_mat_id(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, ids.(*Tensor).t),
	}
}

func (t *Tensor) AddID(ctx ml.Context, t2, ids ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_add_id(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, ids.(*Tensor).t),
	}
}

func (t *Tensor) L2Norm(ctx ml.Context, eps float32) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_l2_norm(ctx.(*Context).ctx, t.t, C.float(eps)),
	}
}

func (t *Tensor) LayerNorm(ctx ml.Context, w, b ml.Tensor, eps float32) ml.Tensor {
	tt := C.ggml_norm(ctx.(*Context).ctx, t.t, C.float(eps))
	if w != nil {
		tt = C.ggml_mul(ctx.(*Context).ctx, tt, w.(*Tensor).t)
		if b != nil {
			tt = C.ggml_add(ctx.(*Context).ctx, tt, b.(*Tensor).t)
		}
	}

	return &Tensor{b: t.b, t: tt}
}

func (t *Tensor) RMSNorm(ctx ml.Context, w ml.Tensor, eps float32) ml.Tensor {
	tt := C.ggml_rms_norm(ctx.(*Context).ctx, t.t, C.float(eps))
	if w != nil {
		tt = C.ggml_mul(ctx.(*Context).ctx, tt, w.(*Tensor).t)
	}

	return &Tensor{b: t.b, t: tt}
}

func (t *Tensor) Pad(ctx ml.Context, shape ...int) ml.Tensor {
	if len(shape) != 4 {
		panic("expected 4 dimensions")
	} else if shape[3] != 0 {
		panic("cuda does not support 4d tensors")
	}

	return &Tensor{
		b: t.b,
		t: C.ggml_pad(ctx.(*Context).ctx, t.t, C.int(shape[0]), C.int(shape[1]), C.int(shape[2]), C.int(shape[3])),
	}
}

func (t *Tensor) Permute(ctx ml.Context, shape ...int) ml.Tensor {
	if len(shape) != 4 {
		panic("expected 4 dimensions")
	}

	return &Tensor{
		b: t.b,
		t: C.ggml_permute(ctx.(*Context).ctx, t.t, C.int(shape[0]), C.int(shape[1]), C.int(shape[2]), C.int(shape[3])),
	}
}

func (t *Tensor) Rows(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_get_rows(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Copy(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_cpy(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Reshape(ctx ml.Context, shape ...int) ml.Tensor {
	switch len(shape) {
	case 1:
		return &Tensor{
			b: t.b,
			t: C.ggml_reshape_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0])),
		}
	case 2:
		return &Tensor{
			b: t.b,
			t: C.ggml_reshape_2d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1])),
		}
	case 3:
		return &Tensor{
			b: t.b,
			t: C.ggml_reshape_3d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2])),
		}
	case 4:
		return &Tensor{
			b: t.b,
			t: C.ggml_reshape_4d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2]), C.int64_t(shape[3])),
		}
	default:
		panic("unsupported number of dimensions")
	}
}

func (t *Tensor) Scale(ctx ml.Context, s float64) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_scale(ctx.(*Context).ctx, t.t, (C.float)(s)),
	}
}

func (t *Tensor) SumRows(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sum_rows(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Softmax(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_soft_max(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Sin(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sin(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Cos(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_cos(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Tanh(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_tanh_inplace(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Sigmoid(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sigmoid_inplace(ctx.(*Context).ctx, t.t),
	}
}

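// View reads its variadic arguments as sizes interleaved with byte strides:
// (n0), (n0, nb1, n1), (n0, nb1, n1, nb2, n2), and so on, mirroring the
// ggml_view_{1,2,3,4}d signatures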
func (t *Tensor) View(ctx ml.Context, offset int, shape ...int) ml.Tensor {
	switch len(shape) {
	case 1:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.size_t(offset)),
		}
	case 3:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_2d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]),
				C.size_t(shape[1]),
				C.size_t(offset)),
		}
	case 5:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_3d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]), C.int64_t(shape[4]),
				C.size_t(shape[1]), C.size_t(shape[3]),
				C.size_t(offset)),
		}
	case 7:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_4d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]), C.int64_t(shape[4]), C.int64_t(shape[6]),
				C.size_t(shape[1]), C.size_t(shape[3]), C.size_t(shape[5]),
				C.size_t(offset)),
		}
	default:
		panic("unsupported number of dimensions")
	}
}

func (t *Tensor) RoPE(ctx ml.Context, positions ml.Tensor, ropeDim int, ropeBase, ropeScale float32, options ...func(*rope.Options)) ml.Tensor {
	// Default options
	opts := rope.Options{
		Factors:               &Tensor{},
		OriginalContextLength: 131072,
		ExtrapolationFactor:   0.,
		AttentionFactor:       1.,
		BetaFast:              32.,
		BetaSlow:              1.,
	}

	// Apply any provided options
	for _, option := range options {
		option(&opts)
	}

	dequant := t.t
	if C.ggml_is_quantized(t.t._type) {
		dequant = C.ggml_cast(ctx.(*Context).ctx, t.t, C.GGML_TYPE_F32)
	}

	return &Tensor{
		b: t.b,
		t: C.ggml_rope_ext(
			ctx.(*Context).ctx,
			dequant,
			positions.(*Tensor).t,
			opts.Factors.(*Tensor).t,
			C.int(ropeDim),
			C.int(opts.Type),
			C.int(opts.OriginalContextLength),
			C.float(ropeBase),
			C.float(ropeScale),
			C.float(opts.ExtrapolationFactor),
			C.float(opts.AttentionFactor),
			C.float(opts.BetaFast),
			C.float(opts.BetaSlow),
		),
	}
}

func (t *Tensor) IM2Col(ctx ml.Context, t2 ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_im2col(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(s0), C.int(s1), C.int(p0), C.int(p1), C.int(d0), C.int(d1), true, C.GGML_TYPE_F32),
	}
}

func (t *Tensor) GELU(ctx ml.Context, t2 ...ml.Tensor) ml.Tensor {
	if len(t2) > 0 {
		return &Tensor{
			b: t.b,
			t: C.ggml_geglu_split(ctx.(*Context).ctx, t.t, t2[0].(*Tensor).t),
		}
	}
	return &Tensor{
		b: t.b,
		t: C.ggml_gelu_inplace(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) SILU(ctx ml.Context, t2 ...ml.Tensor) ml.Tensor {
	if len(t2) > 0 {
		return &Tensor{
			b: t.b,
			t: C.ggml_swiglu_split(ctx.(*Context).ctx, t.t, t2[0].(*Tensor).t),
		}
	}
	return &Tensor{
		b: t.b,
		t: C.ggml_silu_inplace(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) RELU(ctx ml.Context, t2 ...ml.Tensor) ml.Tensor {
	if len(t2) > 0 {
		return &Tensor{
			b: t.b,
			t: C.ggml_reglu_split(ctx.(*Context).ctx, t.t, t2[0].(*Tensor).t),
		}
	}
	return &Tensor{
		b: t.b,
		t: C.ggml_relu_inplace(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) SILUAlphaLimit(ctx ml.Context, up ml.Tensor, alpha, limit float32) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_swiglu_oai(ctx.(*Context).ctx, t.t, up.(*Tensor).t, C.float(alpha), C.float(limit)),
	}
}

func (t *Tensor) Conv2D(ctx ml.Context, t2 ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_conv_2d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(s0), C.int(s1), C.int(p0), C.int(p1), C.int(d0), C.int(d1)),
	}
}

func (t *Tensor) AvgPool2D(ctx ml.Context, k, s int, p float32) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_pool_2d(ctx.(*Context).ctx, t.t, C.GGML_OP_POOL_AVG, C.int(k), C.int(k), C.int(s), C.int(s), C.float(p), C.float(p)),
	}
}

func (t *Tensor) Set(ctx ml.Context, t2 ml.Tensor, offset int, strides ...int) ml.Tensor {
	var tt *C.struct_ggml_tensor
	switch len(strides) {
	case 0:
		tt = C.ggml_set_1d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.size_t(offset))
	case 1:
		tt = C.ggml_set_2d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.size_t(offset), C.size_t(strides[0]))
	default:
		panic("unsupported number of dimensions")
	}

	return &Tensor{b: t.b, t: tt}
}

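// ScaledDotProductAttention uses ggml's fused flash attention kernel when
// enabled (permuting V and forcing F32 precision), and otherwise falls back
// to an explicit QK^T -> softmax -> V matmul chain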
func (t *Tensor) ScaledDotProductAttention(ctx ml.Context, key, value, mask, sinks ml.Tensor, scale float64) ml.Tensor {
	var kqMask *C.struct_ggml_tensor
	if mask != nil {
		kqMask = mask.(*Tensor).t
	}

	query := t.Permute(ctx, 0, 2, 1, 3)
	key = key.Permute(ctx, 0, 2, 1, 3)

	if t.b.flashAttention {
		value = value.Permute(ctx, 0, 2, 1, 3)

		kqv := C.ggml_flash_attn_ext(ctx.(*Context).ctx, query.(*Tensor).t, key.(*Tensor).t, value.(*Tensor).t, kqMask, C.float(scale), 0, 0)
		if sinks != nil {
			C.ggml_flash_attn_ext_add_sinks(kqv, sinks.(*Tensor).t)
		}
		C.ggml_flash_attn_ext_set_prec(kqv, C.GGML_PREC_F32)
		return &Tensor{b: t.b, t: kqv}
	} else {
		kq := key.MulmatFullPrec(ctx, query)
		kq = &Tensor{
			b: t.b,
			t: C.ggml_soft_max_ext(ctx.(*Context).ctx, kq.(*Tensor).t, kqMask, C.float(scale), 0),
		}
		if sinks != nil {
			C.ggml_soft_max_add_sinks(kq.(*Tensor).t, sinks.(*Tensor).t)
		}

		kqv := value.Mulmat(ctx, kq)
		return kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)
	}
}

func (t *Tensor) Duplicate(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_dup(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) TopK(ctx ml.Context, k int) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_top_k(ctx.(*Context).ctx, t.t, C.int(k)),
	}
}

func (t *Tensor) Argsort(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_argsort(ctx.(*Context).ctx, t.t, C.GGML_SORT_ORDER_ASC),
	}
}

func (t *Tensor) Mean(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_mean(ctx.(*Context).ctx, t.t),
	}
}

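// Variance computes the population variance along the first dimension,
// mean((x - mean(x))^2), via Add(-mean), Sqr, SumRows, and a 1/n Scale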
func (t *Tensor) Variance(ctx ml.Context) ml.Tensor {
	return t.Add(ctx, t.Mean(ctx).Scale(ctx, -1)).
		Sqr(ctx).
		SumRows(ctx).
		Scale(ctx, 1/float64(t.Dim(0)))
}

func (t *Tensor) Stddev(ctx ml.Context) ml.Tensor {
	return t.Variance(ctx).Sqrt(ctx)
}

func (t *Tensor) Sqr(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sqr(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Sqrt(ctx ml.Context) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_sqrt(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Clamp(ctx ml.Context, min, max float32) ml.Tensor {
	return &Tensor{
		b: t.b,
		t: C.ggml_clamp(ctx.(*Context).ctx, t.t, C.float(min), C.float(max)),
	}
}

func (c Context) FromBytes(dtype ml.DType, s []uint8, shape ...int) ml.Tensor {
	// Unchecked to handle quantized types
	t := c.newTensor(dtype, shape)
	if c.b.allocMemory && len(s) > 0 {
		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
	}

	return t
}