package ggml

// #cgo CPPFLAGS: -I${SRCDIR}/ggml/include
// #include <stdlib.h>
// #include <stdint.h>
// #include "ggml.h"
// #include "ggml-cpu.h"
// #include "ggml-backend.h"
import "C"

import (
	"context"
	"fmt"
	"io"
	"log/slog"
	"maps"
	"os"
	"runtime"
	"slices"
	"strconv"
	"strings"
	"sync/atomic"
	"unicode"
	"unsafe"

	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/fs"
	fsggml "github.com/ollama/ollama/fs/ggml"
	"github.com/ollama/ollama/logutil"
	"github.com/ollama/ollama/ml"
	ggml "github.com/ollama/ollama/ml/backend/ggml/ggml/src"
	"github.com/ollama/ollama/ml/nn/rope"
	"golang.org/x/sync/errgroup"
)
40
func devices() []*C.struct_ggml_backend_device {
	ggml.OnceLoad()
	ds := make([]*C.struct_ggml_backend_device, C.ggml_backend_dev_count())
	for i := range ds {
		ds[i] = C.ggml_backend_dev_get(C.size_t(i))
Michael Yang's avatar
Michael Yang committed
41
	}
Michael Yang's avatar
Michael Yang committed
42
43

	return ds
44
}
Michael Yang's avatar
Michael Yang committed
45
46

// Backend implements ml.Backend on top of the ggml C library. It owns
// the loaded model weights, the graph scheduler, and the per-device
// memory accounting.
type Backend struct {
	// modelPath is the location of the model data
	modelPath string

	// meta is the decoded metadata for the model file at modelPath
	meta *fsggml.GGML

	// tensorLoadTargets maps from the name of the tensor in the file
	// to the name that is used by the model definition
	tensorLoadTargets map[string][]string

	// sched dispatches graph computation; schedBackends and schedBufts
	// are kept in parallel order (index i of one corresponds to the other)
	sched         *C.struct_ggml_backend_sched
	schedBackends []*C.struct_ggml_backend
	schedBufts    []*C.struct_ggml_backend_buffer_type

	// tensors maps tensor names to the underlying ggml tensors
	tensors map[string]*C.struct_ggml_tensor

	// input is the backend used for inputs
	input *C.struct_ggml_backend_buffer_type

	// layers is the backend used for repeating layers
	layers map[int]*C.struct_ggml_backend_buffer_type

	// requiredMemory is the cumulative memory allocations needed by the backend
	requiredMemory *ml.BackendMemory

	// btDeviceMemory maps from a buffer type to the memory allocations associated with that device
	btDeviceMemory map[*C.struct_ggml_backend_buffer_type]*ml.DeviceMemory

	// flashAttention selects the flash-attention cache layout (see CacheConfig)
	flashAttention bool

	// maxGraphNodes is the maximum allowed number of graph nodes in this scheduler
	maxGraphNodes int
}

80
81
82
83
84
85
86
87
// New decodes the GGUF file at modelPath and constructs a Backend for it:
// it enumerates devices, assigns every weight tensor to a device buffer
// type based on params (gpu layer count, tensor split), allocates the
// weight buffers, and creates the compute graph scheduler. Tensor data
// itself is not read here; Load does that separately.
func New(modelPath string, params ml.BackendParams) (ml.Backend, error) {
	r, err := os.Open(modelPath)
	if err != nil {
		return nil, err
	}
	defer r.Close()

	meta, err := fsggml.Decode(r, -1)
	if err != nil {
		return nil, err
	}

	slog.Info(
		"",
		"architecture", meta.KV().Architecture(),
		"file_type", meta.KV().FileType(),
		"name", meta.KV().String("general.name"),
		"description", meta.KV().String("general.description"),
		"num_tensors", len(meta.Tensors().Items()),
		"num_key_values", len(meta.KV()),
	)

	var requiredMemory ml.BackendMemory
	btDeviceMemory := make(map[*C.struct_ggml_backend_buffer_type]*ml.DeviceMemory)

	// deviceBufferType pairs a device with the buffer types to try for it,
	// in preference order (device-local first, then fallbacks).
	type deviceBufferType struct {
		d   *C.struct_ggml_backend_device
		bts []*C.struct_ggml_backend_buffer_type
	}

	var cpus, accels, gpus []*C.struct_ggml_backend_device
	for _, d := range devices() {
		switch C.ggml_backend_dev_type(d) {
		case C.GGML_BACKEND_DEVICE_TYPE_CPU:
			if len(cpus) == 0 {
				// only the first cpu device should be used
				cpus = append(cpus, d)
			}
		case C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
			accels = append(accels, d)
		case C.GGML_BACKEND_DEVICE_TYPE_GPU:
			gpus = append(gpus, d)
		}
	}

	blocks := int(meta.KV().BlockCount())

	// create list of buffer types for the cpu
	cpuDeviceBufferType := deviceBufferType{d: C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_CPU)}
	for _, d := range append(accels, append(gpus, cpus...)...) {
		switch C.ggml_backend_dev_type(d) {
		case C.GGML_BACKEND_DEVICE_TYPE_CPU,
			C.GGML_BACKEND_DEVICE_TYPE_ACCEL:
			cpuDeviceBufferType.bts = append(cpuDeviceBufferType.bts, C.ggml_backend_dev_buffer_type(d))
			btDeviceMemory[C.ggml_backend_dev_buffer_type(d)] = &requiredMemory.CPU
		}
	}

	requiredMemory.CPU.Name = C.GoString(C.ggml_backend_dev_name(cpuDeviceBufferType.d))
	var props C.struct_ggml_backend_dev_props
	C.ggml_backend_dev_get_props(cpuDeviceBufferType.d, &props)

	// Bug #11211: Reporting of UUIDs is temporarily disabled due to causing segfaults
	// This only affects debug information until the new memory management code is in place
	// requiredMemory.CPU.UUID = C.GoString(props.uuid)

	// blocks+1: one slot per repeating layer plus one for the output layer
	requiredMemory.CPU.Weights = make([]ml.Memory, blocks+1)
	requiredMemory.CPU.Cache = make([]ml.Memory, blocks+1)

	// create list of buffer types for each gpu
	var gpuDeviceBufferTypes []deviceBufferType
	requiredMemory.GPUs = make([]ml.DeviceMemory, len(gpus))
	for i, d := range gpus {
		bt := C.ggml_backend_dev_buffer_type(d)
		gpuDeviceBufferTypes = append(gpuDeviceBufferTypes, deviceBufferType{
			d:   d,
			bts: append([]*C.struct_ggml_backend_buffer_type{bt}, cpuDeviceBufferType.bts...),
		})
		btDeviceMemory[bt] = &requiredMemory.GPUs[i]
		requiredMemory.GPUs[i].Name = C.GoString(C.ggml_backend_dev_name(d))
		var props C.struct_ggml_backend_dev_props
		C.ggml_backend_dev_get_props(d, &props)
		// requiredMemory.GPUs[i].UUID = C.GoString(props.uuid)
		requiredMemory.GPUs[i].Weights = make([]ml.Memory, blocks+1)
		requiredMemory.GPUs[i].Cache = make([]ml.Memory, blocks+1)
	}

	// an all-zero TensorSplit means the caller did not request a split
	useDefaultSplit := true
	for _, s := range params.TensorSplit {
		if s != 0 {
			useDefaultSplit = false
			break
		}
	}

	// calculate splits
	splits := make([]float32, len(gpus))
	if useDefaultSplit {
		// default: split on free memory
		for i := range splits {
			var free, total C.size_t
			C.ggml_backend_dev_memory(gpus[i], &free, &total)
			splits[i] = float32(free)
		}
	} else {
		splits = params.TensorSplit
	}

	var sum float32
	// cumulative sum of all splits
	for i := range splits {
		sum += splits[i]
		splits[i] = sum
	}

	// normalize splits
	for i := range splits {
		splits[i] /= sum
	}

	// inputs always use cpu
	input := cpuDeviceBufferType

	// define a range of gpu layers. anything outside of this range is assigned to the cpu
	gpuRangeStart := max(0, blocks-params.NumGPULayers)
	gpuRangeStop := min(gpuRangeStart+params.NumGPULayers, blocks+1)

	// assignLayer picks the device buffer types for layer i based on the
	// gpu range and the normalized cumulative splits.
	assignLayer := func(i int) deviceBufferType {
		if i < gpuRangeStart || i >= gpuRangeStop {
			return cpuDeviceBufferType
		}

		index := slices.IndexFunc(splits, func(f float32) bool { return float32(i-gpuRangeStart)/float32(gpuRangeStop-gpuRangeStart) < f })
		if index < 0 || index >= len(gpuDeviceBufferTypes) {
			return cpuDeviceBufferType
		}

		return gpuDeviceBufferTypes[index]
	}

	// repeating layers are assigned based on their index in reverse order, e.g. i / (block_count + 1)
	layers := make([]deviceBufferType, blocks)
	for i := range layers {
		layers[i] = assignLayer(i)
	}

	// outputs are assigned iff allowed by splits and configured number of gpu layers
	output := assignLayer(blocks)

	maxTensors := len(meta.Tensors().Items())
	maxTensors += 1
	// each layer has at most 2 extra tensors for rope operations
	maxTensors += blocks * 2

	type tensor struct {
		source *fsggml.Tensor
		target string
	}

	// some tensors are mapped to different names so keep a list
	targets := make(map[string][]string)

	// contexts are shared by tensors of the same buffer type
	ctxs := make(map[*C.struct_ggml_backend_buffer_type]*C.struct_ggml_context)

	// createTensor creates (or returns the already-created) ggml tensor for t
	// in the first buffer type of bts, recording its size against the weight
	// accounting for the given layer (-1 means input weights).
	createTensor := func(t tensor, bts []*C.struct_ggml_backend_buffer_type, layer int) *C.struct_ggml_tensor {
		for _, bt := range bts {
			if _, ok := ctxs[bt]; !ok {
				ctxs[bt] = C.ggml_init(C.struct_ggml_init_params{
					mem_size: C.ggml_tensor_overhead() * C.size_t(maxTensors),
					no_alloc: true,
				})
			}

			targets[t.source.Name] = append(targets[t.source.Name], t.target)

			name := t.source.Name
			if t.target != "" {
				name = t.target
			}

			cname := C.CString(name)
			defer C.free(unsafe.Pointer(cname))
			if tt := C.ggml_get_tensor(ctxs[bt], cname); tt != nil {
				return tt
			}

			tt := C.ggml_new_tensor(ctxs[bt], t.source.Kind, C.int(len(t.source.Shape)), (*C.int64_t)(unsafe.Pointer(&t.source.Shape[0])))
			C.ggml_set_name(tt, cname)

			slog.Log(context.TODO(), logutil.LevelTrace, "created tensor", "name", name, "shape", t.source.Shape, "dtype", t.source.Kind, "buffer_type", C.GoString(C.ggml_backend_buft_name(bt)))

			size := pad(C.ggml_backend_buft_get_alloc_size(bt, tt), C.ggml_backend_buft_get_alignment(bt))
			if layer == -1 {
				// Assume that InputWeights can be allocated - they're always in system memory and can't be moved in any case
				requiredMemory.InputWeights.Status = ml.Allocated
				requiredMemory.InputWeights.Size += uint64(size)
			} else {
				btDeviceMemory[bt].Weights[layer].Size += uint64(size)
			}

			//nolint:staticcheck // TODO: check if buffer type supports this tensor
			return tt
		}

		return nil
	}

	// contains reports whether any of parts appears as a dot-separated
	// component of s.
	contains := func(s string, parts ...string) bool {
		split := strings.Split(s, ".")
		for _, part := range parts {
			if slices.Contains(split, part) {
				return true
			}
		}

		return false
	}

	for _, t := range meta.Tensors().Items() {
		switch {
		case contains(t.Name, "position_embd", "token_embd", "token_norm_embd", "token_types"):
			createTensor(tensor{source: t}, input.bts, -1)
			if _, ok := meta.Tensors().GroupLayers()["output"]; !ok && t.Name == "token_embd.weight" {
				// tied embeddings: reuse token_embd as the output projection
				createTensor(tensor{source: t, target: "output.weight"}, output.bts, blocks)
			}
		case contains(t.Name, "cls", "output", "output_norm",
			"altup_proj", "altup_unembd_proj",
			"per_layer_token_embd", "per_layer_model_proj", "per_layer_proj_norm"):
			createTensor(tensor{source: t}, output.bts, blocks)
		case strings.HasPrefix(t.Name, "v.") || strings.HasPrefix(t.Name, "mm."):
			// TODO: assign vision tensors to the gpu if possible
			createTensor(tensor{source: t}, output.bts, blocks)
		case contains(t.Name, "rope_freqs", "rope_factors_long", "rope_factors_short"):
			// these tensors should be repeated per layer
			for i, layer := range layers {
				createTensor(tensor{
					source: t,
					target: "blk." + strconv.Itoa(i) + "." + t.Name,
				}, layer.bts, i)
			}
		default:
			// derive the layer index from the first run of digits in the name
			layerIndex := -1
			if fields := strings.FieldsFunc(t.Name, func(r rune) bool { return !unicode.IsNumber(r) }); len(fields) > 0 {
				if i, err := strconv.Atoi(fields[0]); err == nil {
					layerIndex = i
				}
			}

			if layerIndex >= 0 {
				createTensor(tensor{source: t}, layers[layerIndex].bts, layerIndex)
			} else {
				// load all other tensors on the cpu
				createTensor(tensor{source: t}, input.bts, -1)
			}
		}
	}

	// allocate buffers for each context
	bbs := make(map[*C.struct_ggml_context]*C.struct_ggml_backend_buffer, len(ctxs))
	for bt, c := range ctxs {
		if C.ggml_get_first_tensor(c) == nil {
			continue
		}

		b := C.ggml_backend_alloc_ctx_tensors_from_buft(c, bt)
		for i := range btDeviceMemory[bt].Weights {
			if btDeviceMemory[bt].Weights[i].Size != 0 {
				if b != nil {
					btDeviceMemory[bt].Weights[i].Status = ml.Allocated
				} else {
					btDeviceMemory[bt].Weights[i].Status = ml.Failed
				}
			}
		}

		if b == nil {
			panic(ml.ErrNoMem{BackendMemory: requiredMemory})
		}

		C.ggml_backend_buffer_set_usage(b, C.GGML_BACKEND_BUFFER_USAGE_WEIGHTS)
		bbs[c] = b
	}

	// Mimic llama runner logs summarizing layers and memory
	// NOTE(review): the literals 0/1/2 below presumably mirror the
	// GGML_BACKEND_DEVICE_TYPE_* enum values - confirm against ggml-backend.h
	// and prefer the named C constants.
	slog.Info(fmt.Sprintf("offloading %d repeating layers to GPU", max(0, params.NumGPULayers-1)))
	gpuLayers := 0
	switch C.ggml_backend_dev_type(output.d) {
	case 0: // CPU
		slog.Info("offloading output layer to CPU")
	case 1: // GPU
		slog.Info("offloading output layer to GPU")
		gpuLayers++
	case 2: // ACCEL
		slog.Info("offloading output layer to ACCEL")
	}
	for _, layer := range layers {
		if C.ggml_backend_dev_type(layer.d) == 1 {
			gpuLayers++
		}
	}
	slog.Info(fmt.Sprintf("offloaded %d/%d layers to GPU", gpuLayers, len(layers)+1))
	for bs := range maps.Values(bbs) {
		slog.Info("model weights", "buffer", C.GoString(C.ggml_backend_buffer_name(bs)), "size", format.HumanBytes2(uint64(C.ggml_backend_buffer_get_size(bs))))
	}

	// map tensor names to tensors for easy lookup later
	tensors := make(map[string]*C.struct_ggml_tensor)
	for _, c := range ctxs {
		for t := C.ggml_get_first_tensor(c); t != nil; t = C.ggml_get_next_tensor(c, t) {
			tensors[C.GoString(C.ggml_get_name(t))] = t
		}
	}

	// map devices to backend buffer types so new tensors can be assigned to the correct device
	deviceBufferTypes := make(map[*C.struct_ggml_backend_device]*C.struct_ggml_backend_buffer_type)

	// create backends and buffer types used for the compute graph scheduler
	var schedBackends []*C.struct_ggml_backend
	var schedBufts []*C.struct_ggml_backend_buffer_type
	for _, d := range append(gpus, append(accels, cpus...)...) {
		b := C.ggml_backend_dev_init(d, nil)
		bt := C.ggml_backend_get_default_buffer_type(b)

		deviceBufferTypes[d] = bt

		schedBackends = append(schedBackends, b)
		schedBufts = append(schedBufts, bt)

		if C.ggml_backend_is_cpu(b) {
			// set number of threads for cpu backend
			C.ggml_backend_cpu_set_n_threads(b, C.int(Threads(params.NumThreads)))
		}
	}

	maxGraphNodes := max(8192, len(meta.Tensors().Items())*5)
	return &Backend{
		modelPath:         modelPath,
		flashAttention:    params.FlashAttention,
		meta:              meta,
		tensorLoadTargets: targets,
		tensors:           tensors,
		sched: C.ggml_backend_sched_new(
			(*C.ggml_backend_t)(unsafe.Pointer(&schedBackends[0])),
			(*C.ggml_backend_buffer_type_t)(unsafe.Pointer(&schedBufts[0])),
			C.int(len(schedBackends)),
			C.size_t(maxGraphNodes),
			C._Bool(len(gpus) > 1 && slices.Contains(gpus, output.d)),
			C._Bool(false),
		),
		schedBackends: schedBackends,
		schedBufts:    schedBufts,
		input:         deviceBufferTypes[input.d],
		layers: func() map[int]*C.struct_ggml_backend_buffer_type {
			m := make(map[int]*C.struct_ggml_backend_buffer_type)
			for i, layer := range layers {
				m[i] = deviceBufferTypes[layer.d]
			}
			return m
		}(),
		requiredMemory: &requiredMemory,
		btDeviceMemory: btDeviceMemory,
		maxGraphNodes:  maxGraphNodes,
	}, nil
}

// init registers this implementation with the ml package under "ggml".
func init() {
	ml.RegisterBackend("ggml", New)
}

// Load reads the tensor data from the model file into the buffers
// allocated by New. Tensors are loaded concurrently, bounded by
// GOMAXPROCS; progress, if non-nil, is called with the fraction of
// total bytes transferred so far. The first error (or context
// cancellation) aborts the remaining work.
func (b *Backend) Load(ctx context.Context, progress func(float32)) error {
	var doneBytes atomic.Uint64
	totalBytes := uint64(b.meta.Length) - b.meta.Tensors().Offset

	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(runtime.GOMAXPROCS(0))
	for _, t := range b.meta.Tensors().Items() {
		t := t // capture the loop variable for the goroutine (pre-Go 1.22 semantics)
		g.Go(func() error {
			// Resolve every destination for this source tensor; a source may
			// be loaded into several tensors (empty target means its own name).
			tts := make([]*C.struct_ggml_tensor, max(1, len(b.tensorLoadTargets[t.Name])))
			for i := range tts {
				target := b.tensorLoadTargets[t.Name][i]
				if target == "" {
					target = t.Name
				}

				tt, ok := b.tensors[target]
				if !ok {
					return fmt.Errorf("unassigned tensor: %s", t.Name)
				}

				tts[i] = tt
			}

			// Create a new FD for each goroutine so that each FD is read sequentially, rather than
			// seeking around within an FD shared between all goroutines.
			file, err := os.Open(b.modelPath)
			if err != nil {
				slog.Warn("file open error", "file", b.modelPath, "error", err)
				return err
			}
			defer file.Close()
			sr := io.NewSectionReader(file, int64(b.meta.Tensors().Offset+t.Offset), int64(t.Size()))
			bts := make([]byte, 128*format.KibiByte)

			var s uint64
			for s < t.Size() {
				// Stop if either the parent context has been canceled or if any of the other tensors returned an error
				if err := ctx.Err(); err != nil {
					return err
				}

				n, err := io.ReadFull(sr, bts[:min(len(bts), int(t.Size()-s))])
				if err != nil {
					slog.Warn("file read error", "file", b.modelPath, "error", err)
					return err
				}

				// copy this chunk into every destination tensor at offset s
				for _, tt := range tts {
					C.ggml_backend_tensor_set(tt, unsafe.Pointer(&bts[0]), C.size_t(s), C.size_t(n))
				}

				s += uint64(n)

				if progress != nil {
					done := doneBytes.Add(uint64(n))
					progress(float32(done) / float32(totalBytes))
				}
			}

			return nil
		})
	}

	if err := g.Wait(); err != nil {
		return err
	}

	return nil
}

518
519
520
521
// BackendMemory returns a copy of the backend's memory accounting.
func (b *Backend) BackendMemory() ml.BackendMemory {
	mem := *b.requiredMemory
	return mem
}

522
func (b *Backend) Config() fs.Config {
Michael Yang's avatar
Michael Yang committed
523
524
525
526
	return b.meta.KV()
}

func (b *Backend) Get(name string) ml.Tensor {
527
528
	if t, ok := b.tensors[name]; ok {
		return &Tensor{b: b, t: t}
Michael Yang's avatar
Michael Yang committed
529
530
531
532
533
534
	}

	return nil
}

// NewContext returns a context sized for the backend's maximum graph.
func (b *Backend) NewContext() ml.Context {
	return b.NewContextSize(b.maxGraphNodes)
}

// NewContextSize returns a context that can hold up to n graph nodes.
// It panics if n exceeds the limit the backend scheduler was created with.
func (b *Backend) NewContextSize(n int) ml.Context {
	if n > b.maxGraphNodes {
		panic(fmt.Errorf("requested number of graph nodes (%v) for new context exceeds maximum (%v)", n, b.maxGraphNodes))
	}

	var allocatedBuffers []*C.struct_ggml_backend_buffer

	return &Context{
		b:             b,
		maxGraphNodes: n,
		// no_alloc: the ggml context only holds tensor metadata; data
		// buffers are allocated explicitly in newTensor.
		ctx: C.ggml_init(C.struct_ggml_init_params{
			mem_size: C.size_t(n)*C.ggml_tensor_overhead() + C.ggml_graph_overhead_custom(C.size_t(n), false),
			no_alloc: true,
		}),
		allocatedBuffers: &allocatedBuffers,
		layer:            -1,
	}
}

557
// CacheConfig describes the KV-cache layout this backend expects, which
// differs depending on whether flash attention is enabled.
func (b *Backend) CacheConfig() ml.CacheConfig {
	if !b.flashAttention {
		return ml.CacheConfig{CachePadding: 32, PermutedV: true}
	}

	return ml.CacheConfig{CachePadding: 256, MaskDType: ml.DTypeF16, MaskBatchPadding: C.GGML_KQ_MASK_PAD}
}

Michael Yang's avatar
Michael Yang committed
565
// Context implements ml.Context: it holds a ggml context for building
// tensors and a compute graph, plus the buffer type new tensors are
// allocated on.
type Context struct {
	b *Backend

	ctx   *C.struct_ggml_context
	graph *C.struct_ggml_cgraph

	// buft is the buffer type used for new tensors
	buft *C.struct_ggml_backend_buffer_type

	// allocatedBuffers are buffers for tensors that we have allocated in this context
	// so that we can free them when we close the context
	allocatedBuffers *[]*C.struct_ggml_backend_buffer

	// maxGraphNodes is the maximum allowed number of graph nodes in this context
	maxGraphNodes int

	// layer is the graph layer that this context is allocating for - assumed to be cache
	layer int
}

585
func (c *Context) Input() ml.Context {
Michael Yang's avatar
Michael Yang committed
586
	if c.b.input != nil {
587
		return &Context{
588
589
590
591
592
			b:                c.b,
			ctx:              c.ctx,
			buft:             c.b.input,
			allocatedBuffers: c.allocatedBuffers,
			maxGraphNodes:    c.maxGraphNodes,
593
			layer:            -1,
594
595
596
		}
	}

597
	return c
598
599
}

600
func (c *Context) Layer(i int) ml.Context {
601
	if buft, ok := c.b.layers[i]; ok {
602
		return &Context{
603
604
605
606
607
			b:                c.b,
			ctx:              c.ctx,
			buft:             buft,
			allocatedBuffers: c.allocatedBuffers,
			maxGraphNodes:    c.maxGraphNodes,
608
			layer:            i,
609
610
611
		}
	}

612
	return c
613
614
}

615
func (c *Context) Forward(tensors ...ml.Tensor) ml.Context {
Michael Yang's avatar
Michael Yang committed
616
	if c.graph == nil {
617
		c.graph = C.ggml_new_graph_custom(c.ctx, C.size_t(c.maxGraphNodes), false)
Michael Yang's avatar
Michael Yang committed
618
619
	}

620
621
622
623
624
	for _, tensor := range tensors {
		C.ggml_build_forward_expand(c.graph, tensor.(*Tensor).t)
	}

	return c
Michael Yang's avatar
Michael Yang committed
625
626
}

627
// Compute launches the built graph asynchronously on the scheduler and
// panics if the launch fails. Each given tensor with data is given a
// shared sync hook so that reading it later (Bytes/Floats) first waits
// for the computation to finish; the synchronize call runs at most once.
func (c *Context) Compute(tensors ...ml.Tensor) {
	if status := C.ggml_backend_sched_graph_compute_async(c.b.sched, c.graph); status != C.GGML_STATUS_SUCCESS {
		panic(fmt.Errorf("error computing ggml graph: %v", status))
	}
	C.ggml_backend_sched_reset(c.b.sched)

	// one sync closure shared by all output tensors of this compute
	needSync := true
	sync := func() {
		if needSync {
			C.ggml_backend_sched_synchronize(c.b.sched)
			needSync = false
		}
	}

	// only tensors with a non-zero byte size can be read back
	for _, t := range tensors {
		if C.ggml_nbytes(t.(*Tensor).t) > 0 {
			t.(*Tensor).sync = sync
		}
	}
}

648
649
// Reserve performs a trial allocation of the current graph on the
// scheduler, records the attempted per-buffer-type graph memory in the
// backend's accounting, and panics with ml.ErrNoMem if the reservation
// did not succeed.
func (c *Context) Reserve() {
	reserved := C.ggml_backend_sched_reserve(c.b.sched, c.graph)

	slog.Debug("compute graph", "nodes", C.ggml_graph_n_nodes(c.graph), "splits", C.ggml_backend_sched_get_n_splits(c.b.sched))

	// Reserve may get called multiple times for different graphs - we just want the last run, which will contain the max allocations
	for _, bt := range c.b.schedBufts {
		c.b.btDeviceMemory[bt].Graph = ml.Memory{}
	}

	for i := range c.b.schedBackends {
		bufferStatus := C.ggml_backend_sched_get_attempted_buffer_size(c.b.sched, c.b.schedBackends[i])

		// a buffer type may back several backends; once marked Failed it stays Failed
		graph := &c.b.btDeviceMemory[c.b.schedBufts[i]].Graph
		graph.Size += uint64(bufferStatus.size)
		if bufferStatus.allocated && graph.Status != ml.Failed {
			graph.Status = ml.Allocated
		} else {
			graph.Status = ml.Failed
		}

		slog.Info("compute graph", "backend", C.GoString(C.ggml_backend_name(c.b.schedBackends[i])), "buffer_type", C.GoString(C.ggml_backend_buft_name(c.b.schedBufts[i])),
			"size", format.HumanBytes2(uint64(bufferStatus.size)))
	}

	if !reserved {
		panic(ml.ErrNoMem{BackendMemory: *c.b.requiredMemory})
	}
}

678
// MaxGraphNodes returns the maximum number of graph nodes this context
// may hold.
func (c *Context) MaxGraphNodes() int {
	return c.maxGraphNodes
}

682
683
684
// shapeToGGML converts a Go shape to the int64 array form ggml expects,
// returning a pointer to the first element of the converted slice.
// Panics if shape is empty.
func shapeToGGML(shape []int) *C.int64_t {
	converted := make([]C.int64_t, 0, len(shape))
	for _, dim := range shape {
		converted = append(converted, C.int64_t(dim))
	}

	return &converted[0]
}

691
692
693
694
// pad rounds length up to the next multiple of pad.
func pad(length, pad C.size_t) C.size_t {
	if remainder := length % pad; remainder != 0 {
		return length + (pad - remainder)
	}

	return length
}

695
// newTensor allocates a tensor of the given dtype and shape on this
// context's buffer type and records the allocation in the per-layer
// cache accounting. It panics when no buffer type is selected (call
// Input or Layer first), on unsupported dtypes or shapes, and with
// ml.ErrNoMem when the backing buffer cannot be allocated.
func (c *Context) newTensor(dtype ml.DType, shape []int) ml.Tensor {
	if c.buft == nil {
		panic("set Input or Layer before creating tensors")
	}

	var cdtype uint32
	switch dtype {
	case ml.DTypeF32:
		cdtype = C.GGML_TYPE_F32
	case ml.DTypeF16:
		cdtype = C.GGML_TYPE_F16
	case ml.DTypeQ80:
		cdtype = C.GGML_TYPE_Q8_0
	case ml.DTypeQ40:
		cdtype = C.GGML_TYPE_Q4_0
	case ml.DTypeI32:
		cdtype = C.GGML_TYPE_I32
	default:
		panic("unsupported dtype")
	}

	if len(shape) < 1 || shape[0] == 0 {
		// empty tensor: represent as a 1-d tensor with zero elements
		var shape C.int64_t = 0
		return &Tensor{b: c.b, t: C.ggml_new_tensor(c.ctx, cdtype, 1, &shape)}
	} else if len(shape) > 4 {
		panic("unsupported number of dimensions")
	}

	for _, dim := range shape {
		if dim < 1 {
			panic("invalid shape")
		}
	}

	t := C.ggml_new_tensor(c.ctx, cdtype, C.int(len(shape)), shapeToGGML(shape))
	size := pad(C.ggml_backend_buft_get_alloc_size(c.buft, t), C.ggml_backend_buft_get_alignment(c.buft))

	b := C.ggml_backend_buft_alloc_buffer(c.buft, size)
	// record the allocation against this layer's cache accounting
	if c.layer >= 0 {
		cache := &c.b.btDeviceMemory[c.buft].Cache[c.layer]

		cache.Size += uint64(size)
		if b != nil {
			cache.Status = ml.Allocated
		} else {
			cache.Status = ml.Failed
		}
	}

	if b == nil {
		panic(ml.ErrNoMem{BackendMemory: *c.b.requiredMemory})
	}

	*c.allocatedBuffers = append(*c.allocatedBuffers, b)
	C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b))
	return &Tensor{b: c.b, t: t}
}

753
// Empty returns an uninitialized tensor of the given dtype and shape.
func (c *Context) Empty(dtype ml.DType, shape ...int) ml.Tensor {
	return c.newTensor(dtype, shape)
}

757
func (c *Context) Zeros(dtype ml.DType, shape ...int) ml.Tensor {
758
	t := c.newTensor(dtype, shape)
759
760
	C.ggml_set_zero(t.(*Tensor).t)
	return t
Michael Yang's avatar
Michael Yang committed
761
762
}

763
// checkShape panics when the number of elements in s does not equal the
// product of the dimensions in shape. An empty slice is always accepted
// (used for creating empty tensors).
//
// The previous implementation divided len(s) by each dimension in turn
// and checked for a quotient of 1, which silently accepted mismatched
// sizes due to integer truncation (e.g. 5 elements passed for shape
// [2, 2] because 5/2/2 == 1). Comparing the exact product fixes that.
func checkShape[S ~[]E, E any](s S, shape ...int) {
	n := len(s)

	if n == 0 {
		return
	}

	product := 1
	for _, v := range shape {
		product *= v
	}

	if product != n {
		panic(fmt.Errorf("invalid shape: %v", shape))
	}
}

779
780
func (c *Context) FromFloatSlice(s []float32, shape ...int) ml.Tensor {
	checkShape(s, shape...)
781

782
	t := c.newTensor(ml.DTypeF32, shape)
783

Jesse Gross's avatar
Jesse Gross committed
784
785
786
787
	if len(s) > 0 {
		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
	}

788
	return t
Michael Yang's avatar
Michael Yang committed
789
790
}

791
792
func (c *Context) FromIntSlice(s []int32, shape ...int) ml.Tensor {
	checkShape(s, shape...)
793

794
	t := c.newTensor(ml.DTypeI32, shape)
795

Jesse Gross's avatar
Jesse Gross committed
796
797
798
799
	if len(s) > 0 {
		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
	}

800
	return t
Michael Yang's avatar
Michael Yang committed
801
802
}

Michael Yang's avatar
arange  
Michael Yang committed
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
// Arange returns a tensor holding the sequence [start, stop) in
// increments of step. DTypeF32 uses ggml's native arange; DTypeI32 is
// built on the CPU and uploaded because ggml cannot cast f32 to i32.
// Any other dtype panics.
//
// NOTE(review): this method has a value receiver while every other
// Context method uses a pointer receiver - confirm this is intentional
// before changing it, since switching receivers alters the method set.
func (c Context) Arange(start, stop, step float32, dtype ml.DType) ml.Tensor {
	switch dtype {
	case ml.DTypeF32:
		// ggml_arange creates a float32 tensor
		return &Tensor{
			b: c.b,
			t: C.ggml_arange(c.ctx, C.float(start), C.float(stop), C.float(step)),
		}
	case ml.DTypeI32:
		// ggml_cast does not support float32 to int32 conversion
		arange := make([]int32, 0, int((stop-start)/step))
		for i := start; i < stop; i += step {
			arange = append(arange, int32(i))
		}

		return c.Input().FromIntSlice(arange, len(arange))
	default:
		panic("unsupported dtype for arange")
	}
}

Michael Yang's avatar
Michael Yang committed
824
825
func (c *Context) Close() {
	if c != nil {
826
827
828
829
830
		for _, b := range *c.allocatedBuffers {
			C.ggml_backend_buffer_free(b)
		}
		*c.allocatedBuffers = nil

831
832
		C.ggml_free(c.ctx)
	}
Michael Yang's avatar
Michael Yang committed
833
834
835
}

// Tensor wraps a ggml tensor together with the backend that owns it.
type Tensor struct {
	b *Backend
	t *C.struct_ggml_tensor

	// sync is set by Compute on output tensors; Bytes and Floats call it
	// before reading data and return nil when it is unset.
	sync func()
}

// LogValue implements slog.LogValuer so a tensor logs its ggml name,
// element type, and shape rather than opaque struct contents.
func (t *Tensor) LogValue() slog.Value {
	name := C.GoString(C.ggml_get_name(t.t))
	typeName := C.GoString(C.ggml_type_name(t.t._type))
	return slog.GroupValue(
		slog.String("name", name),
		slog.String("type", typeName),
		slog.Any("shape", t.Shape()),
	)
}

849
850
// Dim returns the number of elements along dimension n.
func (t *Tensor) Dim(n int) int {
	return int(t.t.ne[n])
}

853
854
// Stride returns the stride, in bytes, of dimension n.
func (t *Tensor) Stride(n int) int {
	return int(t.t.nb[n])
}

857
858
func (t *Tensor) Shape() []int {
	shape := make([]int, C.ggml_n_dims(t.t))
Michael Yang's avatar
Michael Yang committed
859
860
861
862
863
864
865
	for i := range shape {
		shape[i] = t.Dim(i)
	}

	return shape
}

866
867
868
869
870
871
872
873
874
func (t *Tensor) Bytes() (data []byte) {
	if t.sync != nil {
		data = make([]byte, C.ggml_nbytes(t.t))

		t.sync()
		C.ggml_backend_tensor_get(t.t, unsafe.Pointer(&data[0]), 0, C.ggml_nbytes(t.t))
	}

	return
Michael Yang's avatar
Michael Yang committed
875
876
}

877
878
879
880
881
882
func (t *Tensor) Floats() (data []float32) {
	if t.sync != nil {
		data = make([]float32, C.ggml_nelements(t.t))

		t.sync()
		C.ggml_backend_tensor_get(t.t, unsafe.Pointer(&data[0]), 0, C.ggml_nbytes(t.t))
Michael Yang's avatar
Michael Yang committed
883
884
885
886
887
888
889
890
891
	}

	return
}

func (t *Tensor) DType() ml.DType {
	switch t.t._type {
	case C.GGML_TYPE_F32:
		return ml.DTypeF32
Jesse Gross's avatar
Jesse Gross committed
892
893
	case C.GGML_TYPE_F16:
		return ml.DTypeF16
894
895
896
897
	case C.GGML_TYPE_Q8_0:
		return ml.DTypeQ80
	case C.GGML_TYPE_Q4_0:
		return ml.DTypeQ40
Michael Yang's avatar
Michael Yang committed
898
899
900
901
902
903
904
	case C.GGML_TYPE_I32:
		return ml.DTypeI32
	default:
		return ml.DTypeOther
	}
}

905
906
907
908
909
910
911
// Neg returns the elementwise negation of t.
func (t *Tensor) Neg(ctx ml.Context) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{b: t.b, t: C.ggml_neg(cc, t.t)}
}

Michael Yang's avatar
Michael Yang committed
912
913
func (t *Tensor) Add(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
914
		b: t.b,
Michael Yang's avatar
Michael Yang committed
915
916
917
918
		t: C.ggml_add(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

Michael Yang's avatar
Michael Yang committed
919
920
921
922
923
924
925
// Sub returns the elementwise difference t - t2.
func (t *Tensor) Sub(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{b: t.b, t: C.ggml_sub(cc, t.t, t2.(*Tensor).t)}
}

926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
// Repeat tiles t n times along dimension dim. Panics when dim is not a
// valid ggml dimension index.
func (t *Tensor) Repeat(ctx ml.Context, dim, n int) ml.Tensor {
	if dim < 0 || dim >= C.GGML_MAX_DIMS {
		panic("invalid dimension")
	}

	// Build a template tensor with the target shape; ggml_repeat tiles
	// t until it matches the template.
	shape := make([]C.int64_t, C.GGML_MAX_DIMS)
	for i := range shape {
		shape[i] = C.int64_t(t.Dim(i))
		if i == dim {
			shape[i] *= C.int64_t(n)
		}
	}

	cc := ctx.(*Context).ctx
	tmpl := C.ggml_new_tensor(cc, t.t._type, C.int(len(shape)), unsafe.SliceData(shape))
	return &Tensor{b: t.b, t: C.ggml_repeat(cc, t.t, tmpl)}
}

Michael Yang's avatar
Michael Yang committed
947
948
949
950
951
952
953
954
955
956
// Stack concatenates t with every tensor in s along dim, folding the
// list recursively. With no extra tensors it returns t unchanged.
func (t *Tensor) Stack(ctx ml.Context, dim int, s ...ml.Tensor) ml.Tensor {
	if len(s) == 0 {
		return t
	}
	return t.Concat(ctx, s[0].Stack(ctx, dim, s[1:]...), dim)
}

func (t *Tensor) Concat(ctx ml.Context, t2 ml.Tensor, dim int) ml.Tensor {
	return &Tensor{
957
		b: t.b,
Michael Yang's avatar
Michael Yang committed
958
959
960
961
962
963
		t: C.ggml_concat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(dim)),
	}
}

func (t *Tensor) Contiguous(ctx ml.Context) ml.Tensor {
	return &Tensor{
964
		b: t.b,
Michael Yang's avatar
Michael Yang committed
965
966
967
968
969
970
		t: C.ggml_cont(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) Mul(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
971
		b: t.b,
Michael Yang's avatar
Michael Yang committed
972
973
974
975
		t: C.ggml_mul(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

976
977
978
979
980
981
982
// Div returns the elementwise quotient t / t2.
func (t *Tensor) Div(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{b: t.b, t: C.ggml_div(cc, t.t, t2.(*Tensor).t)}
}

Michael Yang's avatar
Michael Yang committed
983
984
func (t *Tensor) Mulmat(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
985
		b: t.b,
Michael Yang's avatar
Michael Yang committed
986
987
988
989
		t: C.ggml_mul_mat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

990
991
992
993
994
func (t *Tensor) MulmatFullPrec(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	mul := C.ggml_mul_mat(ctx.(*Context).ctx, t.t, t2.(*Tensor).t)
	C.ggml_mul_mat_set_prec(mul, C.GGML_PREC_F32)

	return &Tensor{
995
		b: t.b,
996
997
998
999
		t: mul,
	}
}

Michael Yang's avatar
llama4  
Michael Yang committed
1000
1001
1002
1003
1004
1005
1006
// MulmatID performs indirect (expert-selected) matrix multiplication,
// where ids selects which sub-matrices of t participate.
func (t *Tensor) MulmatID(ctx ml.Context, t2, ids ml.Tensor) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{b: t.b, t: C.ggml_mul_mat_id(cc, t.t, t2.(*Tensor).t, ids.(*Tensor).t)}
}

Michael Yang's avatar
Michael Yang committed
1007
func (t *Tensor) LayerNorm(ctx ml.Context, w, b ml.Tensor, eps float32) ml.Tensor {
Michael Yang's avatar
llama4  
Michael Yang committed
1008
1009
1010
1011
1012
1013
	tt := C.ggml_norm(ctx.(*Context).ctx, t.t, C.float(eps))
	if w != nil {
		tt = C.ggml_mul(ctx.(*Context).ctx, tt, w.(*Tensor).t)
		if b != nil {
			tt = C.ggml_add(ctx.(*Context).ctx, tt, b.(*Tensor).t)
		}
Michael Yang's avatar
Michael Yang committed
1014
1015
	}

Michael Yang's avatar
llama4  
Michael Yang committed
1016
	return &Tensor{b: t.b, t: tt}
Michael Yang's avatar
Michael Yang committed
1017
1018
1019
}

func (t *Tensor) RMSNorm(ctx ml.Context, w ml.Tensor, eps float32) ml.Tensor {
Michael Yang's avatar
llama4  
Michael Yang committed
1020
1021
1022
1023
1024
1025
	tt := C.ggml_rms_norm(ctx.(*Context).ctx, t.t, C.float(eps))
	if w != nil {
		tt = C.ggml_mul(ctx.(*Context).ctx, tt, w.(*Tensor).t)
	}

	return &Tensor{b: t.b, t: tt}
Michael Yang's avatar
Michael Yang committed
1026
1027
}

1028
func (t *Tensor) Pad(ctx ml.Context, shape ...int) ml.Tensor {
Michael Yang's avatar
Michael Yang committed
1029
1030
	if len(shape) != 4 {
		panic("expected 4 dimensions")
1031
1032
	} else if shape[3] != 0 {
		panic("cuda does not support 4d tensors")
Michael Yang's avatar
Michael Yang committed
1033
1034
1035
	}

	return &Tensor{
1036
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
		t: C.ggml_pad(ctx.(*Context).ctx, t.t, C.int(shape[0]), C.int(shape[1]), C.int(shape[2]), C.int(shape[3])),
	}
}

func (t *Tensor) Permute(ctx ml.Context, shape ...int) ml.Tensor {
	if len(shape) != 4 {
		panic("expected 4 dimensions")
	}

	return &Tensor{
1047
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1048
1049
1050
1051
1052
1053
		t: C.ggml_permute(ctx.(*Context).ctx, t.t, C.int(shape[0]), C.int(shape[1]), C.int(shape[2]), C.int(shape[3])),
	}
}

func (t *Tensor) Rows(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
1054
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1055
1056
1057
1058
1059
1060
		t: C.ggml_get_rows(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

func (t *Tensor) Copy(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
	return &Tensor{
1061
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1062
1063
1064
1065
		t: C.ggml_cpy(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
	}
}

1066
func (t *Tensor) Reshape(ctx ml.Context, shape ...int) ml.Tensor {
Michael Yang's avatar
Michael Yang committed
1067
1068
1069
	switch len(shape) {
	case 1:
		return &Tensor{
1070
			b: t.b,
Michael Yang's avatar
Michael Yang committed
1071
1072
1073
1074
			t: C.ggml_reshape_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0])),
		}
	case 2:
		return &Tensor{
1075
			b: t.b,
Michael Yang's avatar
Michael Yang committed
1076
1077
1078
1079
			t: C.ggml_reshape_2d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1])),
		}
	case 3:
		return &Tensor{
1080
			b: t.b,
Michael Yang's avatar
Michael Yang committed
1081
1082
1083
1084
			t: C.ggml_reshape_3d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2])),
		}
	case 4:
		return &Tensor{
1085
			b: t.b,
Michael Yang's avatar
Michael Yang committed
1086
1087
1088
1089
1090
1091
1092
1093
1094
			t: C.ggml_reshape_4d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.int64_t(shape[1]), C.int64_t(shape[2]), C.int64_t(shape[3])),
		}
	default:
		panic("unsupported number of dimensions")
	}
}

func (t *Tensor) Scale(ctx ml.Context, s float64) ml.Tensor {
	return &Tensor{
1095
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1096
1097
1098
1099
		t: C.ggml_scale(ctx.(*Context).ctx, t.t, (C.float)(s)),
	}
}

1100
1101
1102
1103
1104
1105
1106
// SumRows sums t along dimension 0, producing one value per row.
func (t *Tensor) SumRows(ctx ml.Context) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{b: t.b, t: C.ggml_sum_rows(cc, t.t)}
}

Michael Yang's avatar
Michael Yang committed
1107
1108
func (t *Tensor) Softmax(ctx ml.Context) ml.Tensor {
	return &Tensor{
1109
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1110
1111
1112
1113
		t: C.ggml_soft_max(ctx.(*Context).ctx, t.t),
	}
}

1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
// Sin applies the elementwise sine.
func (t *Tensor) Sin(ctx ml.Context) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{b: t.b, t: C.ggml_sin(cc, t.t)}
}

// Cos applies the elementwise cosine.
func (t *Tensor) Cos(ctx ml.Context) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{b: t.b, t: C.ggml_cos(cc, t.t)}
}

Michael Yang's avatar
Michael Yang committed
1128
1129
func (t *Tensor) Tanh(ctx ml.Context) ml.Tensor {
	return &Tensor{
1130
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1131
1132
1133
1134
		t: C.ggml_tanh_inplace(ctx.(*Context).ctx, t.t),
	}
}

Michael Yang's avatar
llama4  
Michael Yang committed
1135
1136
1137
1138
1139
1140
1141
// Sigmoid applies the logistic sigmoid, in place on t's buffer.
func (t *Tensor) Sigmoid(ctx ml.Context) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{b: t.b, t: C.ggml_sigmoid_inplace(cc, t.t)}
}

Michael Yang's avatar
Michael Yang committed
1142
1143
1144
1145
// View returns a strided view into t. offset is in bytes from the start
// of t's data. The variadic shape interleaves dimension sizes and
// byte strides:
//
//	1 arg:  [d0]
//	3 args: [d0, stride1, d1]
//	5 args: [d0, stride1, d1, stride2, d2]
//	7 args: [d0, stride1, d1, stride2, d2, stride3, d3]
//
// Any other arity panics.
func (t *Tensor) View(ctx ml.Context, offset int, shape ...int) ml.Tensor {
	switch len(shape) {
	case 1:
		return &Tensor{
			b: t.b,
			t: C.ggml_view_1d(ctx.(*Context).ctx, t.t, C.int64_t(shape[0]), C.size_t(offset)),
		}
	case 3:
		// shape = [d0, stride1, d1]
		return &Tensor{
			b: t.b,
			t: C.ggml_view_2d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]),
				C.size_t(shape[1]),
				C.size_t(offset)),
		}
	case 5:
		// shape = [d0, stride1, d1, stride2, d2]
		return &Tensor{
			b: t.b,
			t: C.ggml_view_3d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]), C.int64_t(shape[4]),
				C.size_t(shape[1]), C.size_t(shape[3]),
				C.size_t(offset)),
		}
	case 7:
		// shape = [d0, stride1, d1, stride2, d2, stride3, d3]
		return &Tensor{
			b: t.b,
			t: C.ggml_view_4d(ctx.(*Context).ctx, t.t,
				C.int64_t(shape[0]), C.int64_t(shape[2]), C.int64_t(shape[4]), C.int64_t(shape[6]),
				C.size_t(shape[1]), C.size_t(shape[3]), C.size_t(shape[5]),
				C.size_t(offset)),
		}
	default:
		panic("unsupported number of dimensions")
	}
}

1178
// RoPE applies rotary position embeddings to t over the first ropeDim
// dimensions, using the given positions, frequency base, and scale.
// Behavior is tuned via rope.Options (rope type, original context
// length, frequency factors).
func (t *Tensor) RoPE(ctx ml.Context, positions ml.Tensor, ropeDim int, ropeBase, ropeScale float32, options ...func(*rope.Options)) ml.Tensor {
	// Default options
	opts := &rope.Options{OriginalContextLength: 131072, Factors: &Tensor{}}

	// Apply any provided options
	for _, option := range options {
		option(opts)
	}

	// ggml_rope_ext does not operate on quantized tensors; dequantize
	// to f32 first when needed.
	dequant := t.t
	if C.ggml_is_quantized(t.t._type) {
		dequant = C.ggml_cast(ctx.(*Context).ctx, t.t, C.GGML_TYPE_F32)
	}

	return &Tensor{
		b: t.b,
		t: C.ggml_rope_ext(
			ctx.(*Context).ctx,
			dequant,
			positions.(*Tensor).t,
			opts.Factors.(*Tensor).t,
			C.int(ropeDim),
			C.int(opts.Type),
			C.int(opts.OriginalContextLength),
			C.float(ropeBase),
			C.float(ropeScale),
			// Trailing constants are presumably the YaRN parameters
			// ext_factor=0, attn_factor=1, beta_fast=32, beta_slow=1 —
			// confirm against the ggml_rope_ext declaration in ggml.h.
			C.float(0.0),
			C.float(1.0),
			C.float(32.0),
			C.float(1.0),
		),
	}
}

1212
1213
1214
1215
1216
1217
1218
// IM2Col rearranges image patches of t2 into columns using kernel t,
// with strides s0/s1, padding p0/p1, and dilation d0/d1, producing an
// f32 result suitable for convolution as matrix multiplication.
func (t *Tensor) IM2Col(ctx ml.Context, t2 ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{
		b: t.b,
		t: C.ggml_im2col(cc, t.t, t2.(*Tensor).t, C.int(s0), C.int(s1), C.int(p0), C.int(p1), C.int(d0), C.int(d1), true, C.GGML_TYPE_F32),
	}
}

Michael Yang's avatar
Michael Yang committed
1219
1220
func (t *Tensor) GELU(ctx ml.Context) ml.Tensor {
	return &Tensor{
1221
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1222
1223
1224
1225
1226
1227
		t: C.ggml_gelu_inplace(ctx.(*Context).ctx, t.t),
	}
}

func (t *Tensor) SILU(ctx ml.Context) ml.Tensor {
	return &Tensor{
1228
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1229
1230
1231
1232
		t: C.ggml_silu_inplace(ctx.(*Context).ctx, t.t),
	}
}

Michael Yang's avatar
Michael Yang committed
1233
1234
1235
1236
1237
1238
1239
// RELU applies the rectified linear unit, in place on t's buffer.
func (t *Tensor) RELU(ctx ml.Context) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{b: t.b, t: C.ggml_relu_inplace(cc, t.t)}
}

Michael Yang's avatar
Michael Yang committed
1240
1241
func (t *Tensor) Conv2D(ctx ml.Context, t2 ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor {
	return &Tensor{
1242
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1243
1244
1245
		t: C.ggml_conv_2d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.int(s0), C.int(s1), C.int(p0), C.int(p1), C.int(d0), C.int(d1)),
	}
}
1246

Michael Yang's avatar
Michael Yang committed
1247
func (t *Tensor) AvgPool2D(ctx ml.Context, k, s int, p float32) ml.Tensor {
Michael Yang's avatar
Michael Yang committed
1248
1249
	return &Tensor{
		b: t.b,
Michael Yang's avatar
Michael Yang committed
1250
		t: C.ggml_pool_2d(ctx.(*Context).ctx, t.t, C.GGML_OP_POOL_AVG, C.int(k), C.int(k), C.int(s), C.int(s), C.float(p), C.float(p)),
Michael Yang's avatar
Michael Yang committed
1251
1252
1253
	}
}

Michael Yang's avatar
Michael Yang committed
1254
1255
1256
1257
// Set writes t2 into t. With no strides this is a 1D set at the given
// offset; with one stride it is a 2D set. Any other arity panics.
//
// NOTE(review): ggml's ggml_set_2d C signature is (ctx, a, b, nb1,
// offset) — here `offset` is passed in the nb1 position and strides[0]
// in the offset position. Confirm callers pass arguments in this order
// intentionally.
func (t *Tensor) Set(ctx ml.Context, t2 ml.Tensor, offset int, strides ...int) ml.Tensor {
	var tt *C.struct_ggml_tensor
	switch len(strides) {
	case 0:
		tt = C.ggml_set_1d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.size_t(offset))
	case 1:
		tt = C.ggml_set_2d(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, C.size_t(offset), C.size_t(strides[0]))
	default:
		panic("unsupported number of dimensions")
	}

	return &Tensor{b: t.b, t: tt}
}

1268
1269
1270
1271
1272
1273
// ScaledDotProductAttention computes attention for query t against key
// and value, with an optional additive mask and scaling factor scale.
// When the backend has flash attention enabled it uses the fused
// ggml_flash_attn_ext kernel (forced to f32 precision); otherwise it
// computes softmax(K^T·Q · scale + mask) followed by a multiply with V.
func (t *Tensor) ScaledDotProductAttention(ctx ml.Context, key, value, mask ml.Tensor, scale float64) ml.Tensor {
	var kqMask *C.struct_ggml_tensor
	if mask != nil {
		kqMask = mask.(*Tensor).t
	}

	// Permute query/key into the axis order ggml's attention ops expect.
	query := t.Permute(ctx, 0, 2, 1, 3)
	key = key.Permute(ctx, 0, 2, 1, 3)

	if t.b.flashAttention {
		// The fused kernel also wants value permuted.
		value = value.Permute(ctx, 0, 2, 1, 3)

		kqv := C.ggml_flash_attn_ext(ctx.(*Context).ctx, query.(*Tensor).t, key.(*Tensor).t, value.(*Tensor).t, kqMask, C.float(scale), 0, 0)
		C.ggml_flash_attn_ext_set_prec(kqv, C.GGML_PREC_F32)
		return &Tensor{b: t.b, t: kqv}
	} else {
		// Full-precision QK^T for numerical stability, then fused
		// scale+mask+softmax via ggml_soft_max_ext.
		kq := key.MulmatFullPrec(ctx, query)
		kq = &Tensor{
			b: t.b,
			t: C.ggml_soft_max_ext(ctx.(*Context).ctx, kq.(*Tensor).t, kqMask, C.float(scale), 0),
		}

		kqv := value.Mulmat(ctx, kq)
		return kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)
	}
}
1294
1295
1296
1297
1298
1299
1300

// Duplicate returns a copy of t.
func (t *Tensor) Duplicate(ctx ml.Context) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{b: t.b, t: C.ggml_dup(cc, t.t)}
}
Michael Yang's avatar
llama4  
Michael Yang committed
1301
1302
1303
1304
1305
1306
1307

// TopK returns the indices of the k largest elements along dimension 0.
func (t *Tensor) TopK(ctx ml.Context, k int) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{b: t.b, t: C.ggml_top_k(cc, t.t, C.int(k))}
}
1308
1309
1310
1311
1312
1313
1314

// Argsort returns indices that sort t in ascending order.
func (t *Tensor) Argsort(ctx ml.Context) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{b: t.b, t: C.ggml_argsort(cc, t.t, C.GGML_SORT_ORDER_ASC)}
}
Michael Yang's avatar
Michael Yang committed
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353

// Mean computes the mean of t along dimension 0.
func (t *Tensor) Mean(ctx ml.Context) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{b: t.b, t: C.ggml_mean(cc, t.t)}
}

// Variance computes the population variance along dimension 0:
// mean((t - mean(t))^2).
func (t *Tensor) Variance(ctx ml.Context) ml.Tensor {
	centered := t.Add(ctx, t.Mean(ctx).Scale(ctx, -1))
	return centered.Sqr(ctx).SumRows(ctx).Scale(ctx, 1/float64(t.Dim(0)))
}

// Stddev computes the population standard deviation along dimension 0
// as the square root of Variance.
func (t *Tensor) Stddev(ctx ml.Context) ml.Tensor {
	return t.Variance(ctx).Sqrt(ctx)
}

// Sqr squares every element of t.
func (t *Tensor) Sqr(ctx ml.Context) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{b: t.b, t: C.ggml_sqr(cc, t.t)}
}

// Sqrt takes the elementwise square root of t.
func (t *Tensor) Sqrt(ctx ml.Context) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{b: t.b, t: C.ggml_sqrt(cc, t.t)}
}

// Clamp limits every element of t to the range [min, max].
func (t *Tensor) Clamp(ctx ml.Context, min, max float32) ml.Tensor {
	cc := ctx.(*Context).ctx
	return &Tensor{b: t.b, t: C.ggml_clamp(cc, t.t, C.float(min), C.float(max))}
}