package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"slices"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/fs/ggml"
	"github.com/ollama/ollama/llama"
	"github.com/ollama/ollama/logutil"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/model"
)

type filteredEnv []string

func (e filteredEnv) LogValue() slog.Value {
	var attrs []slog.Attr
	for _, env := range e {
		if key, value, ok := strings.Cut(env, "="); ok {
			switch {
			case strings.HasPrefix(key, "OLLAMA_"),
				strings.HasPrefix(key, "CUDA_"),
				strings.HasPrefix(key, "ROCR_"),
				strings.HasPrefix(key, "ROCM_"),
				strings.HasPrefix(key, "HIP_"),
				strings.HasPrefix(key, "GPU_"),
				strings.HasPrefix(key, "HSA_"),
				strings.HasPrefix(key, "GGML_"),
				slices.Contains([]string{
					"PATH",
					"LD_LIBRARY_PATH",
					"DYLD_LIBRARY_PATH",
				}, key):
				attrs = append(attrs, slog.String(key, value))
			}
		}
	}
	return slog.GroupValue(attrs...)
}
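
// For example (illustrative, not from a real run): given a subprocess
// environment of OLLAMA_DEBUG=1 CUDA_VISIBLE_DEVICES=0 HOME=/root PATH=/usr/bin,
// the logged group would contain OLLAMA_DEBUG, CUDA_VISIBLE_DEVICES and PATH,
// while HOME is dropped because it matches neither an allowed prefix nor an
// allowed key.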

Daniel Hiltgen's avatar
Daniel Hiltgen committed
66
type LlamaServer interface {
Jesse Gross's avatar
Jesse Gross committed
67
	ModelPath() string
68
	Load(ctx context.Context, systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, requireFull bool) ([]ml.DeviceID, error)
Daniel Hiltgen's avatar
Daniel Hiltgen committed
69
70
71
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
72
	Embedding(ctx context.Context, input string) ([]float32, error)
Daniel Hiltgen's avatar
Daniel Hiltgen committed
73
74
75
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
Jesse Gross's avatar
Jesse Gross committed
76
77
	VRAMSize() uint64 // Total VRAM across all GPUs
	TotalSize() uint64
78
	VRAMByGPU(id ml.DeviceID) uint64
79
	Pid() int
80
81
82
	GetPort() int
	GetDeviceInfos(ctx context.Context) []ml.DeviceInfo
	HasExited() bool
Daniel Hiltgen's avatar
Daniel Hiltgen committed
83
84
}

// llmServer is an instance of a runner hosting a single model
type llmServer struct {
	port        int
	cmd         *exec.Cmd
	done        chan error // Channel to signal when the process exits
	status      *StatusWriter
	options     api.Options
	numParallel int
	modelPath   string

	loadRequest LoadRequest       // Parameters used to initialize the runner
	mem         *ml.BackendMemory // Memory allocations for this model

	// llamaModel is an instance of the cgo llama.cpp model definition
	// nil if this server is running the new engine
	llamaModel     *llama.Model
	llamaModelLock *sync.Mutex

	// textProcessor handles text encoding/decoding for the model in the Ollama engine
	// nil if this server is running the llama.cpp based engine
	textProcessor model.TextProcessor

	totalLayers  uint64
	loadStart    time.Time // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted
}

type llamaServer struct {
	llmServer

	ggml *ggml.GGML
}

type ollamaServer struct {
	llmServer
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// the maxArraySize is negative, all arrays are collected.
func LoadModel(model string, maxArraySize int) (*ggml.GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, err := ggml.Decode(f, maxArraySize)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
func NewLlamaServer(systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, modelPath string, f *ggml.GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
	var llamaModel *llama.Model
	var textProcessor model.TextProcessor
	var err error
	if envconfig.NewEngine() || f.KV().OllamaEngineRequired() {
		if len(projectors) == 0 {
			textProcessor, err = model.NewTextProcessor(modelPath)
		} else {
			err = errors.New("split vision models aren't supported")
		}
		if err != nil {
			// To prepare for opt-out mode, instead of treating this as an error, we fall back to the old runner
			slog.Debug("model not yet supported by Ollama engine, switching to compatibility mode", "model", modelPath, "error", err)
		}
	}
	if textProcessor == nil {
		llamaModel, err = llama.LoadModelFromFile(modelPath, llama.ModelParams{VocabOnly: true})
		if err != nil {
			return nil, err
		}
	}

	// Verify the requested context size is <= the model training size
	trainCtx := f.KV().ContextLength()
	if opts.NumCtx > int(trainCtx) && trainCtx > 0 {
		slog.Warn("requested context size too large for model", "num_ctx", opts.NumCtx, "n_ctx_train", trainCtx)
		opts.NumCtx = int(trainCtx)
	}

	opts.NumBatch = min(opts.NumBatch, opts.NumCtx)

	loadRequest := LoadRequest{LoraPath: adapters, KvSize: opts.NumCtx * numParallel, BatchSize: opts.NumBatch, Parallel: numParallel, MultiUserCache: envconfig.MultiUserCache()}

	defaultThreads := systemInfo.ThreadCount
	if opts.NumThread > 0 {
		loadRequest.NumThreads = opts.NumThread
	} else if defaultThreads > 0 {
		loadRequest.NumThreads = defaultThreads
	}

	// TODO - NUMA support currently doesn't work properly

	if opts.MainGPU > 0 {
		loadRequest.MainGPU = opts.MainGPU
	}

	if len(projectors) > 0 && llamaModel != nil {
		loadRequest.ProjectorPath = projectors[0]
	}

	fa := envconfig.FlashAttention(f.FlashAttention())

	// This will disable flash attention unless all GPUs on the system support it, even if we end up selecting a subset
	// that can handle it.
	if fa && !ml.FlashAttentionSupported(gpus) {
		slog.Warn("flash attention enabled but not supported by gpu")
		fa = false
	}

	if fa && !f.SupportsFlashAttention() {
		slog.Warn("flash attention enabled but not supported by model")
		fa = false
	}

	kvct := strings.ToLower(envconfig.KvCacheType())

	if fa {
		slog.Info("enabling flash attention")
		loadRequest.FlashAttention = true

		// Flash Attention also supports kv cache quantization
		// Enable it if the requested kv cache type is supported by the model
		if f.SupportsKVCacheType(kvct) {
			loadRequest.KvCacheType = kvct
		} else {
			slog.Warn("kv cache type not supported by model", "type", kvct)
		}
	} else if kvct != "" && kvct != "f16" {
		slog.Warn("quantized kv cache requested but flash attention disabled", "type", kvct)
	}

	gpuLibs := ml.LibraryPaths(gpus)
	status := NewStatusWriter(os.Stderr)
	cmd, port, err := StartRunner(
		textProcessor != nil,
		modelPath,
		gpuLibs,
		status,
		ml.GetVisibleDevicesEnv(gpus),
	)

	s := llmServer{
		port:           port,
		cmd:            cmd,
		status:         status,
		options:        opts,
		modelPath:      modelPath,
		loadRequest:    loadRequest,
		llamaModel:     llamaModel,
		llamaModelLock: &sync.Mutex{},
		textProcessor:  textProcessor,
		numParallel:    numParallel,
		sem:            semaphore.NewWeighted(int64(numParallel)),
		totalLayers:    f.KV().BlockCount() + 1,
		loadStart:      time.Now(),
		done:           make(chan error, 1),
	}

	if err != nil {
		var msg string
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		err := fmt.Errorf("error starting runner: %v %s", err, msg)
		if llamaModel != nil {
			llama.FreeModel(llamaModel)
		}
		return nil, err
	}

	// reap subprocess when it exits
	go func() {
		err := s.cmd.Wait()
		// Favor a more detailed message over the process exit status
		if err != nil && s.status != nil && s.status.LastErrMsg != "" {
			slog.Error("llama runner terminated", "error", err)
			if strings.Contains(s.status.LastErrMsg, "unknown model") {
				s.status.LastErrMsg = "this model is not supported by your version of Ollama. You may need to upgrade"
			}
			s.done <- errors.New(s.status.LastErrMsg)
		} else {
			s.done <- err
		}
	}()

	if textProcessor != nil {
		return &ollamaServer{llmServer: s}, nil
	} else {
		return &llamaServer{llmServer: s, ggml: f}, nil
	}
}

func StartRunner(ollamaEngine bool, modelPath string, gpuLibs []string, out io.Writer, extraEnvs map[string]string) (cmd *exec.Cmd, port int, err error) {
	var exe string
	exe, err = os.Executable()
	if err != nil {
		return nil, 0, fmt.Errorf("unable to lookup executable path: %w", err)
	}

	if eval, err := filepath.EvalSymlinks(exe); err == nil {
		exe = eval
	}

	port = 0
	if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
		var l *net.TCPListener
		if l, err = net.ListenTCP("tcp", a); err == nil {
			port = l.Addr().(*net.TCPAddr).Port
			l.Close()
		}
	}
	if port == 0 {
		slog.Debug("ResolveTCPAddr failed, using random port")
		port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
	}
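
	// Note: the listener above is closed before the runner binds the chosen
	// port, so in principle another process could claim it first; if that
	// happens the runner fails to start and the failure is reported when the
	// subprocess exits.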
	params := []string{"runner"}
	if ollamaEngine {
		params = append(params, "--ollama-engine")
	}
	if modelPath != "" {
		params = append(params, "--model", modelPath)
	}
	params = append(params, "--port", strconv.Itoa(port))

	var pathEnv string
	switch runtime.GOOS {
	case "windows":
		pathEnv = "PATH"
	case "darwin":
		pathEnv = "DYLD_LIBRARY_PATH"
	default:
		pathEnv = "LD_LIBRARY_PATH"
	}

	// Note: we always put our dependency paths first
	// since these are the exact version we compiled/linked against
	libraryPaths := append([]string{}, gpuLibs...)
	if libraryPath, ok := os.LookupEnv(pathEnv); ok {
		libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
	}

	cmd = exec.Command(exe, params...)

	cmd.Env = os.Environ()

	if out != nil {
		stdout, err := cmd.StdoutPipe()
		if err != nil {
			return nil, 0, fmt.Errorf("failed to spawn server stdout pipe: %w", err)
		}
		stderr, err := cmd.StderrPipe()
		if err != nil {
			return nil, 0, fmt.Errorf("failed to spawn server stderr pipe: %w", err)
		}
		go func() {
			io.Copy(out, stdout) //nolint:errcheck
		}()
		go func() {
			io.Copy(out, stderr) //nolint:errcheck
		}()
	}
	cmd.SysProcAttr = LlamaServerSysProcAttr

	// Always filter down the set of GPUs in case there are any unsupported devices that might crash
	pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

	// Update or add the path variable with our adjusted version
	pathNeeded := true
	ollamaPathNeeded := true
	extraEnvsDone := map[string]bool{}
	for k := range extraEnvs {
		extraEnvsDone[k] = false
	}
	for i := range cmd.Env {
		cmp := strings.SplitN(cmd.Env[i], "=", 2)
		if strings.EqualFold(cmp[0], pathEnv) {
			cmd.Env[i] = pathEnv + "=" + pathEnvVal
			pathNeeded = false
		} else if strings.EqualFold(cmp[0], "OLLAMA_LIBRARY_PATH") {
			cmd.Env[i] = "OLLAMA_LIBRARY_PATH=" + strings.Join(gpuLibs, string(filepath.ListSeparator))
			ollamaPathNeeded = false
		} else if len(extraEnvs) != 0 {
			for k, v := range extraEnvs {
				if strings.EqualFold(cmp[0], k) {
					cmd.Env[i] = k + "=" + v
					extraEnvsDone[k] = true
				}
			}
		}
	}
	if pathNeeded {
		cmd.Env = append(cmd.Env, pathEnv+"="+pathEnvVal)
	}
	if ollamaPathNeeded {
		cmd.Env = append(cmd.Env, "OLLAMA_LIBRARY_PATH="+strings.Join(gpuLibs, string(filepath.ListSeparator)))
	}
	for k, done := range extraEnvsDone {
		if !done {
			cmd.Env = append(cmd.Env, k+"="+extraEnvs[k])
		}
	}

	slog.Info("starting runner", "cmd", cmd)
	slog.Debug("subprocess", "", filteredEnv(cmd.Env))

	if err = cmd.Start(); err != nil {
		return nil, 0, err
	}
	err = nil
	return
}

func (s *llmServer) ModelPath() string {
	return s.modelPath
}

type LoadOperation int

// The order of these constants is significant because we iterate over the operations. They
// are ordered by increasing degree of loading the model.
const (
	LoadOperationFit    LoadOperation = iota // Return memory requirements but do not allocate
	LoadOperationAlloc                       // Allocate memory but do not load the weights
	LoadOperationCommit                      // Load weights - further changes cannot be made after this
	LoadOperationClose                       // Close model and free memory
)

func (o LoadOperation) String() string {
	switch o {
	case LoadOperationFit:
		return "fit"
	case LoadOperationAlloc:
		return "alloc"
	case LoadOperationCommit:
		return "commit"
	case LoadOperationClose:
		return "close"
	default:
		return "unknown"
	}
}

type LoadRequest struct {
	Operation LoadOperation

	LoraPath       []string
	Parallel       int
	BatchSize      int
	FlashAttention bool
	KvSize         int
	KvCacheType    string
	NumThreads     int
	GPULayers      ml.GPULayersList
	MultiUserCache bool

	// Legacy fields - not used with the Ollama engine
	ProjectorPath string
	MainGPU       int
	UseMmap       bool
}
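
// LoadRequest has no json tags, so it is marshaled with the Go field names as
// keys. A commit request to the runner's /load endpoint therefore looks roughly
// like (illustrative values only):
//
//	{"Operation":2,"Parallel":2,"BatchSize":512,"KvSize":8192,"NumThreads":8,...}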

type LoadResponse struct {
	Success bool
	Memory  ml.BackendMemory
}

var ErrLoadRequiredFull = errors.New("unable to load full model on GPU")

func (s *llamaServer) Load(ctx context.Context, systemInfo ml.SystemInfo, systemGPUs []ml.DeviceInfo, requireFull bool) ([]ml.DeviceID, error) {
	slog.Info("loading model", "model layers", s.totalLayers, "requested", s.options.NumGPU)

	gpus := append(make([]ml.DeviceInfo, 0, len(systemGPUs)), systemGPUs...)

	// Synthesize memory allocation information based on our estimates
	s.mem = &ml.BackendMemory{CPU: ml.DeviceMemory{
		Name:    "CPU",
		Weights: make([]uint64, s.totalLayers),
		Cache:   make([]uint64, s.totalLayers),
	}, GPUs: make([]ml.DeviceMemory, len(gpus))}

	for i := range s.mem.GPUs {
		s.mem.GPUs[i].Name = gpus[i].Name
		s.mem.GPUs[i].DeviceID = gpus[i].DeviceID
		s.mem.GPUs[i].Weights = make([]uint64, s.totalLayers)
		s.mem.GPUs[i].Cache = make([]uint64, s.totalLayers)
	}

	kv, graphPartialOffload, graphFullOffload := s.ggml.GraphSize(uint64(s.options.NumCtx), uint64(s.loadRequest.BatchSize),
		s.loadRequest.Parallel, s.loadRequest.KvCacheType, s.loadRequest.FlashAttention)

	// Use the size of one layer as a buffer
	layers := s.ggml.Tensors().GroupLayers()
	if blk0, ok := layers["blk.0"]; ok {
		for i := range gpus {
			gpus[i].FreeMemory -= blk0.Size() + kv[0]
		}
	} else {
		slog.Warn("model missing blk.0 layer size")
	}

	// Assign all the layers to the CPU for now, they will get reassigned later
	for i := range s.ggml.KV().BlockCount() {
		if blk, ok := layers[fmt.Sprintf("blk.%d", i)]; ok {
			s.mem.CPU.Weights[i] = blk.Size()
			s.mem.CPU.Cache[i] += kv[i]
		}
	}

	// We historically haven't included InputWeights in the model size
	var outputWeights uint64
	if layer, ok := layers["output_norm"]; ok {
		outputWeights += layer.Size()
	}
	if layer, ok := layers["output"]; ok {
		outputWeights += layer.Size()
	} else if layer, ok := layers["token_embd"]; ok {
		outputWeights += layer.Size()
	}
	s.mem.CPU.Weights[s.totalLayers-1] = outputWeights

	// The vision projector is always loaded on the first GPU if available.
	// This can't be assigned by us, so just subtract it from free space
	projectorGPU := -1
	var projectorWeights uint64
	if len(gpus) > 0 {
		for _, projector := range s.loadRequest.LoraPath {
			projectorWeights += projectorMemoryRequirements(projector)
		}

		// llama.cpp uses the first discrete GPU if available, otherwise the first iGPU
		firstIntegrated := -1
		for i := range gpus {
			if !gpus[i].Integrated {
				projectorGPU = i
				break
			}
			if firstIntegrated == -1 {
				firstIntegrated = i
			}
		}
		if projectorGPU == -1 {
			projectorGPU = firstIntegrated
		}

		gpus[projectorGPU].FreeMemory -= projectorWeights
	}

	var kvTotal uint64
	for _, kvLayer := range kv {
		kvTotal += kvLayer
	}

	if graphPartialOffload == 0 {
		headsKV := s.ggml.KV().HeadCountKVMin()
		if headsKV == 0 {
			headsKV = 1
		}
		gqa := s.ggml.KV().HeadCountMax() / headsKV
		graphPartialOffload = gqa * kvTotal / 6
	}
	if graphFullOffload == 0 {
		graphFullOffload = graphPartialOffload
	}

	// On Metal there's no partial offload overhead
	if len(gpus) > 0 && gpus[0].Library == "Metal" {
		graphPartialOffload = graphFullOffload
	}

	// Create a layout based on the memory data that we've built. The compute graph
	// for GPUs is iteratively assigned based on the number of GPUs that are required.
	var gpuLayers ml.GPULayersList
	for {
		prevGPULayers := gpuLayers

		var err error
		gpuLayers, err = s.createLayout(systemInfo, gpus, s.mem, requireFull, 0)
		if err != nil {
			return nil, err
		}

		if len(gpuLayers) > len(prevGPULayers) {
			for _, gl := range gpuLayers {
				for i := range s.mem.GPUs {
					if gl.DeviceID == s.mem.GPUs[i].DeviceID {
						s.mem.GPUs[i].Graph = max(graphPartialOffload, graphFullOffload)
						break
					}
				}
			}
		} else {
			break
		}
	}

	// This maintains the historical assignment of graph sizes, though it isn't fully accurate
	graphSize := graphFullOffload
	if gpuLayers.Sum() < int(s.totalLayers) {
		graphSize = graphPartialOffload
	}

	// For all layers that we have assigned to GPUs, move them in the memory data so
	// that it is reported accurately
	for _, gl := range gpuLayers {
		for i := range s.mem.GPUs {
			if gl.DeviceID == s.mem.GPUs[i].DeviceID {
				for _, l := range gl.Layers {
					s.mem.GPUs[i].Weights[l] = s.mem.CPU.Weights[l]
					s.mem.GPUs[i].Cache[l] = s.mem.CPU.Cache[l]

					s.mem.CPU.Weights[l] = 0
					s.mem.CPU.Cache[l] = 0
				}

				s.mem.GPUs[i].Graph = graphSize
				break
			}
		}
	}

	if projectorGPU > 0 && len(s.mem.GPUs[projectorGPU].Weights) > 0 {
		s.mem.GPUs[projectorGPU].Weights[s.totalLayers-1] += projectorWeights
	}

	slog.Debug("memory", "estimate", s.mem)
	s.mem.Log(slog.LevelInfo)

	// The llama engine uses mmap by default
	s.loadRequest.UseMmap = true

	// mmap has issues with partial offloading on metal
	for _, g := range gpus {
		if g.Library == "Metal" &&
			uint64(s.options.NumGPU) > 0 &&
			uint64(s.options.NumGPU) < s.totalLayers {
			s.options.UseMMap = new(bool)
			*s.options.UseMMap = false
		}
	}

	// Windows CUDA should not use mmap for best performance.
	// On Linux, with a model larger than free space, mmap leads to thrashing.
	// For CPU loads we want the memory to be allocated, not FS cache.
	if (runtime.GOOS == "windows" && len(gpus) > 0 && gpus[0].Library == "CUDA" && s.options.UseMMap == nil) ||
		(runtime.GOOS == "linux" && systemInfo.FreeMemory < s.TotalSize() && s.options.UseMMap == nil) ||
		(len(gpus) == 0 && s.options.UseMMap == nil) ||
		(len(gpus) > 0 && gpus[0].Library == "Vulkan" && s.options.UseMMap == nil) ||
		(s.options.UseMMap != nil && !*s.options.UseMMap) {
		s.loadRequest.UseMmap = false
	}

	if err := s.waitUntilRunnerLaunched(ctx); err != nil {
		return nil, err
	}

	s.loadRequest.GPULayers = gpuLayers
	resp, err := s.initModel(ctx, s.loadRequest, LoadOperationCommit)
	if err != nil {
		return nil, err
	}

	if !resp.Success {
		return nil, errors.New("failed to allocate memory for model")
	}

	// The llama engine does its memory allocations together with model loading, so we
	// need to wait until it is done to ensure that we have accurate memory data before
	// loading the next model
	return uniqueDeviceIDs(s.loadRequest.GPULayers), s.WaitUntilRunning(ctx)
}

func projectorMemoryRequirements(filename string) (weights uint64) {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, err := ggml.Decode(file, 1024)
	if err != nil {
		return 0
	}

	for _, layer := range ggml.Tensors().GroupLayers() {
		weights += layer.Size()
	}

	return weights
}

// Load finds the optimal layout of layers to offload on GPUs, starting with no initial information about the size of the model.
// It does this by:
// 1. Assigning the full model to the GPU with the largest available free memory
// 2. Attempting to allocate the layout and receiving the memory requirements in response
// 3. Creating a new layout based on the updated memory information
// 4. Going back to step 2 and looping until we either stabilize on a particular layout or discover that we have entered a cycle
//
// This process is repeated for higher levels of loading the model (fit, allocate, commit). The earlier levels are quicker,
// allowing for faster iteration, but may return less information.
//
// Returns the list of GPU IDs that were used in the final allocation on success
func (s *ollamaServer) Load(ctx context.Context, systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, requireFull bool) ([]ml.DeviceID, error) {
	var success bool
	defer func() {
		if !success {
			s.initModel(ctx, LoadRequest{}, LoadOperationClose)
		}
		if s.mem != nil {
			s.mem.Log(slog.LevelInfo)
		}
	}()

	slog.Info("loading model", "model layers", s.totalLayers, "requested", s.options.NumGPU)

	pastAllocations := make(map[uint64]struct{})
	var backoff float32

	gpuLayers, err := s.createLayout(systemInfo, gpus, s.mem, requireFull, backoff)
	if err != nil {
		return nil, err
	}

	if err := s.waitUntilRunnerLaunched(ctx); err != nil {
		return nil, err
	}

nextOperation:
	for operation := LoadOperationFit; operation < LoadOperationCommit; operation++ {
	nextLoad:
		for {
			s.loadRequest.GPULayers = gpuLayers
			resp, err := s.initModel(ctx, s.loadRequest, operation)
			if err != nil {
				return nil, err
			}

			resp.Memory.Log(slog.LevelDebug)
			slog.Debug("memory", "success", resp.Success, "required", resp.Memory)

			pastAllocations[gpuLayers.Hash()] = struct{}{}
			s.mem = &resp.Memory

			for {
				newGPULayers, err := s.createLayout(systemInfo, gpus, s.mem, requireFull, backoff)
				if err != nil {
					return nil, err
				}

				slog.Debug("new layout created", "layers", newGPULayers)

				// We get additional memory information over time, which will reduce the number of
				// layers that can fit, so fewer layers is actually better. As long as we haven't seen
				// this layout before and it doesn't have more layers than the last one, we can keep
				// trying to see if we can do better.
				if _, ok := pastAllocations[newGPULayers.Hash()]; !ok && newGPULayers.Sum() <= gpuLayers.Sum() {
					gpuLayers = newGPULayers
					continue nextLoad
				}

				// If we are looping around a few different layouts due to graphs moving off and on
				// GPUs, make sure that we try out the intermediate states. For example, if we are
				// looping between offloading 39 and 41 layers, we should also check 40.
				//
				// This switches strategies to force an incremental number of layers to be offloaded
				// and checking the memory layout. If the allocation succeeds and creating a new layout
				// without forcing offload yields the same or greater number of layers offloaded, then
				// the trial is successful.
				//
				// This alternate strategy does not introduce the possibility of loops with the overall
				// state machine, as it exits this code block either with a successful result (moving
				// to the next operation) or with the original number of layers offloaded.
				if s.options.NumGPU < 0 && newGPULayers.Sum()-gpuLayers.Sum() > 1 {
					for i := newGPULayers.Sum() - 1; i >= gpuLayers.Sum(); i-- {
						slog.Debug("exploring intermediate layers", "layer", i)

						s.options.NumGPU = i
						newGPULayers, err = s.createLayout(systemInfo, gpus, s.mem, requireFull, backoff)
						s.options.NumGPU = -1
						if err != nil {
							return nil, err
						}
						slog.Debug("new layout created", "layers", newGPULayers)

						s.loadRequest.GPULayers = newGPULayers
						resp, err = s.initModel(ctx, s.loadRequest, operation)
						if err != nil {
							return nil, err

						resp.Memory.Log(slog.LevelDebug)
						slog.Debug("memory", "success", resp.Success, "required", resp.Memory)

						if resp.Success {
							verifyGPULayers, err := s.createLayout(systemInfo, gpus, &resp.Memory, requireFull, backoff)
							if err != nil {
								return nil, err
							}

							slog.Debug("verifying layout", "layers", verifyGPULayers)

							if newGPULayers.Sum() <= verifyGPULayers.Sum() {
								gpuLayers = newGPULayers

								// Since we are going backwards (increasing the number of layers), ensure that
								// we can come back down if needed
								clear(pastAllocations)

								continue nextOperation
							}
						}
					}
				}

				// If we generated the same layout a second time or went backwards, then we've converged. Use the last
				// layout before the repeat, which is already allocated.
				if resp.Success {
					continue nextOperation
				}

				if s.options.NumGPU >= 0 {
					return nil, fmt.Errorf("memory layout cannot be allocated with num_gpu = %v", s.options.NumGPU)
				}

				// Memory allocation failed even though we created a layout that we thought should
				// fit in available memory. This could happen if either our free memory reports
				// are incorrect or if available memory is changing between layout and allocation
				// time. Apply a backoff to try to find the real amount of available space.
				if backoff > 1 {
					slog.Warn("memory layout cannot be allocated", "memory", resp.Memory)
					return nil, errors.New("memory layout cannot be allocated")
				} else {
					backoff += 0.1
				}

				slog.Info("model layout did not fit, applying backoff", "backoff", fmt.Sprintf("%.2f", backoff))
			}
		}
	}

	s.loadRequest.GPULayers = gpuLayers
	resp, err := s.initModel(ctx, s.loadRequest, LoadOperationCommit)
	if err != nil {
		return nil, err
	}

	success = resp.Success
	s.mem = &resp.Memory

	if !success {
		slog.Warn("failed to commit memory for model", "memory", resp.Memory)
		return nil, errors.New("failed to commit memory for model")
	}

	return uniqueDeviceIDs(gpuLayers), nil
}

func uniqueDeviceIDs(gpuLayers ml.GPULayersList) []ml.DeviceID {
	devices := []ml.DeviceID{}
	for _, layer := range gpuLayers {
		if !slices.Contains(devices, layer.DeviceID) {
			devices = append(devices, layer.DeviceID)
		}
	}
	return devices
}

// createLayout uses the current best view of memory requirements and creates a layout of model layers on GPUs.
// It does this by:
// - Calculating how much space each layer requires
// - Calculating how much space each GPU has available for layers, based on free memory and space occupied by the graph
// - Assigning layers
// - Ensuring that we don't exceed limits, such as requirements about partial offloading or system memory
func (s *llmServer) createLayout(systemInfo ml.SystemInfo, systemGPUs []ml.DeviceInfo, memory *ml.BackendMemory, requireFull bool, backoff float32) (ml.GPULayersList, error) {
	if memory == nil {
		memory = &ml.BackendMemory{CPU: ml.DeviceMemory{
			Weights: make([]uint64, s.totalLayers),
			Cache:   make([]uint64, s.totalLayers),
		}}
	}
	gpuLayers, layers := s.buildLayout(systemGPUs, memory, requireFull, backoff)
	err := s.verifyLayout(systemInfo, memory, requireFull, gpuLayers, layers)
	if err != nil {
		return nil, err
	}
	return gpuLayers, nil
}

func (s *llmServer) buildLayout(systemGPUs []ml.DeviceInfo, memory *ml.BackendMemory, requireFull bool, backoff float32) (ml.GPULayersList, []uint64) {
	gpus := append(make([]ml.DeviceInfo, 0, len(systemGPUs)), systemGPUs...)
	sort.Sort(sort.Reverse(ml.ByFreeMemory(gpus)))

	layers := make([]uint64, len(memory.CPU.Weights))
	for i := range layers {
		for j := range memory.GPUs {
			layers[i] += memory.GPUs[j].Weights[i]
			layers[i] += memory.GPUs[j].Cache[i]
		}
		layers[i] += memory.CPU.Weights[i]
		layers[i] += memory.CPU.Cache[i]
		logutil.Trace("layer to assign", "layer", i, "size", format.HumanBytes2(layers[i]))
	}

	gpuLayers := ml.GPULayersList{}
	for _, gl := range ml.ByLibrary(gpus) {
		// If a GPU already has a graph allocated on it, then we should continue to use it.
		// Otherwise, we lose information that we got from previous allocations, which can
		// cause cycling. Plus, we get more information about required allocation from each
		// iteration, so it doesn't make sense that a later iteration would use fewer GPUs.
		lastUsedGPU := 0
		for i := range gl {
			found := false
			for j := range memory.GPUs {
				if gl[i].DeviceID == memory.GPUs[j].DeviceID {
					if memory.GPUs[j].Graph != 0 {
						lastUsedGPU = i
					}

					reserved := uint64(float32(gl[i].FreeMemory)*backoff) + gl[i].MinimumMemory() + envconfig.GpuOverhead() + memory.GPUs[j].Graph
					if gl[i].FreeMemory > reserved {
						gl[i].FreeMemory -= reserved
					} else {
						gl[i].FreeMemory = 0
					}

					slog.Debug("available gpu", "id", gl[i].ID, "library", gl[i].Library,
						"available layer vram", format.HumanBytes2(gl[i].FreeMemory),
						"backoff", fmt.Sprintf("%.2f", backoff), "minimum", format.HumanBytes2(gl[i].MinimumMemory()),
						"overhead", format.HumanBytes2(envconfig.GpuOverhead()),
						"graph", format.HumanBytes2(memory.GPUs[j].Graph))

					found = true
					break
				}
			}
			if !found {
				// The runner doesn't report seeing this GPU
				gl[i].FreeMemory = 0
			}
		}

		libraryGpuLayers := assignLayers(layers, gl, requireFull, s.options.NumGPU, lastUsedGPU)
		if libraryGpuLayers.Sum() > gpuLayers.Sum() {
			gpuLayers = libraryGpuLayers
		}
	}
	return gpuLayers, layers
}

// verifyLayout ensures that we don't exceed limits, such as requirements about partial offloading or system memory
func (s *llmServer) verifyLayout(systemInfo ml.SystemInfo, memory *ml.BackendMemory, requireFull bool, gpuLayers ml.GPULayersList, layers []uint64) error {
	// These sizes will only increase as we go through additional iterations and get additional information.
	cpuSize := memory.InputWeights + memory.CPU.Graph
	var vramSize uint64
	for _, gl := range gpuLayers {
		for _, gpu := range memory.GPUs {
			if gl.DeviceID == gpu.DeviceID {
				vramSize += gpu.Graph
				break
			}
		}
	}

nextLayer:
	for i := range layers {
		for _, g := range gpuLayers {
			for _, gl := range g.Layers {
				if i == gl {
					vramSize += layers[i]
					continue nextLayer
				}
			}
		}
		cpuSize += layers[i]
	}

	if requireFull {
		if gpuLayers.Sum() < len(layers) && (s.options.NumGPU < 0 || gpuLayers.Sum() < s.options.NumGPU) {
			slog.Info("model requires more memory than is currently available, evicting a model to make space", "loaded layers", gpuLayers.Sum())
			return ErrLoadRequiredFull
		}

		if cpuSize > systemInfo.FreeMemory {
			slog.Info("model requires more system memory than is currently available, evicting a model to make space", "required", cpuSize, "free", systemInfo.FreeMemory)
			return fmt.Errorf("model requires more system memory than is currently available %w", ErrLoadRequiredFull)
		}
	}

	// On linux and windows, over-allocating CPU memory will almost always result in an error
	// Darwin has fully dynamic swap so has no direct concept of free swap space
	if runtime.GOOS != "darwin" {
		available := systemInfo.FreeMemory + systemInfo.FreeSwap
		if cpuSize > available {
			slog.Warn("model request too large for system", "requested", format.HumanBytes2(cpuSize), "available", format.HumanBytes2(available), "total", format.HumanBytes2(systemInfo.TotalMemory), "free", format.HumanBytes2(systemInfo.FreeMemory), "swap", format.HumanBytes2(systemInfo.FreeSwap))
			return fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(cpuSize), format.HumanBytes2(available))
		}
	} else {
		if vramSize > systemInfo.TotalMemory {
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			s.options.NumGPU = 0
			gpuLayers = ml.GPULayersList{}
		}
	}

	if gpuLayers.Sum() == 0 {
		slog.Debug("insufficient VRAM to load any model layers")
	}

	return nil
}

// assignLayers packs the maximum number of layers onto the smallest set of GPUs and comes up with a layer assignment
func assignLayers(layers []uint64, gpus []ml.DeviceInfo, requireFull bool, requestedLayers int, lastUsedGPU int) (gpuLayers ml.GPULayersList) {
	// If we can't fit everything then prefer offloading layers other than the output layer
	for range 2 {
		// requestedLayers may be -1 if nothing was requested
		requestedLayers = min(len(layers), requestedLayers)

		if !envconfig.SchedSpread() {
			for i := lastUsedGPU; i < len(gpus); i++ {
				// Try to pack things into as few GPUs as possible
				forceRequest := i == len(gpus)-1 && !requireFull
				gpuLayers = findBestFit(layers, gpus[:i+1], requestedLayers, forceRequest)
				if gpuLayers.Sum() == len(layers) || gpuLayers.Sum() == requestedLayers {
					break
				}
			}
		} else {
			gpuLayers = findBestFit(layers, gpus, requestedLayers, !requireFull)
		}

		// We only stop if we've gotten all of the layers - even if we got requestedLayers, we still
		// might want to try dropping the output layer.
		if gpuLayers.Sum() == len(layers) {
			return gpuLayers
		}

		layers = layers[:len(layers)-1]
	}

	return gpuLayers
}
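
// A minimal sketch of the two passes above (illustrative): with 32 block layers
// plus the output layer (33 entries), if the first pass can only offload 30 of
// the 33, the slice is truncated and the second pass retries with just the 32
// block layers, so the offloaded layers are all transformer blocks and the
// output layer stays on the CPU.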

// findBestFit binary searches to find the smallest capacity factor that can fit
// the max number of layers. The capacity factor is multiplied by the free space on
// each GPU and a small one will force even balancing.
func findBestFit(layers []uint64, gpus []ml.DeviceInfo, requestedLayers int, forceRequest bool) (gpuLayers ml.GPULayersList) {
	var high float32 = 1
	var low float32 = 0

	// If we need to fulfill the requested number of layers, pretend we have almost infinite VRAM
	if requestedLayers >= 0 && forceRequest {
		high = 1000
	}

	bestAssignments := greedyFit(layers, gpus, high, requestedLayers)
	maxNumGPU := bestAssignments.Sum()
	if maxNumGPU == 0 {
		return bestAssignments
	}

	for high-low > 1e-6 {
		mid := (low + high) / 2
		assignments := greedyFit(layers, gpus, mid, requestedLayers)
		if assignments.Sum() == maxNumGPU {
			high = mid
			bestAssignments = assignments
		} else {
			low = mid
		}
	}
	return bestAssignments
}
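
// Worked example (illustrative): two GPUs with 20 GiB and 10 GiB free and 15
// layers of 1 GiB each. At a capacity factor of 1.0 all 15 layers fit, so the
// binary search shrinks the factor; below 0.5 the combined usable space drops
// under 15 GiB and layers stop fitting, so the search converges near 0.5,
// placing roughly 10 layers on the larger GPU and 5 on the smaller one, in
// proportion to their free memory.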

// greedyFit assigns layers incrementally to GPUs, spilling over as each runs out of free space
func greedyFit(layers []uint64, gpus []ml.DeviceInfo, capacity float32, requestedLayers int) (gpuLayers ml.GPULayersList) {
	device := len(gpus) - 1
	gpuLayers = ml.GPULayersList{{DeviceID: gpus[device].DeviceID}}
	freeSpace := uint64(float32(gpus[device].FreeMemory) * capacity)
	for i := len(layers) - 1; i >= 0; i-- {
		if requestedLayers >= 0 && len(layers)-1-i >= requestedLayers {
			break
		}

		for {
			if layers[i] <= freeSpace {
				gpuLayers[0].Layers = append([]int{i}, gpuLayers[0].Layers...)
				freeSpace -= layers[i]
				break
			}

			device--
			if device < 0 {
				return gpuLayers
			}
			gpuLayers = append(ml.GPULayersList{{DeviceID: gpus[device].DeviceID}}, gpuLayers...)
			freeSpace = uint64(float32(gpus[device].FreeMemory) * capacity)
		}
	}
	return gpuLayers
}
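
// Reading the loop above: layers are visited from the end of the list (the
// output layer first) and packed onto the last GPU in the slice, which is the
// smallest after buildLayout's descending sort by free memory; as space runs
// out, earlier (larger) GPUs are prepended and filled in turn.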

// waitUntilRunnerLaunched sleeps until the runner subprocess is alive enough
// to respond to status requests
func (s *llmServer) waitUntilRunnerLaunched(ctx context.Context) error {
	for {
		_, err := s.getServerStatus(ctx)
		if err == nil {
			break
		}

		t := time.NewTimer(10 * time.Millisecond)
		select {
		case <-t.C:
			continue
		case <-ctx.Done():
			return ctx.Err()
		}
	}

	return nil
}

// initModel sends a load request to the runner based on the request operation (fit, alloc, commit)
// and parameters
func (s *llmServer) initModel(ctx context.Context, req LoadRequest, operation LoadOperation) (*LoadResponse, error) {
	req.Operation = operation

	data, err := json.Marshal(req)
	if err != nil {
		return nil, fmt.Errorf("error marshaling load data: %w", err)
	}

	r, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/load", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating load request: %w", err)
	}
	r.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(r)
	if err != nil {
		return nil, fmt.Errorf("do load request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read load request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm load error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var llmResp LoadResponse
	if err := json.Unmarshal(body, &llmResp); err != nil {
		return nil, fmt.Errorf("load unmarshal encode response: %w", err)
	}

	return &llmResp, nil
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLaunched
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) String() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLaunched:
		return "llm server launched"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResponse struct {
	Status   ServerStatus `json:"status"`
	Progress float32      `json:"progress"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState)
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		if strings.Contains(err.Error(), "connection refused") {
			return ServerStatusNotResponding, errors.New("connection refused")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var ssr ServerStatusResponse
	if err := json.Unmarshal(body, &ssr); err != nil {
		return ServerStatusError, fmt.Errorf("health unmarshal encode response: %w", err)
	}

	switch ssr.Status {
	case ServerStatusLoadingModel:
		s.loadProgress = ssr.Progress
		return ssr.Status, nil
	case ServerStatusLaunched, ServerStatusReady, ServerStatusNoSlotsAvailable:
		return ssr.Status, nil
	default:
		return ssr.Status, fmt.Errorf("server error: %+v", ssr)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	stallDuration := envconfig.LoadTimeout()    // If no progress happens
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			return fmt.Errorf("llama runner process has terminated: %w", err)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status)
		}
		switch status {
		case ServerStatusReady:
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", time.Since(s.loadStart).Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status)
				stallTimer = time.Now().Add(stallDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

func (s *llmServer) Pid() int {
	if s.cmd != nil && s.cmd.Process != nil {
		return s.cmd.Process.Pid
	}
	return -1
}

func (s *llmServer) GetPort() int {
	return s.port
}

func (s *llmServer) HasExited() bool {
	if s.cmd != nil && s.cmd.ProcessState != nil && s.cmd.ProcessState.ExitCode() >= 0 {
		return true
	}
	return false
}

var grammarJSON = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws
object ::=
  "{" ws (
         string ":" ws value
    ("," ws string ":" ws value)*
  )? ws "}" 
array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? ws "]" 
string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" 
number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? 
# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`
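
// For instance (illustrative), the grammar above accepts
//
//	{"answer": [1, 2.5, "x", null]}
//
// but rejects a bare top-level array such as [1, 2], since root must be an
// object.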

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

type CompletionRequest struct {
	Prompt  string
	Format  json.RawMessage
	Images  []ImageData
	Options *api.Options

	Grammar  string // set before sending the request to the subprocess
	Shift    bool
	Truncate bool

	// Logprobs specifies whether to include log probabilities in the response
	Logprobs bool

	// TopLogprobs specifies the number of most likely alternative tokens to return (0-20)
	TopLogprobs int
}

// DoneReason represents the reason why a completion response is done
type DoneReason int

const (
	// DoneReasonStop indicates the completion stopped naturally
	DoneReasonStop DoneReason = iota
	// DoneReasonLength indicates the completion stopped due to length limits
	DoneReasonLength
	// DoneReasonConnectionClosed indicates the completion stopped due to the connection being closed
	DoneReasonConnectionClosed
)

func (d DoneReason) String() string {
	switch d {
	case DoneReasonLength:
		return "length"
	case DoneReasonStop:
		return "stop"
	default:
		return "" // closed
	}
}

// TokenLogprob represents log probability information for a single token alternative.
type TokenLogprob struct {
	Token   string  `json:"token"`
	Logprob float64 `json:"logprob"`
}

// Logprob contains log probability information for a generated token.
type Logprob struct {
	TokenLogprob
	TopLogprobs []TokenLogprob `json:"top_logprobs,omitempty"`
}

type CompletionResponse struct {
	Content            string        `json:"content"`
	DoneReason         DoneReason    `json:"done_reason"`
	Done               bool          `json:"done"`
	PromptEvalCount    int           `json:"prompt_eval_count"`
	PromptEvalDuration time.Duration `json:"prompt_eval_duration"`
	EvalCount          int           `json:"eval_count"`
	EvalDuration       time.Duration `json:"eval_duration"`
1452
1453
1454

	// Logprobs contains log probability information if requested
	Logprobs []Logprob `json:"logprobs,omitempty"`
1455
1456
}
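
// Illustrative sketch, not part of the upstream file: the runner streams
// newline-delimited events, optionally carrying an SSE-style "data: " prefix;
// Completion strips the prefix and unmarshals each line into a
// CompletionResponse. The payload below is hypothetical.
func exampleParseCompletionEvent() (CompletionResponse, error) {
	line := []byte(`data: {"content":"Hello","done":false}`)
	evt, ok := bytes.CutPrefix(line, []byte("data: "))
	if !ok {
		evt = line // lines without the prefix are treated as raw JSON
	}
	var c CompletionResponse
	err := json.Unmarshal(evt, &c)
	return c, err
}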

func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	slog.Debug("completion request", "images", len(req.Images), "prompt", len(req.Prompt), "format", string(req.Format))
	logutil.Trace("completion request", "prompt", req.Prompt)

	if len(req.Format) > 0 {
		switch string(req.Format) {
		case `null`, `""`:
			// Field was set, but "missing" a value. We accept
			// these as "not set".
			break
		case `"json"`:
			req.Grammar = grammarJSON
		default:
			if req.Format[0] != '{' {
				return fmt.Errorf("invalid format: %q; expected \"json\" or a valid JSON Schema object", req.Format)
			}

			// User provided a JSON schema
			g := llama.SchemaToGrammar(req.Format)
			if g == nil {
				return fmt.Errorf("invalid JSON schema in format")
			}
			req.Grammar = string(g)
		}
	}

	if req.Options == nil {
		opts := api.DefaultOptions()
		req.Options = &opts
	}

	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting completion request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return err
	}
	defer s.sem.Release(1)

	// put an upper limit on num_predict to avoid the model running forever
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status)
	}

	// Handle JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(req); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil && errors.Is(err, context.Canceled) {
		// client closed connection
		return err
	} else if err != nil {
		slog.Error("post predict", "error", err)
		return errors.New("model runner has unexpectedly stopped, this may be due to resource limitations or an internal error, check ollama server logs for details")
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return api.StatusError{StatusCode: res.StatusCode, ErrorMessage: strings.TrimSpace(string(bodyBytes))}
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated, this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				evt = line
			}

			var c CompletionResponse
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}
			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content:  c.Content,
					Logprobs: c.Logprobs,
				})
			}

			if c.Done {
				fn(c)
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") || strings.Contains(err.Error(), "forcibly closed") {
			s.Close()
			var msg string
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			} else {
				msg = err.Error()
			}
			return fmt.Errorf("an error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}
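
// Illustrative sketch, not part of the upstream file: a typical Completion caller
// accumulates streamed chunks and reads the final counters from the Done event.
// The prompt is hypothetical.
func exampleStreamCompletion(ctx context.Context, s LlamaServer) (string, error) {
	var sb strings.Builder
	err := s.Completion(ctx, CompletionRequest{Prompt: "Why is the sky blue?"}, func(r CompletionResponse) {
		sb.WriteString(r.Content)
		if r.Done {
			slog.Debug("completion finished", "reason", r.DoneReason.String(), "eval_count", r.EvalCount)
		}
	})
	return sb.String(), err
}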

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float32 `json:"embedding"`
}

func (s *llmServer) Embedding(ctx context.Context, input string) ([]float32, error) {
	logutil.Trace("embedding request", "input", input)

	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting embedding request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status)
	}

	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	r, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	r.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(r)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var e EmbeddingResponse
	if err := json.Unmarshal(body, &e); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return e.Embedding, nil
}
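
// Illustrative sketch, not part of the upstream file: embedding vectors returned
// by Embedding are typically compared with cosine similarity. This helper is
// hypothetical, assumes equal-length non-zero vectors, and assumes the standard
// "math" package is imported (it is not in this file's upstream import list).
func exampleCosineSimilarity(a, b []float32) float64 {
	var dot, na, nb float64
	for i := range a {
		dot += float64(a[i]) * float64(b[i])
		na += float64(a[i]) * float64(a[i])
		nb += float64(b[i]) * float64(b[i])
	}
	return dot / (math.Sqrt(na) * math.Sqrt(nb))
}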

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	s.llamaModelLock.Lock()
	defer s.llamaModelLock.Unlock()

	if s.llamaModel != nil {
		return s.llamaModel.Tokenize(content, false, true)
	}
	if s.textProcessor != nil {
		tokens, err := s.textProcessor.Encode(content, false)
		if err != nil {
			return nil, err
		}
		toks := make([]int, len(tokens))
		for i, t := range tokens {
			toks[i] = int(t)
		}
		return toks, nil
	}
	// not reached
	return nil, fmt.Errorf("no tokenizer configured")
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	s.llamaModelLock.Lock()
	defer s.llamaModelLock.Unlock()

	if s.llamaModel != nil {
		var resp string
		for _, token := range tokens {
			resp += s.llamaModel.TokenToPiece(token)
		}
		return resp, nil
	}
	if s.textProcessor != nil {
		toks := make([]int32, len(tokens))
		for i, t := range tokens {
			toks[i] = int32(t)
		}
		content, err := s.textProcessor.Decode(toks)
		if err != nil {
			return "", err
		}
		return content, nil
	}
	// not reached
	return "", fmt.Errorf("no tokenizer configured")
}
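
// Illustrative sketch, not part of the upstream file: Tokenize and Detokenize use
// whichever backend is active (llamaModel or textProcessor), and a round trip
// normally reconstructs the original text. The input string is hypothetical.
func exampleTokenRoundTrip(ctx context.Context, s LlamaServer) error {
	tokens, err := s.Tokenize(ctx, "The quick brown fox")
	if err != nil {
		return err
	}
	text, err := s.Detokenize(ctx, tokens)
	if err != nil {
		return err
	}
	slog.Debug("token round trip", "tokens", len(tokens), "text", text)
	return nil
}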

func (s *llmServer) Close() error {
	s.llamaModelLock.Lock()
	if s.llamaModel != nil {
		llama.FreeModel(s.llamaModel)
		s.llamaModel = nil
	}
	s.llamaModelLock.Unlock()

	if s.cmd != nil {
		slog.Debug("stopping llama server", "pid", s.Pid())
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit", "pid", s.Pid())
			<-s.done
		}

		slog.Debug("llama server stopped", "pid", s.Pid())
	}

	return nil
}

func (s *llamaServer) GetDeviceInfos(ctx context.Context) []ml.DeviceInfo {
	slog.Debug("llamarunner free vram reporting not supported")
	return nil
}

func (s *llmServer) VRAMSize() uint64 {
	if s.mem == nil {
		return 0
	}

	var mem uint64

	for _, g := range s.mem.GPUs {
		mem += g.Size()
	}

	// Some elements are always on CPU. However, if we have allocated all layers
	// on the GPU then include the CPU components as well, to represent complete offloading.
	noCPULayers := true
	for i := range s.mem.CPU.Weights {
		if s.mem.CPU.Weights[i] != 0 || s.mem.CPU.Cache[i] != 0 {
			noCPULayers = false
			break
		}
	}
	if noCPULayers {
		mem += s.mem.InputWeights
		mem += s.mem.CPU.Graph
	}

	return mem
}
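
// Illustrative sketch, not part of the upstream file: with two GPUs holding 4 GiB
// and 2 GiB of allocations and no layers resident on the CPU, VRAMSize reports the
// GPU total plus the always-on-CPU components, representing complete offload. All
// figures are hypothetical.
func exampleVRAMAccounting() uint64 {
	gpuTotal := uint64(4+2) << 30    // sum of g.Size() over s.mem.GPUs
	cpuResident := uint64(512) << 20 // s.mem.InputWeights + s.mem.CPU.Graph
	return gpuTotal + cpuResident    // fully offloaded: CPU components included
}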

func (s *llmServer) TotalSize() uint64 {
	if s.mem == nil {
		return 0
	}

	mem := s.mem.InputWeights
	mem += s.mem.CPU.Size()
	for _, g := range s.mem.GPUs {
		mem += g.Size()
	}

	return mem
}

func (s *llmServer) VRAMByGPU(id ml.DeviceID) uint64 {
	if s.mem == nil {
		return 0
	}

	for _, g := range s.mem.GPUs {
		if g.DeviceID == id {
			return g.Size()
		}
	}

	return 0
}

func (s *ollamaServer) GetDeviceInfos(ctx context.Context) []ml.DeviceInfo {
	devices, err := ml.GetDevicesFromRunner(ctx, s)
	if err != nil {
		if s.cmd != nil && s.cmd.ProcessState == nil {
			// Still running but hit an error, log
			slog.Debug("failure refreshing GPU information", "error", err)
		}
		// else no longer running so suppress logging as a failure is expected
	}
	return devices
}