package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"slices"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/fs/ggml"
	"github.com/ollama/ollama/llama"
	"github.com/ollama/ollama/logutil"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/model"
)

type filteredEnv []string

func (e filteredEnv) LogValue() slog.Value {
	var attrs []slog.Attr
	for _, env := range e {
		if key, value, ok := strings.Cut(env, "="); ok {
			switch {
			case strings.HasPrefix(key, "OLLAMA_"),
				strings.HasPrefix(key, "CUDA_"),
				strings.HasPrefix(key, "ROCR_"),
				strings.HasPrefix(key, "ROCM_"),
				strings.HasPrefix(key, "HIP_"),
				strings.HasPrefix(key, "GPU_"),
				strings.HasPrefix(key, "HSA_"),
				strings.HasPrefix(key, "GGML_"),
				slices.Contains([]string{
					"PATH",
					"LD_LIBRARY_PATH",
					"DYLD_LIBRARY_PATH",
				}, key):
				attrs = append(attrs, slog.String(key, value))
			}
		}
	}
	return slog.GroupValue(attrs...)
}

type LlamaServer interface {
	ModelPath() string
	Load(ctx context.Context, systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, requireFull bool) ([]ml.DeviceID, error)
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, input string) ([]float32, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	VRAMSize() uint64 // Total VRAM across all GPUs
	TotalSize() uint64
	VRAMByGPU(id ml.DeviceID) uint64
	Pid() int
	GetPort() int
	GetDeviceInfos(ctx context.Context) []ml.DeviceInfo
	HasExited() bool
}

// llmServer is an instance of a runner hosting a single model
type llmServer struct {
	port        int
	cmd         *exec.Cmd
	done        chan error // Channel to signal when the process exits
	status      *StatusWriter
	options     api.Options
	numParallel int
	modelPath   string

	loadRequest LoadRequest // Parameters used to initialize the runner

	// llamaModel is an instance of the cgo llama.cpp model definition
	// nil if this server is running the new engine
	llamaModel     *llama.Model
	llamaModelLock *sync.Mutex

	// textProcessor handles text encoding/decoding for the model in the Ollama engine
	// nil if this server is running the llama.cpp based engine
	textProcessor model.TextProcessor

	totalLayers  uint64
	loadStart    time.Time // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted
}

type llamaServer struct {
	llmServer

	ggml     *ggml.GGML
	gpus     []ml.DeviceInfo // The set of GPUs covered by the memory estimate
	estimate MemoryEstimate
}

type ollamaServer struct {
	llmServer

	mem *ml.BackendMemory
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// the maxArraySize is negative, all arrays are collected.
func LoadModel(model string, maxArraySize int) (*ggml.GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, err := ggml.Decode(f, maxArraySize)
	return ggml, err
}
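
// A minimal usage sketch (hypothetical path; a maxArraySize of 0 keeps the
// default of 1024):
//
//	f, err := LoadModel("/path/to/model.gguf", 0)
//	if err != nil {
//		return err
//	}
//	slog.Info("model loaded", "blocks", f.KV().BlockCount(), "train_ctx", f.KV().ContextLength())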

// NewLlamaServer will run a server for the given GPUs
func NewLlamaServer(systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, modelPath string, f *ggml.GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
	var llamaModel *llama.Model
	var textProcessor model.TextProcessor
	var err error
	if envconfig.NewEngine() || f.KV().OllamaEngineRequired() {
		if len(projectors) == 0 {
			textProcessor, err = model.NewTextProcessor(modelPath)
		} else {
			err = errors.New("split vision models aren't supported")
		}
		if err != nil {
			// To prepare for opt-out mode, instead of treating this as an error, we fall back to the old runner
			slog.Debug("model not yet supported by Ollama engine, switching to compatibility mode", "model", modelPath, "error", err)
		}
	}

	if textProcessor == nil {
		llamaModel, err = llama.LoadModelFromFile(modelPath, llama.ModelParams{VocabOnly: true})
		if err != nil {
			return nil, err
		}
	}

	// Verify the requested context size is <= the model training size
	trainCtx := f.KV().ContextLength()
	if opts.NumCtx > int(trainCtx) && trainCtx > 0 {
		slog.Warn("requested context size too large for model", "num_ctx", opts.NumCtx, "n_ctx_train", trainCtx)
		opts.NumCtx = int(trainCtx)
	}

	opts.NumBatch = min(opts.NumBatch, opts.NumCtx)

	loadRequest := LoadRequest{LoraPath: adapters, KvSize: opts.NumCtx * numParallel, BatchSize: opts.NumBatch, Parallel: numParallel, MultiUserCache: envconfig.MultiUserCache()}

	defaultThreads := systemInfo.ThreadCount
	if opts.NumThread > 0 {
		loadRequest.NumThreads = opts.NumThread
	} else if defaultThreads > 0 {
		loadRequest.NumThreads = defaultThreads
	}

	// TODO - NUMA support currently doesn't work properly

	if opts.MainGPU > 0 {
		loadRequest.MainGPU = opts.MainGPU
	}

	if len(projectors) > 0 && llamaModel != nil {
		loadRequest.ProjectorPath = projectors[0]
	}

	fa := envconfig.FlashAttention(f.FlashAttention())

	// This will disable flash attention unless all GPUs on the system support it, even if we end up selecting a subset
	// that can handle it.
	if fa && !ml.FlashAttentionSupported(gpus) {
		slog.Warn("flash attention enabled but not supported by gpu")
		fa = false
	}

	if fa && !f.SupportsFlashAttention() {
		slog.Warn("flash attention enabled but not supported by model")
		fa = false
	}

	kvct := strings.ToLower(envconfig.KvCacheType())

	if fa {
		slog.Info("enabling flash attention")
		loadRequest.FlashAttention = true

		// Flash Attention also supports kv cache quantization
		// Enable it if the requested kv cache type is supported by the model
		if f.SupportsKVCacheType(kvct) {
			loadRequest.KvCacheType = kvct
		} else {
			slog.Warn("kv cache type not supported by model", "type", kvct)
		}
	} else if kvct != "" && kvct != "f16" {
		slog.Warn("quantized kv cache requested but flash attention disabled", "type", kvct)
	}

	gpuLibs := ml.LibraryPaths(gpus)
	status := NewStatusWriter(os.Stderr)
	cmd, port, err := StartRunner(
		textProcessor != nil,
		modelPath,
		gpuLibs,
		status,
		ml.GetVisibleDevicesEnv(gpus),
	)

	s := llmServer{
		port:           port,
		cmd:            cmd,
		status:         status,
		options:        opts,
		modelPath:      modelPath,
		loadRequest:    loadRequest,
		llamaModel:     llamaModel,
		llamaModelLock: &sync.Mutex{},
		textProcessor:  textProcessor,
		numParallel:    numParallel,
		sem:            semaphore.NewWeighted(int64(numParallel)),
		totalLayers:    f.KV().BlockCount() + 1,
		loadStart:      time.Now(),
		done:           make(chan error, 1),
	}

	if err != nil {
		var msg string
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		err := fmt.Errorf("error starting runner: %v %s", err, msg)
		if llamaModel != nil {
			llama.FreeModel(llamaModel)
		}
		return nil, err
	}

	// reap subprocess when it exits
	go func() {
		err := s.cmd.Wait()
		// Favor a more detailed message over the process exit status
		if err != nil && s.status != nil && s.status.LastErrMsg != "" {
			slog.Error("llama runner terminated", "error", err)
			if strings.Contains(s.status.LastErrMsg, "unknown model") {
				s.status.LastErrMsg = "this model is not supported by your version of Ollama. You may need to upgrade"
			}
			s.done <- errors.New(s.status.LastErrMsg)
		} else {
			s.done <- err
		}
	}()

	if textProcessor != nil {
		return &ollamaServer{llmServer: s}, nil
	} else {
		return &llamaServer{llmServer: s, ggml: f}, nil
	}
}

func StartRunner(ollamaEngine bool, modelPath string, gpuLibs []string, out io.Writer, extraEnvs map[string]string) (cmd *exec.Cmd, port int, err error) {
	var exe string
	exe, err = os.Executable()
	if err != nil {
		return nil, 0, fmt.Errorf("unable to lookup executable path: %w", err)
	}

	if eval, err := filepath.EvalSymlinks(exe); err == nil {
		exe = eval
	}

	port = 0
	if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
		var l *net.TCPListener
		if l, err = net.ListenTCP("tcp", a); err == nil {
			port = l.Addr().(*net.TCPAddr).Port
			l.Close()
		}
	}
	if port == 0 {
		slog.Debug("ResolveTCPAddr failed, using random port")
		port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
	}
	params := []string{"runner"}
	if ollamaEngine {
		params = append(params, "--ollama-engine")
	}
	if modelPath != "" {
		params = append(params, "--model", modelPath)
	}
	params = append(params, "--port", strconv.Itoa(port))

	var pathEnv string
	switch runtime.GOOS {
	case "windows":
		pathEnv = "PATH"
	case "darwin":
		pathEnv = "DYLD_LIBRARY_PATH"
	default:
		pathEnv = "LD_LIBRARY_PATH"
	}

	// Note: we always put our dependency paths first
	// since these are the exact version we compiled/linked against
	libraryPaths := append([]string{}, gpuLibs...)
	if libraryPath, ok := os.LookupEnv(pathEnv); ok {
		libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
	}

	cmd = exec.Command(exe, params...)

	cmd.Env = os.Environ()
	cmd.Stdout = out
	cmd.Stderr = out
	cmd.SysProcAttr = LlamaServerSysProcAttr

	// Always filter down the set of GPUs in case there are any unsupported devices that might crash
	pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

	// Update or add the path variable with our adjusted version
	pathNeeded := true
	ollamaPathNeeded := true
	extraEnvsDone := map[string]bool{}
	for k := range extraEnvs {
		extraEnvsDone[k] = false
	}
	for i := range cmd.Env {
		cmp := strings.SplitN(cmd.Env[i], "=", 2)
		if strings.EqualFold(cmp[0], pathEnv) {
			cmd.Env[i] = pathEnv + "=" + pathEnvVal
			pathNeeded = false
		} else if strings.EqualFold(cmp[0], "OLLAMA_LIBRARY_PATH") {
			cmd.Env[i] = "OLLAMA_LIBRARY_PATH=" + strings.Join(gpuLibs, string(filepath.ListSeparator))
			ollamaPathNeeded = false
		} else if len(extraEnvs) != 0 {
			for k, v := range extraEnvs {
				if strings.EqualFold(cmp[0], k) {
					cmd.Env[i] = k + "=" + v
					extraEnvsDone[k] = true
				}
			}
		}
	}
	if pathNeeded {
		cmd.Env = append(cmd.Env, pathEnv+"="+pathEnvVal)
	}
	if ollamaPathNeeded {
		cmd.Env = append(cmd.Env, "OLLAMA_LIBRARY_PATH="+strings.Join(gpuLibs, string(filepath.ListSeparator)))
	}
	for k, done := range extraEnvsDone {
		if !done {
			cmd.Env = append(cmd.Env, k+"="+extraEnvs[k])
		}
	}

	slog.Info("starting runner", "cmd", cmd)
	slog.Debug("subprocess", "", filteredEnv(cmd.Env))

	if err = cmd.Start(); err != nil {
		return nil, 0, err
	}
	return cmd, port, nil
}

func (s *llmServer) ModelPath() string {
	return s.modelPath
}

type LoadOperation int

// The order of these constants is significant because we iterate over the
// operations. They should be ordered by increasing degree of loading the model.
const (
	LoadOperationFit    LoadOperation = iota // Return memory requirements but do not allocate
	LoadOperationAlloc                       // Allocate memory but do not load the weights
	LoadOperationCommit                      // Load weights - further changes cannot be made after this
	LoadOperationClose                       // Close model and free memory
)
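
// Because the constants are ordered, a caller can escalate through the cheaper
// stages before committing; a sketch of the pattern used by (*ollamaServer).Load:
//
//	for op := LoadOperationFit; op < LoadOperationCommit; op++ {
//		resp, err := s.initModel(ctx, s.loadRequest, op)
//		// inspect resp.Memory, adjust the layout, retry or move on
//	}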

func (o LoadOperation) String() string {
	switch o {
	case LoadOperationFit:
		return "fit"
	case LoadOperationAlloc:
		return "alloc"
	case LoadOperationCommit:
		return "commit"
	case LoadOperationClose:
		return "close"
	default:
		return "unknown"
	}
}

type LoadRequest struct {
	Operation LoadOperation

	LoraPath       []string
	Parallel       int
	BatchSize      int
	FlashAttention bool
	KvSize         int
	KvCacheType    string
	NumThreads     int
	GPULayers      ml.GPULayersList
	MultiUserCache bool

	// Legacy fields - not used with the Ollama engine
	ProjectorPath string
	MainGPU       int
	UseMmap       bool
}

type LoadResponse struct {
	Success bool
	Memory  ml.BackendMemory
}

var ErrLoadRequiredFull = errors.New("unable to load full model on GPU")

func (s *llamaServer) Load(ctx context.Context, systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, requireFull bool) ([]ml.DeviceID, error) {
	systemTotalMemory := systemInfo.TotalMemory
	systemFreeMemory := systemInfo.FreeMemory
	systemSwapFreeMemory := systemInfo.FreeSwap
	slog.Info("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))

	if len(gpus) == 0 || s.options.NumGPU == 0 {
		if !verifyCPUFit(s.ggml, s.modelPath, []string{s.loadRequest.ProjectorPath}, s.loadRequest.LoraPath, s.options, systemInfo, s.numParallel) {
			slog.Info("model requires more memory than is currently available, evicting a model to make space", "estimate", s.estimate)
			return nil, fmt.Errorf("model requires more system memory than is currently available: %w", ErrLoadRequiredFull)
		}
	} else {
		g := pickBestFullFitByLibrary(s.ggml, s.modelPath, []string{s.loadRequest.ProjectorPath}, s.loadRequest.LoraPath, s.options, gpus, s.numParallel)
		if g == nil {
			if !requireFull {
				g = pickBestPartialFitByLibrary(s.ggml, []string{s.loadRequest.ProjectorPath}, s.loadRequest.LoraPath, s.options, gpus, s.numParallel)
			} else {
				slog.Info("model requires more memory than is currently available, evicting a model to make space", "estimate", s.estimate)
				return nil, ErrLoadRequiredFull
			}
		}
		gpus = g
	}

	s.estimate = estimateGPULayers(gpus, s.ggml, []string{s.loadRequest.ProjectorPath}, s.options, s.numParallel)

	if len(gpus) >= 1 {
		switch {
		case s.options.NumGPU == 0:
			gpus = []ml.DeviceInfo{}
		case gpus[0].Library == "Metal" && s.estimate.VRAMSize > systemInfo.TotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			s.options.NumGPU = 0
			gpus = []ml.DeviceInfo{}
		case gpus[0].Library != "Metal" && s.estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			gpus = []ml.DeviceInfo{}
		case s.options.NumGPU < 0 && s.estimate.Layers > 0:
			s.options.NumGPU = s.estimate.Layers
		}
	} else {
		s.options.NumGPU = 0
	}

	// On linux and windows, over-allocating CPU memory will almost always result in an error
	// Darwin has fully dynamic swap so has no direct concept of free swap space
	if runtime.GOOS != "darwin" {
		systemMemoryRequired := s.estimate.TotalSize - s.estimate.VRAMSize
		available := systemInfo.FreeMemory + systemInfo.FreeSwap
		if systemMemoryRequired > available {
			slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", format.HumanBytes2(available), "total", format.HumanBytes2(systemInfo.TotalMemory), "free", format.HumanBytes2(systemInfo.FreeMemory), "swap", format.HumanBytes2(systemInfo.FreeSwap))
			return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available))
		}
	}

	slog.Info("offload", "", s.estimate)

	s.gpus = gpus
	s.loadRequest.GPULayers = createGPULayers(s.estimate, s.ggml, gpus, s.options.NumGPU)

	// Mmap is only supported on the llama engine
	if s.textProcessor == nil {
		s.loadRequest.UseMmap = true

		// mmap has issues with partial offloading on metal
		for _, g := range gpus {
			if g.Library == "Metal" &&
				uint64(s.options.NumGPU) > 0 &&
				uint64(s.options.NumGPU) < s.ggml.KV().BlockCount()+1 {
				s.options.UseMMap = new(bool)
				*s.options.UseMMap = false
			}
		}

		// Windows CUDA should not use mmap for best performance
		// On Linux, with a model larger than free memory, mmap leads to thrashing
		// For CPU loads we want the memory to be allocated, not FS cache
		if (runtime.GOOS == "windows" && len(gpus) > 0 && gpus[0].Library == "CUDA" && s.options.UseMMap == nil) ||
			(runtime.GOOS == "linux" && systemInfo.FreeMemory < s.estimate.TotalSize && s.options.UseMMap == nil) ||
			(len(gpus) == 0 && s.options.UseMMap == nil) ||
			(len(gpus) > 0 && gpus[0].Library == "Vulkan" && s.options.UseMMap == nil) ||
			(s.options.UseMMap != nil && !*s.options.UseMMap) {
			s.loadRequest.UseMmap = false
		}
	}

	if err := s.waitUntilRunnerLaunched(ctx); err != nil {
		return nil, err
	}

	resp, err := s.initModel(ctx, s.loadRequest, LoadOperationCommit)
	if err != nil {
		return nil, err
	}

	// On the Ollama engine, we can print out a summary of the memory allocations.
	// We don't have this for the llama engine but it does something similar itself.
	if s.textProcessor != nil {
		resp.Memory.Log(slog.LevelInfo)
	}

	if !resp.Success {
		slog.Warn("failed to allocate memory for model", "memory", resp.Memory)
		return nil, errors.New("failed to allocate memory for model")
	}

	// The llama engine does its memory allocations together with model loading, so we
	// need to wait until it is done to ensure that we have accurate memory data before
	// loading the next model
	if s.textProcessor == nil {
		return uniqueDeviceIDs(s.loadRequest.GPULayers), s.WaitUntilRunning(ctx)
	} else {
		return uniqueDeviceIDs(s.loadRequest.GPULayers), nil
	}
}

// createGPULayers maps from the tensor splits assigned by the memory estimates to explicit assignment
// of particular layers onto GPUs
func createGPULayers(estimate MemoryEstimate, ggml *ggml.GGML, gpus []ml.DeviceInfo, numGPU int) ml.GPULayersList {
	if numGPU <= 0 || len(gpus) == 0 {
		return nil
	}

	gpuLayers := make(ml.GPULayersList, len(gpus))
	for i := range gpuLayers {
		gpuLayers[i].DeviceID = gpus[i].DeviceID
	}

	var sum float32
	splits := make([]float32, len(estimate.TensorSplit))
	// cumulative sum of all splits
	for i := range splits {
		sum += float32(estimate.TensorSplit[i])
		splits[i] = sum
	}

	if sum <= 0 {
		return nil
	}

	// normalize splits
	for i := range splits {
		splits[i] /= sum
	}

	blocks := int(ggml.KV().BlockCount())
	gpuRangeStart := max(0, blocks-numGPU)
	gpuRangeStop := min(gpuRangeStart+numGPU, blocks+1)
	for i := range blocks + 1 {
		if i < gpuRangeStart || i >= gpuRangeStop {
			continue
		}

		index := slices.IndexFunc(splits, func(f float32) bool { return float32(i-gpuRangeStart)/float32(gpuRangeStop-gpuRangeStart) < f })
		if index < 0 || index >= len(gpus) {
			continue
		}

		gpuLayers[index].Layers = append(gpuLayers[index].Layers, i)
	}

	return gpuLayers
}
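
// Worked example (illustrative numbers): with 10 blocks, numGPU = 6 and a
// cumulative normalized split of [0.5, 1.0] across two GPUs, layers 4-9 are
// offloaded; the first half of that range lands on GPU 0, the rest on GPU 1,
// and the output layer (index 10) stays on the CPU:
//
//	createGPULayers(estimate, f, gpus, 6)
//	// => [{gpu0 [4 5 6]} {gpu1 [7 8 9]}]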

// Load finds the optimal layout of layers to offload onto GPUs, starting with no initial information
// about the size of the model. It does this by:
// 1. Assigning the full model to the GPU with the largest amount of free memory
// 2. Attempting to allocate that layout and receiving the memory requirements in response
// 3. Creating a new layout based on the updated memory information
// 4. Going back to step 2 and looping until we either stabilize on a particular layout or discover that we have entered a cycle
//
// This process is repeated for higher levels of loading the model (fit, allocate, commit). The earlier levels are quicker,
// allowing for faster iteration, but may return less information.
//
// Returns the list of GPU IDs that were used in the final allocation on success
func (s *ollamaServer) Load(ctx context.Context, systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, requireFull bool) ([]ml.DeviceID, error) {
	var success bool
	defer func() {
		if !success {
			s.initModel(ctx, LoadRequest{}, LoadOperationClose)
		}
		if s.mem != nil {
			s.mem.Log(slog.LevelInfo)
		}
	}()

	slog.Info("loading model", "model layers", s.totalLayers, "requested", s.options.NumGPU)

	systemTotalMemory := systemInfo.TotalMemory
	systemFreeMemory := systemInfo.FreeMemory
	systemSwapFreeMemory := systemInfo.FreeSwap
	slog.Info("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))

	for _, gpu := range gpus {
		available := gpu.FreeMemory - envconfig.GpuOverhead() - gpu.MinimumMemory()
		if gpu.FreeMemory < envconfig.GpuOverhead()+gpu.MinimumMemory() {
			available = 0
		}
		slog.Info("gpu memory", "id", gpu.ID, "library", gpu.Library,
			"available", format.HumanBytes2(available),
			"free", format.HumanBytes2(gpu.FreeMemory),
			"minimum", format.HumanBytes2(gpu.MinimumMemory()),
			"overhead", format.HumanBytes2(envconfig.GpuOverhead()))
	}

	pastAllocations := make(map[uint64]struct{})
	var backoff float32

	gpuLayers, err := s.createLayout(systemInfo, gpus, s.mem, requireFull, backoff)
	if err != nil {
		return nil, err
	}

	if err := s.waitUntilRunnerLaunched(ctx); err != nil {
		return nil, err
	}

nextOperation:
	for operation := LoadOperationFit; operation < LoadOperationCommit; operation++ {
	nextLoad:
		for {
			s.loadRequest.GPULayers = gpuLayers
			resp, err := s.initModel(ctx, s.loadRequest, operation)
			if err != nil {
				return nil, err
			}

			resp.Memory.Log(slog.LevelDebug)
			slog.Debug("memory", "success", resp.Success, "required", resp.Memory)

			pastAllocations[gpuLayers.Hash()] = struct{}{}
			s.mem = &resp.Memory

			for {
				newGPULayers, err := s.createLayout(systemInfo, gpus, s.mem, requireFull, backoff)
				if err != nil {
					return nil, err
				}

				slog.Debug("new layout created", "layers", newGPULayers)

				// We get additional memory information over time, which will reduce the number of
				// layers that can fit, so fewer layers is actually better. As long as we haven't seen
				// this layout before and it doesn't have more layers than the last one, we can keep
				// trying to see if we can do better.
				if _, ok := pastAllocations[newGPULayers.Hash()]; !ok && newGPULayers.Sum() <= gpuLayers.Sum() {
					gpuLayers = newGPULayers
					continue nextLoad
				}

				// If we are looping around a few different layouts due to graphs moving off and on
				// GPUs, make sure that we try out the intermediate states. For example, if we are
				// looping between offloading 39 and 41 layers, we should also check 40.
				//
				// This switches strategies to force an incremental number of layers to be offloaded
				// and checking the memory layout. If the allocation succeeds and creating a new layout
				// without forcing offload yields the same or greater number of layers offloaded, then
				// the trial is successful.
				//
				// This alternate strategy does not introduce the possibility of loops with the overall
				// state machine, as it exits this code block either with a successful result (moving
				// to the next operation) or with the original number of layers offloaded.
				if s.options.NumGPU < 0 && newGPULayers.Sum()-gpuLayers.Sum() > 1 {
					for i := newGPULayers.Sum() - 1; i >= gpuLayers.Sum(); i-- {
						slog.Debug("exploring intermediate layers", "layer", i)

						s.options.NumGPU = i
						newGPULayers, err = s.createLayout(systemInfo, gpus, s.mem, requireFull, backoff)
						s.options.NumGPU = -1
						if err != nil {
							return nil, err
						}
						slog.Debug("new layout created", "layers", newGPULayers)

						s.loadRequest.GPULayers = newGPULayers
						resp, err = s.initModel(ctx, s.loadRequest, operation)
						if err != nil {
							return nil, err
						}

						resp.Memory.Log(slog.LevelDebug)
						slog.Debug("memory", "success", resp.Success, "required", resp.Memory)

						if resp.Success {
							verifyGPULayers, err := s.createLayout(systemInfo, gpus, &resp.Memory, requireFull, backoff)
							if err != nil {
								return nil, err
							}

							slog.Debug("verifying layout", "layers", verifyGPULayers)

							if newGPULayers.Sum() <= verifyGPULayers.Sum() {
								gpuLayers = newGPULayers

								// Since we are going backwards (increasing the number of layers), ensure that
								// we can come back down if needed
								clear(pastAllocations)

								continue nextOperation
							}
						}
					}
				}

				// If we generated a layout a second time or go backwards, then we've converged. Use the last
				// layout before the repeat, which is already allocated.
				if resp.Success {
					continue nextOperation
				}

				if s.options.NumGPU >= 0 {
					return nil, fmt.Errorf("memory layout cannot be allocated with num_gpu = %v", s.options.NumGPU)
				}

				// Memory allocation failed even though we created a layout that we thought should
				// fit in available memory. This could happen if either our free memory reports
				// are incorrect or if available memory is changing between layout and allocation
				// time. Apply a backoff to try to find the real amount of available space.
				if backoff > 1 {
					slog.Warn("memory layout cannot be allocated", "memory", resp.Memory)
					return nil, errors.New("memory layout cannot be allocated")
				} else {
					backoff += 0.1
				}

				slog.Info("model layout did not fit, applying backoff", "backoff", fmt.Sprintf("%.2f", backoff))
			}
		}
	}

	s.loadRequest.GPULayers = gpuLayers
	resp, err := s.initModel(ctx, s.loadRequest, LoadOperationCommit)
	if err != nil {
		return nil, err
	}

	success = resp.Success
	s.mem = &resp.Memory

	if !success {
		slog.Warn("failed to commit memory for model", "memory", resp.Memory)
		return nil, errors.New("failed to commit memory for model")
	}

	return uniqueDeviceIDs(gpuLayers), nil
}

func uniqueDeviceIDs(gpuLayers ml.GPULayersList) []ml.DeviceID {
	devices := []ml.DeviceID{}
	for _, layer := range gpuLayers {
		if !slices.Contains(devices, layer.DeviceID) {
			devices = append(devices, layer.DeviceID)
		}
	}
	return devices
}

// createLayout uses the current best view of memory requirements and creates a layout of model layers on GPUs.
// It does this by:
// - Calculating how much space each layer requires
// - Calculating how much space each GPU has available for layers, based on free memory and space occupied by the graph
// - Assigning layers
// - Ensuring that we don't exceed limits, such as requirements about partial offloading or system memory
func (s *ollamaServer) createLayout(systemInfo ml.SystemInfo, systemGPUs []ml.DeviceInfo, memory *ml.BackendMemory, requireFull bool, backoff float32) (ml.GPULayersList, error) {
	if memory == nil {
		memory = &ml.BackendMemory{CPU: ml.DeviceMemory{
			Weights: make([]uint64, s.totalLayers),
			Cache:   make([]uint64, s.totalLayers),
		}}
	}
	gpuLayers, layers, err := s.buildLayout(systemGPUs, memory, requireFull, backoff)
	if err != nil {
		return nil, err
	}
	err = s.verifyLayout(systemInfo, memory, requireFull, gpuLayers, layers)
	if err != nil {
		return nil, err
	}
	return gpuLayers, nil
}

func (s *ollamaServer) buildLayout(systemGPUs []ml.DeviceInfo, memory *ml.BackendMemory, requireFull bool, backoff float32) (ml.GPULayersList, []uint64, error) {
	gpus := append(make([]ml.DeviceInfo, 0, len(systemGPUs)), systemGPUs...)
	sort.Sort(sort.Reverse(ml.ByFreeMemory(gpus)))

	layers := make([]uint64, len(memory.CPU.Weights))
	for i := range layers {
		for j := range memory.GPUs {
			layers[i] += memory.GPUs[j].Weights[i]
			layers[i] += memory.GPUs[j].Cache[i]
		}
		layers[i] += memory.CPU.Weights[i]
		layers[i] += memory.CPU.Cache[i]
		logutil.Trace("layer to assign", "layer", i, "size", format.HumanBytes2(layers[i]))
	}

	gpuLayers := ml.GPULayersList{}
	for _, gl := range ml.ByLibrary(gpus) {
		// If a GPU already has a graph allocated on it, then we should continue to use it.
		// Otherwise, we lose information that we got from previous allocations, which can
		// cause cycling. Plus, we get more information about required allocation from each
		// iteration, so it doesn't make sense that a later iteration would use fewer GPUs.
		lastUsedGPU := 0
		for i := range gl {
			found := false
			for j := range memory.GPUs {
				if gl[i].DeviceID == memory.GPUs[j].DeviceID {
					if memory.GPUs[j].Graph != 0 {
						lastUsedGPU = i
					}

					reserved := uint64(float32(gl[i].FreeMemory)*backoff) + gl[i].MinimumMemory() + envconfig.GpuOverhead() + memory.GPUs[j].Graph
					if gl[i].FreeMemory > reserved {
						gl[i].FreeMemory -= reserved
					} else {
						gl[i].FreeMemory = 0
					}

					slog.Debug("available gpu", "id", gl[i].ID, "library", gl[i].Library,
						"available layer vram", format.HumanBytes2(gl[i].FreeMemory),
						"backoff", fmt.Sprintf("%.2f", backoff), "minimum", format.HumanBytes2(gl[i].MinimumMemory()),
						"overhead", format.HumanBytes2(envconfig.GpuOverhead()),
						"graph", format.HumanBytes2(memory.GPUs[j].Graph))

					found = true
					break
				}
			}
			if !found {
				// The runner doesn't report seeing this GPU
				gl[i].FreeMemory = 0
			}
		}

		libraryGpuLayers := assignLayers(layers, gl, requireFull, s.options.NumGPU, lastUsedGPU)
		if libraryGpuLayers.Sum() > gpuLayers.Sum() {
			gpuLayers = libraryGpuLayers
		}
	}
	return gpuLayers, layers, nil
}

// verifyLayout ensures that we don't exceed limits, such as requirements about partial offloading or system memory
func (s *ollamaServer) verifyLayout(systemInfo ml.SystemInfo, memory *ml.BackendMemory, requireFull bool, gpuLayers ml.GPULayersList, layers []uint64) error {
	// These sizes will only increase as we go through additional iterations and get additional information.
	cpuSize := memory.InputWeights + memory.CPU.Graph
	var vramSize uint64
	for _, gl := range gpuLayers {
		for _, gpu := range memory.GPUs {
			if gl.DeviceID == gpu.DeviceID {
				vramSize += gpu.Graph
				break
			}
		}
	}

nextLayer:
	for i := range layers {
		for _, g := range gpuLayers {
			for _, gl := range g.Layers {
				if i == gl {
					vramSize += layers[i]
					continue nextLayer
				}
			}
		}
		cpuSize += layers[i]
	}

	if requireFull {
		if gpuLayers.Sum() < len(layers) && (s.options.NumGPU < 0 || gpuLayers.Sum() < s.options.NumGPU) {
			return ErrLoadRequiredFull
		}

		if cpuSize > systemInfo.FreeMemory {
			return ErrLoadRequiredFull
		}
	}

	// On linux and windows, over-allocating CPU memory will almost always result in an error
	// Darwin has fully dynamic swap so has no direct concept of free swap space
	if runtime.GOOS != "darwin" {
		available := systemInfo.FreeMemory + systemInfo.FreeSwap
		if cpuSize > available {
			slog.Warn("model request too large for system", "requested", format.HumanBytes2(cpuSize), "available", format.HumanBytes2(available), "total", format.HumanBytes2(systemInfo.TotalMemory), "free", format.HumanBytes2(systemInfo.FreeMemory), "swap", format.HumanBytes2(systemInfo.FreeSwap))
			return fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(cpuSize), format.HumanBytes2(available))
		}
	} else {
		if vramSize > systemInfo.TotalMemory {
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			s.options.NumGPU = 0
			gpuLayers = ml.GPULayersList{}
		}
	}

	if gpuLayers.Sum() == 0 {
		slog.Debug("insufficient VRAM to load any model layers")
	}

	return nil
}

// assignLayers packs the maximum number of layers onto the smallest set of GPUs and comes up with a layer assignment
func assignLayers(layers []uint64, gpus []ml.DeviceInfo, requireFull bool, requestedLayers int, lastUsedGPU int) (gpuLayers ml.GPULayersList) {
	// If we can't fit everything then prefer offloading layers other than the output layer
	for range 2 {
		// requestedLayers may be -1 if nothing was requested
		requestedLayers = min(len(layers), requestedLayers)

		if !envconfig.SchedSpread() {
			for i := lastUsedGPU; i < len(gpus); i++ {
				// Try to pack things into as few GPUs as possible
				forceRequest := i == len(gpus)-1 && !requireFull
				gpuLayers = findBestFit(layers, gpus[:i+1], requestedLayers, forceRequest)
				if gpuLayers.Sum() == len(layers) || gpuLayers.Sum() == requestedLayers {
					break
				}
			}
		} else {
			gpuLayers = findBestFit(layers, gpus, requestedLayers, !requireFull)
		}

		// We only stop if we've gotten all of the layers - even if we got requestedLayers, we still
		// might want to try dropping the output layer.
		if gpuLayers.Sum() == len(layers) {
			return gpuLayers
		}

		layers = layers[:len(layers)-1]
	}

	return gpuLayers
}
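
// Illustration: for a model with four layers (three interior plus the output
// layer) where only three fit, the first pass over the full slice offloads
// [1 2 3], including the output layer; the retry with the output layer trimmed
// offloads [0 1 2] instead, trading the output layer for one more interior layer:
//
//	assignLayers(layers, gpus, false, -1, 0)
//	// pass 1: [{gpu [1 2 3]}]; pass 2 (output trimmed): [{gpu [0 1 2]}]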

// findBestFit binary searches to find the smallest capacity factor that can fit
// the max number of layers. The capacity factor is multiplied by the free space on
// each GPU and a small one will force even balancing.
func findBestFit(layers []uint64, gpus []ml.DeviceInfo, requestedLayers int, forceRequest bool) (gpuLayers ml.GPULayersList) {
	var high float32 = 1
	var low float32 = 0

	// If we need to fulfill the requested number of layers, pretend we have almost infinite VRAM
	if requestedLayers >= 0 && forceRequest {
		high = 1000
	}

	bestAssignments := greedyFit(layers, gpus, high, requestedLayers)
	maxNumGPU := bestAssignments.Sum()
	if maxNumGPU == 0 {
		return bestAssignments
	}

	for high-low > 1e-6 {
		mid := (low + high) / 2
		assignments := greedyFit(layers, gpus, mid, requestedLayers)
		if assignments.Sum() == maxNumGPU {
			high = mid
			bestAssignments = assignments
		} else {
			low = mid
		}
	}
	return bestAssignments
}
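
// For example, if two GPUs can hold 30 of 32 layers at a capacity factor of
// 1.0, the search narrows toward the smallest factor that still yields 30
// layers; since the factor caps how much of each GPU's free memory may be
// used, the result balances layers across both GPUs rather than saturating one.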

// greedyFit assigns layers incrementally to GPUs, spilling over as each runs out of free space
func greedyFit(layers []uint64, gpus []ml.DeviceInfo, capacity float32, requestedLayers int) (gpuLayers ml.GPULayersList) {
	device := len(gpus) - 1
	gpuLayers = ml.GPULayersList{{DeviceID: gpus[device].DeviceID}}
	freeSpace := uint64(float32(gpus[device].FreeMemory) * capacity)
	for i := len(layers) - 1; i >= 0; i-- {
		if requestedLayers >= 0 && len(layers)-1-i >= requestedLayers {
			break
		}

		for {
			if layers[i] <= freeSpace {
				gpuLayers[0].Layers = append([]int{i}, gpuLayers[0].Layers...)
				freeSpace -= layers[i]
				break
			}

			device--
			if device < 0 {
				return gpuLayers
			}
			gpuLayers = append(ml.GPULayersList{{DeviceID: gpus[device].DeviceID}}, gpuLayers...)
			freeSpace = uint64(float32(gpus[device].FreeMemory) * capacity)
		}
	}
	return gpuLayers
}
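
// Worked example (capacity 1.0, hypothetical sizes): four 4 GiB layers on two
// GPUs with 8 GiB free each yield [{gpu0 [0 1]} {gpu1 [2 3]}]: layers are
// placed from the highest index down, starting on the last GPU in the slice
// and spilling backwards onto earlier GPUs as each one fills.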

// waitUntilRunnerLaunched sleeps until the runner subprocess is alive enough
// to respond to status requests
func (s *llmServer) waitUntilRunnerLaunched(ctx context.Context) error {
	for {
		_, err := s.getServerStatus(ctx)
		if err == nil {
			break
		}

		t := time.NewTimer(10 * time.Millisecond)
		select {
		case <-t.C:
			continue
		case <-ctx.Done():
			return ctx.Err()
		}
	}

	return nil
}

// initModel sends a load request to the runner based on the request operation (fit, alloc, commit)
// and parameters
func (s *llmServer) initModel(ctx context.Context, req LoadRequest, operation LoadOperation) (*LoadResponse, error) {
	req.Operation = operation

	data, err := json.Marshal(req)
	if err != nil {
		return nil, fmt.Errorf("error marshaling load data: %w", err)
	}

	r, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/load", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating load request: %w", err)
	}
	r.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(r)
	if err != nil {
		return nil, fmt.Errorf("do load request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read load request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm load error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var llmResp LoadResponse
	if err := json.Unmarshal(body, &llmResp); err != nil {
		return nil, fmt.Errorf("load unmarshal encode response: %w", err)
	}

	return &llmResp, nil
}
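
// The wire format is simply the JSON-encoded LoadRequest with Operation set;
// an illustrative exchange (field values hypothetical):
//
//	POST http://127.0.0.1:<port>/load
//	{"Operation":1,"Parallel":2,"BatchSize":512,"KvSize":8192,...}
//
// The runner replies with a LoadResponse reporting success and the measured
// memory layout.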

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLaunched
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) String() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLaunched:
		return "llm server launched"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResponse struct {
	Status   ServerStatus `json:"status"`
	Progress float32      `json:"progress"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState)
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		if strings.Contains(err.Error(), "connection refused") {
			return ServerStatusNotResponding, errors.New("connection refused")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var ssr ServerStatusResponse
	if err := json.Unmarshal(body, &ssr); err != nil {
		return ServerStatusError, fmt.Errorf("health unmarshal encode response: %w", err)
	}

	switch ssr.Status {
	case ServerStatusLoadingModel:
		s.loadProgress = ssr.Progress
		return ssr.Status, nil
	case ServerStatusLaunched, ServerStatusReady, ServerStatusNoSlotsAvailable:
		return ssr.Status, nil
	default:
		return ssr.Status, fmt.Errorf("server error: %+v", ssr)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	stallDuration := envconfig.LoadTimeout()    // If no progress happens
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			return fmt.Errorf("llama runner process has terminated: %w", err)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status)
		}
		switch status {
		case ServerStatusReady:
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", time.Since(s.loadStart).Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status)
				stallTimer = time.Now().Add(stallDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

func (s *llmServer) Pid() int {
	if s.cmd != nil && s.cmd.Process != nil {
		return s.cmd.Process.Pid
	}
	return -1
}

func (s *llmServer) GetPort() int {
	return s.port
}

func (s *llmServer) HasExited() bool {
	if s.cmd != nil && s.cmd.ProcessState != nil && s.cmd.ProcessState.ExitCode() >= 0 {
		return true
	}
	return false
}

var grammarJSON = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws
object ::=
  "{" ws (
         string ":" ws value
    ("," ws string ":" ws value)*
  )? ws "}" 
array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? ws "]" 
string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" 
number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? 
# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`
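
// Since root must be an object, the grammar accepts {"a": [1, 2.5, "x"]} but
// rejects a bare top-level array or scalar.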

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}
}

type CompletionRequest struct {
	Prompt  string
	Format  json.RawMessage
	Images  []ImageData
	Options *api.Options

	Grammar  string // set before sending the request to the subprocess
	Shift    bool
	Truncate bool
}

// DoneReason represents the reason why a completion response is done
type DoneReason int

const (
	// DoneReasonStop indicates the completion stopped naturally
	DoneReasonStop DoneReason = iota
	// DoneReasonLength indicates the completion stopped due to length limits
	DoneReasonLength
	// DoneReasonConnectionClosed indicates the completion stopped due to the connection being closed
	DoneReasonConnectionClosed
)

func (d DoneReason) String() string {
	switch d {
	case DoneReasonLength:
		return "length"
	case DoneReasonStop:
		return "stop"
	default:
		return "" // closed
	}
}

type CompletionResponse struct {
	Content            string        `json:"content"`
	DoneReason         DoneReason    `json:"done_reason"`
	Done               bool          `json:"done"`
	PromptEvalCount    int           `json:"prompt_eval_count"`
	PromptEvalDuration time.Duration `json:"prompt_eval_duration"`
	EvalCount          int           `json:"eval_count"`
	EvalDuration       time.Duration `json:"eval_duration"`
}

func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	slog.Debug("completion request", "images", len(req.Images), "prompt", len(req.Prompt), "format", string(req.Format))
	logutil.Trace("completion request", "prompt", req.Prompt)

	if len(req.Format) > 0 {
		switch string(req.Format) {
		case `null`, `""`:
			// Field was set, but "missing" a value. We accept
			// these as "not set".
			break
		case `"json"`:
			req.Grammar = grammarJSON
1397
1398
1399
1400
		default:
			if req.Format[0] != '{' {
				return fmt.Errorf("invalid format: %q; expected \"json\" or a valid JSON Schema object", req.Format)
			}
1401

1402
1403
1404
1405
			// User provided a JSON schema
			g := llama.SchemaToGrammar(req.Format)
			if g == nil {
				return fmt.Errorf("invalid JSON schema in format")
1406
			}
1407
			req.Grammar = string(g)
1408
1409
1410
		}
	}

1411
1412
1413
1414
1415
	if req.Options == nil {
		opts := api.DefaultOptions()
		req.Options = &opts
	}

1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting completion request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return err
	}
	defer s.sem.Release(1)

	// put an upper limit on num_predict to avoid the model running on forever
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
	}

1431
	// Make sure the server is ready
1432
	status, err := s.getServerStatusRetry(ctx)
1433
1434
1435
	if err != nil {
		return err
	} else if status != ServerStatusReady {
1436
		return fmt.Errorf("unexpected server status: %s", status)
1437
1438
	}

1439
1440
1441
1442
	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)
1443

1444
	if err := enc.Encode(req); err != nil {
1445
1446
		return fmt.Errorf("failed to marshal data: %v", err)
	}
1447

1448
1449
1450
1451
1452
1453
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")
1454

1455
	res, err := http.DefaultClient.Do(serverReq)
1456
1457
1458
1459
	if err != nil && errors.Is(err, context.Canceled) {
		// client closed connection
		return err
	} else if err != nil {
1460
1461
		slog.Error("post predict", "error", err)
		return errors.New("model runner has unexpectedly stopped, this may be due to resource limitations or an internal error, check ollama server logs for details")
1462
1463
	}
	defer res.Body.Close()
1464

1465
1466
	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
1467
		if err != nil {
1468
			return fmt.Errorf("failed reading llm error response: %w", err)
1469
		}
1470
		log.Printf("llm predict error: %s", bodyBytes)
1471
		return api.StatusError{StatusCode: res.StatusCode, ErrorMessage: strings.TrimSpace(string(bodyBytes))}
1472
	}
1473

1474
1475
1476
	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)
1477

1478
1479
1480
	// keep track of the last token generated, this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int
1481

1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}
1492

1493
1494
			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
1495
				evt = line
1496
			}
1497

1498
			var c CompletionResponse
1499
			if err := json.Unmarshal(evt, &c); err != nil {
1500
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
1501
1502
			}
			switch {
1503
			case strings.TrimSpace(c.Content) == lastToken:
1504
1505
1506
1507
1508
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}
1509

1510
1511
1512
1513
1514
			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}
1515

1516
1517
1518
1519
			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
1520
			}
1521

1522
			if c.Done {
1523
				fn(c)
1524
				return nil
1525
			}
1526
		}
1527
	}
1528

1529
	if err := scanner.Err(); err != nil {
1530
		if strings.Contains(err.Error(), "unexpected EOF") || strings.Contains(err.Error(), "forcibly closed") {
1531
			s.Close()
1532
			var msg string
1533
1534
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
1535
1536
			} else {
				msg = err.Error()
1537
			}
1538
			return fmt.Errorf("an error was encountered while running the model: %s", msg)
1539
1540
		}

1541
		return fmt.Errorf("error reading llm response: %v", err)
1542
1543
	}

1544
	return nil
1545
1546
}
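
// Hypothetical caller sketch (assuming a running *llmServer s): stream content
// into a builder and inspect the final response, which carries DoneReason and
// the eval counters.
//
//	var sb strings.Builder
//	err := s.Completion(ctx, CompletionRequest{Prompt: "Why is the sky blue?"}, func(r CompletionResponse) {
//		if !r.Done {
//			sb.WriteString(r.Content)
//			return
//		}
//		slog.Debug("completion done", "reason", r.DoneReason, "eval_count", r.EvalCount)
//	})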

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float32 `json:"embedding"`
}
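
// Wire format, from the struct tags above (values illustrative):
// request {"content":"hello"} -> response {"embedding":[0.12,-0.03]}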

// Embedding computes an embedding vector for input via the runner's
// /embedding endpoint.
func (s *llmServer) Embedding(ctx context.Context, input string) ([]float32, error) {
	logutil.Trace("embedding request", "input", input)

	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting embedding request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status)
	}

	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	r, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	r.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(r)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var e EmbeddingResponse
	if err := json.Unmarshal(body, &e); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return e.Embedding, nil
}
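
// Hypothetical usage sketch:
//
//	vec, err := s.Embedding(ctx, "hello world")
//	if err != nil {
//		return err
//	}
//	slog.Debug("embedding", "dims", len(vec))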

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

// Tokenize encodes content into token ids using whichever tokenizer the
// server was loaded with: the llama.cpp model or the Ollama engine's text
// processor.
func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	s.llamaModelLock.Lock()
	defer s.llamaModelLock.Unlock()

	if s.llamaModel != nil {
		return s.llamaModel.Tokenize(content, false, true)
	}
	if s.textProcessor != nil {
		tokens, err := s.textProcessor.Encode(content, false)
		if err != nil {
			return nil, err
		}
		toks := make([]int, len(tokens))
		for i, t := range tokens {
			toks[i] = int(t)
		}
		return toks, nil
	}
	// not reached
	return nil, fmt.Errorf("no tokenizer configured")
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

// Detokenize converts token ids back into text using whichever tokenizer the
// server was loaded with.
func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	s.llamaModelLock.Lock()
	defer s.llamaModelLock.Unlock()

	if s.llamaModel != nil {
		var sb strings.Builder
		for _, token := range tokens {
			sb.WriteString(s.llamaModel.TokenToPiece(token))
		}
		return sb.String(), nil
	}
	if s.textProcessor != nil {
		toks := make([]int32, len(tokens))
		for i, t := range tokens {
			toks[i] = int32(t)
		}
		content, err := s.textProcessor.Decode(toks)
		if err != nil {
			return "", err
		}
		return content, nil
	}
	// not reached
	return "", fmt.Errorf("no tokenizer configured")
}
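
// Hypothetical round-trip sketch: detokenizing the tokens produced by
// Tokenize should reproduce the input for typical text, modulo tokenizer
// normalization.
//
//	ids, _ := s.Tokenize(ctx, "hello world")
//	text, _ := s.Detokenize(ctx, ids)
//	// text == "hello world" for most tokenizers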

// Close frees the in-process model (if any) and stops the runner subprocess.
func (s *llmServer) Close() error {
	s.llamaModelLock.Lock()
	if s.llamaModel != nil {
		llama.FreeModel(s.llamaModel)
		s.llamaModel = nil
	}
	s.llamaModelLock.Unlock()

	if s.cmd != nil {
		slog.Debug("stopping llama server", "pid", s.Pid())
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit", "pid", s.Pid())
			<-s.done
		}

		slog.Debug("llama server stopped", "pid", s.Pid())
	}

	return nil
}

func (s *llamaServer) VRAMSize() uint64 {
	return s.estimate.VRAMSize
}

func (s *llamaServer) TotalSize() uint64 {
	return s.estimate.TotalSize
}

func (s *llamaServer) VRAMByGPU(id ml.DeviceID) uint64 {
	for i, gpu := range s.gpus {
		if gpu.DeviceID == id {
			if i < len(s.estimate.GPUSizes) {
				return s.estimate.GPUSizes[i]
			}
		}
	}
	return 0
}

func (s *llamaServer) GetDeviceInfos(ctx context.Context) []ml.DeviceInfo {
	slog.Debug("llamarunner free vram reporting not supported")
	return nil
}

func (s *ollamaServer) VRAMSize() uint64 {
	if s.mem == nil {
		return 0
	}

	var mem uint64

	for _, g := range s.mem.GPUs {
		mem += g.Size()
	}

	// Some elements are always on CPU. However, if we have allocated all layers
	// on the GPU, then include the CPU components as well, to represent complete offloading.
	noCPULayers := true
	for i := range s.mem.CPU.Weights {
		if s.mem.CPU.Weights[i] != 0 || s.mem.CPU.Cache[i] != 0 {
			noCPULayers = false
			break
		}
	}
	if noCPULayers {
		mem += s.mem.InputWeights
		mem += s.mem.CPU.Graph
	}

	return mem
}
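
// Worked example (illustrative numbers): with 2 GiB resident on one GPU,
// 1 GiB on another, and no layers left on the CPU, VRAMSize reports
// 3 GiB + InputWeights + CPU.Graph, counting the always-on-CPU pieces toward
// VRAM to represent complete offloading.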

func (s *ollamaServer) TotalSize() uint64 {
	if s.mem == nil {
		return 0
	}

	mem := s.mem.InputWeights
	mem += s.mem.CPU.Size()
	for _, g := range s.mem.GPUs {
		mem += g.Size()
	}

	return mem
}

func (s *ollamaServer) VRAMByGPU(id ml.DeviceID) uint64 {
	if s.mem == nil {
		return 0
	}

	for _, g := range s.mem.GPUs {
		if g.DeviceID == id {
			return g.Size()
		}
	}

	return 0
}

func (s *ollamaServer) GetDeviceInfos(ctx context.Context) []ml.DeviceInfo {
	devices, err := ml.GetDevicesFromRunner(ctx, s)
	if err != nil {
		if s.cmd != nil && s.cmd.ProcessState == nil {
			// still running but hit an error, so log it
			slog.Debug("failure refreshing GPU information", "error", err)
		}
		// otherwise the runner has exited, so a failure is expected and not worth logging
	}
	return devices
}