package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, input string) ([]float32, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
}
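
// A minimal lifecycle sketch (illustrative only: the scheduler is the real
// caller, and the gpu.GetGPUInfo call, ctx, and opts here are assumptions):
//
//	ggml, _ := LoadModel(modelPath, 0)
//	srv, err := NewLlamaServer(gpu.GetGPUInfo(), modelPath, ggml, nil, nil, opts, 1)
//	if err == nil {
//		defer srv.Close()
//		if srv.WaitUntilRunning(ctx) == nil {
//			_ = srv.Completion(ctx, CompletionRequest{Prompt: "Hi", Options: &opts},
//				func(r CompletionResponse) { fmt.Print(r.Content) })
//		}
//	}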

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port        int
	cmd         *exec.Cmd
	done        chan error // Channel to signal when the process exits
	status      *StatusWriter
	options     api.Options
	numParallel int

	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount     int
	gpus         gpu.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration   // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// the maxArraySize is negative, all arrays are collected.
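//
// Illustrative calls:
//
//	ggml, err := LoadModel("model.gguf", 0)  // arrays capped at the default 1024 entries
//	ggml, err = LoadModel("model.gguf", -1)  // collect all arrays regardless of size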
func LoadModel(model string, maxArraySize int) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f, maxArraySize)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
	var err error
	var cpuRunner string
	var estimate MemoryEstimate
	var systemTotalMemory uint64
	var systemFreeMemory uint64
	var systemSwapFreeMemory uint64

	systemMemInfo, err := gpu.GetCPUMem()
	if err != nil {
		slog.Error("failed to lookup system memory", "error", err)
	} else {
		systemTotalMemory = systemMemInfo.TotalMemory
		systemFreeMemory = systemMemInfo.FreeMemory
		systemSwapFreeMemory = systemMemInfo.FreeSwap
		slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))
	}

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
	if opts.NumGPU == 0 {
		gpus = gpu.GetCPUInfo()
	}
	if len(gpus) == 1 && gpus[0].Library == "cpu" {
		cpuRunner = serverForCpu()
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)
	} else {
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)

		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			cpuRunner = serverForCpu()
			gpus = gpu.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	// On linux and windows, over-allocating CPU memory will almost always result in an error
	// Darwin has fully dynamic swap so has no direct concept of free swap space
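	// For example (figures illustrative): a model estimated at 7 GiB total
	// with 5 GiB offloaded to VRAM needs ~2 GiB of system memory; with only
	// 1 GiB of free RAM plus 0.5 GiB of free swap, the load fails fast below.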
	if runtime.GOOS != "darwin" {
		systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize
		available := systemFreeMemory + systemSwapFreeMemory
		if systemMemoryRequired > available {
			slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", format.HumanBytes2(available), "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory))
			return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available))
		}
	}

	estimate.log()

	// Loop through potential servers
	finalErr := errors.New("no suitable llama servers found")

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	availableServers := getAvailableServers()
	if len(availableServers) == 0 {
		if runtime.GOOS != "windows" {
			slog.Warn("llama server binary disappeared, reinitializing payloads")
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
			availableServers = getAvailableServers()
		} else {
			return nil, finalErr
		}
	}
	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = serversForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant
	}
	demandLib := envconfig.LLMLibrary()
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
			if strings.HasPrefix(demandLib, "cpu") {
				// Omit the GPU flag to silence the warning
				opts.NumGPU = -1
			}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}

	params := []string{
		"--model", model,
		"--ctx-size", strconv.Itoa(opts.NumCtx),
		"--batch-size", strconv.Itoa(opts.NumBatch),
		"--embedding",
	}

	params = append(params, "--log-disable")

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", strconv.Itoa(opts.NumGPU))
	}

	if envconfig.Debug() {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", strconv.Itoa(opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", strconv.Itoa(opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	flashAttnEnabled := envconfig.FlashAttention()

	for _, g := range gpus {
		// only cuda (compute capability 7+) and metal support flash attention
		if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) {
			flashAttnEnabled = false
		}

		// mmap has issues with partial offloading on metal
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
			opts.UseMMap = new(bool)
			*opts.UseMMap = false
		}
	}

	if flashAttnEnabled {
		params = append(params, "--flash-attn")
	}

	// Windows CUDA should not use mmap for best performance
	// On Linux with a model larger than free space, mmap leads to thrashing
	// For CPU loads we want the memory to be allocated, not FS cache
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) ||
		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) ||
		(gpus[0].Library == "cpu" && opts.UseMMap == nil) ||
		(opts.UseMMap != nil && !*opts.UseMMap) {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if gpu.IsNUMA() {
		numaMode := "distribute"
		if runtime.GOOS == "linux" {
			if _, err := exec.LookPath("numactl"); err == nil {
				numaMode = "numactl"
			}
		}
		params = append(params, "--numa", numaMode)
	}

	params = append(params, "--parallel", strconv.Itoa(numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	for i := range len(servers) {
		dir := availableServers[servers[i]]
		if dir == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		if strings.HasPrefix(servers[i], "cpu") {
			gpus = gpu.GetCPUInfo()
		}

		// Find an available port, retrying on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// prepend the server directory to LD_LIBRARY_PATH/PATH and the parent dir for common dependencies
		libraryPaths := []string{dir, filepath.Dir(dir)}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// Append our runner directory to the path
			// This will favor system libraries over our bundled library dependencies
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		// Note: we always put the dependency path first
		// since this was the exact version we verified for AMD GPUs
		// and we favor what the user had in their path
		if gpus[0].DependencyPath != "" {
			// TODO refine for multi-gpu support
			libraryPaths = append([]string{gpus[0].DependencyPath}, libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server += ".exe"
		}

		// Detect tmp cleaners wiping out the file
		_, err := os.Stat(server)
		if errors.Is(err, os.ErrNotExist) {
			slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err)
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
		}

		s := &llmServer{
			port:        port,
			cmd:         exec.Command(server, finalParams...),
			status:      NewStatusWriter(os.Stderr),
			options:     opts,
			estimate:    estimate,
			numParallel: numParallel,
			sem:         semaphore.NewWeighted(int64(numParallel)),
			totalLayers: ggml.KV().BlockCount() + 1,
			gpus:        gpus,
			done:        make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status
		s.cmd.SysProcAttr = LlamaServerSysProcAttr

		envWorkarounds := [][2]string{}
		for _, gpu := range gpus {
			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
		}
		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			} else if len(envWorkarounds) != 0 {
				for _, kv := range envWorkarounds {
					if strings.EqualFold(cmp[0], kv[0]) {
						s.cmd.Env[i] = kv[0] + "=" + kv[1]
					}
				}
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug() {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCR_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "GPU_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			// Detect permission denied and augment the message with a note about noexec
			if errors.Is(err, os.ErrPermission) {
				finalErr = fmt.Errorf("unable to start server %w. %s may have noexec set. Set OLLAMA_TMPDIR for server to a writable executable directory", err, dir)
				continue
			}
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// reap subprocess when it exits
		go func() {
			err := s.cmd.Wait()
			// Favor a more detailed message over the process exit status
			if err != nil && s.status != nil && s.status.LastErrMsg != "" {
				slog.Debug("llama runner terminated", "error", err)
				if strings.Contains(s.status.LastErrMsg, "unknown model") {
					s.status.LastErrMsg = "this model is not supported by your version of Ollama. You may need to upgrade"
				}
				s.done <- errors.New(s.status.LastErrMsg)
			} else {
				s.done <- err
			}
		}()

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}

func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file, 0)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

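// ServerStatusResp mirrors the llama.cpp server's /health payload; an
// illustrative body while layers load: {"status":"loading model","progress":0.42}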
type ServerStatusResp struct {
	Status          string  `json:"status"`
	SlotsIdle       int     `json:"slots_idle"`
	SlotsProcessing int     `json:"slots_processing"`
	Error           string  `json:"error"`
	Progress        float32 `json:"progress"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's already exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health response: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("unmarshal health response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		s.loadProgress = status.Progress
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
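// (up to 10 attempts, 5ms apart, i.e. roughly 50ms of polling before giving up).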
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := 5 * time.Minute            // If no progress happens
	finalLoadDuration := 5 * time.Minute        // After we hit 100%, give the runner more time to come online
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			return fmt.Errorf("llama runner process has terminated: %w", err)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
				stallTimer = time.Now().Add(finalLoadDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`
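
// jsonGrammar is attached to a completion when CompletionRequest.Format is
// "json" (see Completion below). A sketch, assuming a running server srv,
// a ctx, and populated opts:
//
//	req := CompletionRequest{Prompt: "List three colors as a JSON object.", Format: "json", Options: &opts}
//	err := srv.Completion(ctx, req, func(r CompletionResponse) { fmt.Print(r.Content) })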

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options *api.Options
}

type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// put an upper limit on num_predict to avoid the model running forever
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
	}

	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"min_p":             req.Options.MinP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("Prompt does not specify that the LLM should respond in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
		}
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated; this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				return fmt.Errorf("error parsing llm response stream: %s", line)
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			s.Close()
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("an unknown error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float32 `json:"embedding"`
}

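// Embedding returns the model's embedding vector for input; the vector
// length is model-dependent. A sketch, assuming a running server s and a ctx:
//
//	vec, err := s.Embedding(ctx, "some text")
//	// len(vec) == the loaded model's embedding dimension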
func (s *llmServer) Embedding(ctx context.Context, input string) ([]float32, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	r, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	r.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(r)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var e EmbeddingResponse
	if err := json.Unmarshal(body, &e); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return e.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}
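
// Tokenize and Detokenize above are approximate inverses over the runner's
// vocabulary; an illustrative round trip, assuming a running server s:
//
//	toks, _ := s.Tokenize(ctx, "hello world")
//	text, _ := s.Detokenize(ctx, toks) // "hello world" again, modulo tokenizer normalization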

func (s *llmServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			return s.estimate.GPUSizes[i]
		}
	}
	return 0
}

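// parseDurationMs converts a millisecond count into a time.Duration,
// e.g. parseDurationMs(1234.5) == 1.2345s.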
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}