package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/discover"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/fs/ggml"
	"github.com/ollama/ollama/llama"
	"github.com/ollama/ollama/model"
)

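// LlamaServer is the interface used to drive a running model server:
// health checks, streamed completions, embeddings, tokenization, and
// memory usage estimates.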
type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, input string) ([]float32, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
	Pid() int
}

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port        int
	cmd         *exec.Cmd
	done        chan error // Channel to signal when the process exits
	status      *StatusWriter
	options     api.Options
	numParallel int
	modelPath   string

	// llamaModel is an instance of the cgo llama.cpp model definition
	// nil if this server is running the new engine
	llamaModel     *llama.Model
	llamaModelLock sync.Mutex

	// textProcessor handles text encoding/decoding for the model in the Ollama engine
	// nil if this server is running the llama.cpp based engine
	textProcessor model.TextProcessor

	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount     int
	gpus         discover.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration        // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// the maxArraySize is negative, all arrays are collected.
func LoadModel(model string, maxArraySize int) (*ggml.GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := ggml.Decode(f, maxArraySize)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus discover.GpuInfoList, modelPath string, f *ggml.GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
	systemInfo := discover.GetSystemInfo()
	systemTotalMemory := systemInfo.System.TotalMemory
	systemFreeMemory := systemInfo.System.FreeMemory
	systemSwapFreeMemory := systemInfo.System.FreeSwap
	slog.Info("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
	if opts.NumGPU == 0 {
		gpus = discover.GetCPUInfo()
	}

	estimate := EstimateGPULayers(gpus, f, projectors, opts, numParallel)
	if len(gpus) > 1 || gpus[0].Library != "cpu" {
		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			gpus = discover.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	// On Linux and Windows, over-allocating CPU memory will almost always result in an error
	// Darwin has fully dynamic swap so has no direct concept of free swap space
	if runtime.GOOS != "darwin" {
		systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize
		available := systemFreeMemory + systemSwapFreeMemory
		if systemMemoryRequired > available {
			slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", format.HumanBytes2(available), "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory))
			return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available))
		}
	}

	slog.Info("offload", "", estimate)

	params := []string{
		"--model", modelPath,
		"--ctx-size", strconv.Itoa(opts.NumCtx),
		"--batch-size", strconv.Itoa(opts.NumBatch),
	}

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", strconv.Itoa(opts.NumGPU))
	}

	if envconfig.Debug() {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", strconv.Itoa(opts.MainGPU))
	}

	if len(adapters) > 0 {
		for _, adapter := range adapters {
			params = append(params, "--lora", adapter)
		}
	}

	defaultThreads := systemInfo.GetOptimalThreadCount()
	if opts.NumThread > 0 {
		params = append(params, "--threads", strconv.Itoa(opts.NumThread))
	} else if defaultThreads > 0 {
		params = append(params, "--threads", strconv.Itoa(defaultThreads))
	}

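	// Enable flash attention only when it is requested via the environment and
	// supported by both the GPUs and the model.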
	fa := envconfig.FlashAttention()
	if fa && !gpus.FlashAttentionSupported() {
		slog.Warn("flash attention enabled but not supported by gpu")
		fa = false
	}

	if fa && !f.SupportsFlashAttention() {
		slog.Warn("flash attention enabled but not supported by model")
		fa = false
	}

	kvct := strings.ToLower(envconfig.KvCacheType())

	if fa {
		slog.Info("enabling flash attention")
		params = append(params, "--flash-attn")

		// Flash Attention also supports kv cache quantization
		// Enable if the requested and kv cache type is supported by the model
		if kvct != "" && f.SupportsKVCacheType(kvct) {
			params = append(params, "--kv-cache-type", kvct)
		} else {
			slog.Warn("kv cache type not supported by model", "type", kvct)
		}
	} else if kvct != "" && kvct != "f16" {
		slog.Warn("quantized kv cache requested but flash attention disabled", "type", kvct)
	}

	// mmap has issues with partial offloading on metal
	for _, g := range gpus {
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < f.KV().BlockCount()+1 {
			opts.UseMMap = new(bool)
			*opts.UseMMap = false
		}
	}

	// Windows CUDA should not use mmap for best performance
	// On Linux, with a model larger than free space, mmap leads to thrashing
	// For CPU loads we want the memory to be allocated, not FS cache
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) ||
		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) ||
		(gpus[0].Library == "cpu" && opts.UseMMap == nil) ||
		(opts.UseMMap != nil && !*opts.UseMMap) {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	// TODO - NUMA support currently doesn't work properly

	params = append(params, "--parallel", strconv.Itoa(numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	if envconfig.MultiUserCache() {
		params = append(params, "--multiuser-cache")
	}

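	// Enumerate the runner libraries bundled under discover.LibOllamaPath
	// (e.g. cuda_v12, rocm) so a build compatible with the detected GPU can
	// be selected below.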
	libs := make(map[string]string)
	if entries, err := os.ReadDir(discover.LibOllamaPath); err == nil {
		for _, entry := range entries {
			libs[entry.Name()] = filepath.Join(discover.LibOllamaPath, entry.Name())
		}
	}

	lib := gpus[0].RunnerName()
	requested := envconfig.LLMLibrary()
	if libs[requested] != "" {
		slog.Info("using requested gpu library", "requested", requested)
		lib = requested
	}

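	// Order candidate libraries with an exact match for the detected runner
	// first, followed by other entries from the same family (e.g. cuda_*).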
	var compatible []string
	for k := range libs {
		// exact match first
		if k == lib {
			compatible = append([]string{k}, compatible...)
			continue
		}

		// then match the family (e.g. 'cuda')
		if strings.Split(k, "_")[0] == strings.Split(lib, "_")[0] {
			compatible = append(compatible, k)
		}
	}
	slog.Debug("compatible gpu libraries", "compatible", compatible)
	exe, err := os.Executable()
	if err != nil {
		return nil, fmt.Errorf("unable to lookup executable path: %w", err)
	}

	if eval, err := filepath.EvalSymlinks(exe); err == nil {
		exe = eval
	}

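	// Pick the engine for this model: the Ollama engine uses a TextProcessor,
	// while the llama.cpp engine loads a vocab-only llama.Model for
	// tokenization. If the new engine cannot handle the model, fall back to
	// the llama.cpp runner.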
	var llamaModel *llama.Model
	var textProcessor model.TextProcessor
	if envconfig.NewEngine() || f.KV().OllamaEngineRequired() {
		textProcessor, err = model.NewTextProcessor(modelPath)
		if err != nil {
			// To prepare for opt-out mode, instead of treating this as an error, we fallback to the old runner
			slog.Debug("model not yet supported by Ollama engine, switching to compatibility mode", "model", modelPath, "error", err)
		}
	}
	if textProcessor == nil {
		llamaModel, err = llama.LoadModelFromFile(modelPath, llama.ModelParams{VocabOnly: true})
		if err != nil {
			return nil, err
		}
	}

	if len(projectors) > 0 && llamaModel != nil {
		params = append(params, "--mmproj", projectors[0])
	}

	// iterate through compatible GPU libraries such as 'cuda_v12', 'cuda_v11', 'rocm', etc.
	// adding each library's respective path to the LD_LIBRARY_PATH, until finally running
	// without any LD_LIBRARY_PATH flags
	for {
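		// Reserve a free TCP port for the runner by binding to port 0 and
		// closing the listener; fall back to a random ephemeral port if that fails.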
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed, using random port")
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := []string{"runner"}
		if textProcessor != nil {
			// New engine
			// TODO - if we have failure to load scenarios, add logic to retry with the old runner
			finalParams = append(finalParams, "--ollama-engine")
		}
		finalParams = append(finalParams, params...)
		finalParams = append(finalParams, "--port", strconv.Itoa(port))

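		// Pick the dynamic library search path variable for the host OS.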
		var pathEnv string
		switch runtime.GOOS {
		case "windows":
			pathEnv = "PATH"
		case "darwin":
			pathEnv = "DYLD_LIBRARY_PATH"
		default:
			pathEnv = "LD_LIBRARY_PATH"
		}

		var libraryPaths []string
		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		ggmlPaths := []string{discover.LibOllamaPath}
		if len(compatible) > 0 {
			c := compatible[0]
			if libpath, ok := libs[c]; ok {
				slog.Debug("adding gpu library", "path", libpath)
				libraryPaths = append(libraryPaths, libpath)
				ggmlPaths = append(ggmlPaths, libpath)
			}
		}

		// Note: we always put the dependency path first
		// since this was the exact version we compiled/linked against
		if gpus[0].DependencyPath != nil {
			slog.Debug("adding gpu dependency paths", "paths", gpus[0].DependencyPath)
			// assume gpus from the same library have the same dependency path
			libraryPaths = append(gpus[0].DependencyPath, libraryPaths...)
		}

		// finally, add the root library path
		libraryPaths = append(libraryPaths, discover.LibOllamaPath)

		s := &llmServer{
			port:          port,
			cmd:           exec.Command(exe, finalParams...),
			status:        NewStatusWriter(os.Stderr),
			options:       opts,
			modelPath:     modelPath,
			llamaModel:    llamaModel,
			textProcessor: textProcessor,
			estimate:      estimate,
			numParallel:   numParallel,
			sem:           semaphore.NewWeighted(int64(numParallel)),
			totalLayers:   f.KV().BlockCount() + 1,
			gpus:          gpus,
			done:          make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status
		s.cmd.SysProcAttr = LlamaServerSysProcAttr

		s.cmd.Env = append(s.cmd.Env, "OLLAMA_LIBRARY_PATH="+strings.Join(ggmlPaths, string(filepath.ListSeparator)))

		envWorkarounds := [][2]string{}
		for _, gpu := range gpus {
			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
		}
		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			} else if len(envWorkarounds) != 0 {
				for _, kv := range envWorkarounds {
					if strings.EqualFold(cmp[0], kv[0]) {
						s.cmd.Env[i] = kv[0] + "=" + kv[1]
					}
				}
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd)
		if envconfig.Debug() {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "OLLAMA_") ||
					strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCR_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "GPU_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") ||
					strings.HasPrefix(ev, "DYLD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			var msg string
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err := fmt.Errorf("error starting runner: %v %s", err, msg)
			if len(compatible) == 0 {
				if llamaModel != nil {
					llama.FreeModel(llamaModel)
				}
				return nil, err
			}

			slog.Warn("unable to start runner with compatible gpu", "error", err, "compatible", compatible)
			compatible = compatible[1:]
			continue
		}

		// reap subprocess when it exits
		go func() {
			err := s.cmd.Wait()
			// Favor a more detailed message over the process exit status
			if err != nil && s.status != nil && s.status.LastErrMsg != "" {
				slog.Error("llama runner terminated", "error", err)
				if strings.Contains(s.status.LastErrMsg, "unknown model") {
					s.status.LastErrMsg = "this model is not supported by your version of Ollama. You may need to upgrade"
				}
				s.done <- errors.New(s.status.LastErrMsg)
			} else {
				s.done <- err
			}
		}()

		return s, nil
	}
}

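// ServerStatus describes the runner's state as reported by its /health
// endpoint.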
type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) String() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResponse struct {
	Status   ServerStatus `json:"status"`
	Progress float32      `json:"progress"`
}

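// getServerStatus queries the runner's /health endpoint, failing fast if the
// subprocess has already exited.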
func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState)
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		if strings.Contains(err.Error(), "connection refused") {
			return ServerStatusNotResponding, errors.New("connection refused")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var ssr ServerStatusResponse
	if err := json.Unmarshal(body, &ssr); err != nil {
		return ServerStatusError, fmt.Errorf("health unmarshal encode response: %w", err)
	}

	switch ssr.Status {
	case ServerStatusLoadingModel:
		s.loadProgress = ssr.Progress
		return ssr.Status, nil
	case ServerStatusReady, ServerStatusNoSlotsAvailable:
		return ssr.Status, nil
	default:
		return ssr.Status, fmt.Errorf("server error: %+v", ssr)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

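// WaitUntilRunning polls the runner until the model is fully loaded and the
// server reports ready, resetting the stall timer whenever load progress
// advances.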
func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := envconfig.LoadTimeout()    // If no progress happens
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			return fmt.Errorf("llama runner process has terminated: %w", err)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status)
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status)
				stallTimer = time.Now().Add(stallDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

func (s *llmServer) Pid() int {
	if s.cmd != nil && s.cmd.Process != nil {
		return s.cmd.Process.Pid
	}
	return -1
}

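// grammarJSON is a grammar that constrains generation to valid JSON output;
// it is applied when a request sets format "json".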
var grammarJSON = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws
object ::=
  "{" ws (
         string ":" ws value
    ("," ws string ":" ws value)*
  )? ws "}"
array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? ws "]"
string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\""
number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)?
# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

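// maxBufferSize bounds the bufio.Scanner buffer used to read streamed
// completion responses from the runner.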
const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data          []byte `json:"data"`
	ID            int    `json:"id"`
	AspectRatioID int    `json:"aspect_ratio_id"`
}

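// CompletionRequest is the request body sent to the runner's /completion
// endpoint.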
type CompletionRequest struct {
	Prompt  string
	Format  json.RawMessage
	Images  []ImageData
	Options *api.Options

	Grammar string // set before sending the request to the subprocess
}

// DoneReason represents the reason why a completion response is done
type DoneReason int

const (
	// DoneReasonStop indicates the completion stopped naturally
	DoneReasonStop DoneReason = iota
	// DoneReasonLength indicates the completion stopped due to length limits
	DoneReasonLength
	// DoneReasonConnectionClosed indicates the completion stopped due to the connection being closed
	DoneReasonConnectionClosed
)

func (d DoneReason) String() string {
	switch d {
	case DoneReasonLength:
		return "length"
	case DoneReasonStop:
		return "stop"
	default:
		return "" // closed
	}
}

type CompletionResponse struct {
	Content            string        `json:"content"`
	DoneReason         DoneReason    `json:"done_reason"`
	Done               bool          `json:"done"`
	PromptEvalCount    int           `json:"prompt_eval_count"`
	PromptEvalDuration time.Duration `json:"prompt_eval_duration"`
	EvalCount          int           `json:"eval_count"`
	EvalDuration       time.Duration `json:"eval_duration"`
}

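// Completion streams a completion from the runner, converting the requested
// format into a grammar and invoking fn for each response chunk.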
func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if len(req.Format) > 0 {
		switch string(req.Format) {
		case `null`, `""`:
			// Field was set, but "missing" a value. We accept
			// these as "not set".
			break
		case `"json"`:
			req.Grammar = grammarJSON
		default:
			if req.Format[0] != '{' {
				return fmt.Errorf("invalid format: %q; expected \"json\" or a valid JSON Schema object", req.Format)
			}

			// User provided a JSON schema
			g := llama.SchemaToGrammar(req.Format)
			if g == nil {
				return fmt.Errorf("invalid JSON schema in format")
			}
			req.Grammar = string(g)
		}
	}

	if req.Options == nil {
		opts := api.DefaultOptions()
		req.Options = &opts
	}

	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting completion request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return err
	}
	defer s.sem.Release(1)

	// put an upper limit on num_predict to avoid the model running on forever
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status)
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(req); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated, this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				evt = line
			}

			var c CompletionResponse
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}
			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Done {
				fn(c)
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") || strings.Contains(err.Error(), "forcibly closed") {
			s.Close()
			var msg string
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			} else {
				msg = err.Error()
			}
			return fmt.Errorf("an error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float32 `json:"embedding"`
}

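// Embedding requests an embedding vector for the given input from the
// runner's /embedding endpoint.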
func (s *llmServer) Embedding(ctx context.Context, input string) ([]float32, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting embedding request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status)
	}

	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	r, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	r.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(r)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var e EmbeddingResponse
	if err := json.Unmarshal(body, &e); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return e.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

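// Tokenize encodes content into token ids using the in-process llama.cpp
// vocabulary or the Ollama engine's text processor, whichever is loaded.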
func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	s.llamaModelLock.Lock()
	defer s.llamaModelLock.Unlock()

	if s.llamaModel != nil {
		return s.llamaModel.Tokenize(content, false, true)
	}
	if s.textProcessor != nil {
		tokens, err := s.textProcessor.Encode(content, false)
		if err != nil {
			return nil, err
		}
		toks := make([]int, len(tokens))
		for i, t := range tokens {
			toks[i] = int(t)
		}
		return toks, nil
	}
	// not reached
	return nil, fmt.Errorf("no tokenizer configured")
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

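// Detokenize converts token ids back into text using whichever engine is
// loaded.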
func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	s.llamaModelLock.Lock()
	defer s.llamaModelLock.Unlock()

	if s.llamaModel != nil {
		var resp string
		for _, token := range tokens {
			resp += s.llamaModel.TokenToPiece(token)
		}
		return resp, nil
	}
	if s.textProcessor != nil {
		toks := make([]int32, len(tokens))
		for i, t := range tokens {
			toks[i] = int32(t)
		}
		content, err := s.textProcessor.Decode(toks)
		if err != nil {
			return "", err
		}
		return content, nil
	}
	// not reached
	return "", fmt.Errorf("no tokenizer configured")
}

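// Close frees the vocab-only llama model, if any, and stops the runner
// subprocess, waiting for it to exit.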
func (s *llmServer) Close() error {
	s.llamaModelLock.Lock()
	if s.llamaModel != nil {
		llama.FreeModel(s.llamaModel)
		s.llamaModel = nil
	}
	s.llamaModelLock.Unlock()

	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

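// EstimatedVRAMByGPU reports the portion of the memory estimate attributed
// to the GPU with the given id.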
func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			if i < len(s.estimate.GPUSizes) {
				return s.estimate.GPUSizes[i]
			}
		}
	}
	return 0
}