package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/discover"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/fs/ggml"
	"github.com/ollama/ollama/llama"
	"github.com/ollama/ollama/model"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, input string) ([]float32, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
}

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port        int
	cmd         *exec.Cmd
	done        chan error // Channel to signal when the process exits
	status      *StatusWriter
	options     api.Options
	numParallel int
	modelPath   string

	// llamaModel is an instance of the cgo llama.cpp model definition
	// nil if this server is running the new engine
	llamaModel     *llama.Model
	llamaModelLock sync.Mutex

	// textProcessor handles text encoding/decoding for the model in the Ollama engine
	// nil if this server is running the llama.cpp based engine
	textProcessor model.TextProcessor

	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount     int
	gpus         discover.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration        // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// the maxArraySize is negative, all arrays are collected.
func LoadModel(model string, maxArraySize int) (*ggml.GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := ggml.Decode(f, maxArraySize)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus discover.GpuInfoList, modelPath string, f *ggml.GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
	systemInfo := discover.GetSystemInfo()
	systemTotalMemory := systemInfo.System.TotalMemory
	systemFreeMemory := systemInfo.System.FreeMemory
	systemSwapFreeMemory := systemInfo.System.FreeSwap
	slog.Info("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
	if opts.NumGPU == 0 {
		gpus = discover.GetCPUInfo()
	}

	estimate := EstimateGPULayers(gpus, f, projectors, opts, numParallel)
	if len(gpus) > 1 || gpus[0].Library != "cpu" {
		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			gpus = discover.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	// On linux and windows, over-allocating CPU memory will almost always result in an error
	// Darwin has fully dynamic swap so has no direct concept of free swap space
	if runtime.GOOS != "darwin" {
		systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize
		available := systemFreeMemory + systemSwapFreeMemory
		if systemMemoryRequired > available {
			slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", available, "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory))
			return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available))
		}
	}

	slog.Info("offload", "", estimate)

	params := []string{
		"--model", modelPath,
		"--ctx-size", strconv.Itoa(opts.NumCtx),
		"--batch-size", strconv.Itoa(opts.NumBatch),
	}

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", strconv.Itoa(opts.NumGPU))
	}

	if envconfig.Debug() {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", strconv.Itoa(opts.MainGPU))
	}

	if len(adapters) > 0 {
		for _, adapter := range adapters {
			params = append(params, "--lora", adapter)
		}
	}

	defaultThreads := systemInfo.GetOptimalThreadCount()
	if opts.NumThread > 0 {
		params = append(params, "--threads", strconv.Itoa(opts.NumThread))
	} else if defaultThreads > 0 {
		params = append(params, "--threads", strconv.Itoa(defaultThreads))
	}

	fa := envconfig.FlashAttention()
	if fa && !gpus.FlashAttentionSupported() {
		slog.Warn("flash attention enabled but not supported by gpu")
		fa = false
	}

	if fa && !f.SupportsFlashAttention() {
		slog.Warn("flash attention enabled but not supported by model")
		fa = false
	}

	kvct := strings.ToLower(envconfig.KvCacheType())

	if fa {
		slog.Info("enabling flash attention")
		params = append(params, "--flash-attn")

		// Flash Attention also supports kv cache quantization
		// Enable it if requested and the kv cache type is supported by the model
		if kvct != "" && f.SupportsKVCacheType(kvct) {
			params = append(params, "--kv-cache-type", kvct)
		} else {
			slog.Warn("kv cache type not supported by model", "type", kvct)
		}
	} else if kvct != "" && kvct != "f16" {
		slog.Warn("quantized kv cache requested but flash attention disabled", "type", kvct)
	}

	// mmap has issues with partial offloading on metal
	for _, g := range gpus {
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < f.KV().BlockCount()+1 {
			opts.UseMMap = new(bool)
			*opts.UseMMap = false
		}
	}

	// Windows CUDA should not use mmap for best performance
	// Linux  with a model larger than free space, mmap leads to thrashing
	// For CPU loads we want the memory to be allocated, not FS cache
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) ||
		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) ||
		(gpus[0].Library == "cpu" && opts.UseMMap == nil) ||
		(opts.UseMMap != nil && !*opts.UseMMap) {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	// TODO - NUMA support currently doesn't work properly

	params = append(params, "--parallel", strconv.Itoa(numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	if envconfig.MultiUserCache() {
		params = append(params, "--multiuser-cache")
	}

	libs := make(map[string]string)
	if entries, err := os.ReadDir(discover.LibOllamaPath); err == nil {
		for _, entry := range entries {
			libs[entry.Name()] = filepath.Join(discover.LibOllamaPath, entry.Name())
		}
	}

	lib := gpus[0].RunnerName()
	requested := envconfig.LLMLibrary()
	if libs[requested] != "" {
		slog.Info("using requested gpu library", "requested", requested)
		lib = requested
	}

	var compatible []string
	for k := range libs {
		// exact match first
		if k == lib {
			compatible = append([]string{k}, compatible...)
			continue
		}

		// then match the family (e.g. 'cuda')
		if strings.Split(k, "_")[0] == strings.Split(lib, "_")[0] {
			compatible = append(compatible, k)
		}
	}
	slog.Debug("compatible gpu libraries", "compatible", compatible)
	exe, err := os.Executable()
	if err != nil {
		return nil, fmt.Errorf("unable to lookup executable path: %w", err)
	}

	if eval, err := filepath.EvalSymlinks(exe); err == nil {
		exe = eval
	}

	var llamaModel *llama.Model
	var textProcessor model.TextProcessor
	if envconfig.NewEngine() || f.KV().OllamaEngineRequired() {
		textProcessor, err = model.NewTextProcessor(modelPath)
		if err != nil {
			// To prepare for opt-out mode, instead of treating this as an error, we fall back to the old runner
			slog.Debug("model not yet supported by Ollama engine, switching to compatibility mode", "model", modelPath, "error", err)
		}
	}
	if textProcessor == nil {
		llamaModel, err = llama.LoadModelFromFile(modelPath, llama.ModelParams{VocabOnly: true})
		if err != nil {
			return nil, err
		}
	}

	if len(projectors) > 0 && llamaModel != nil {
		params = append(params, "--mmproj", projectors[0])
	}

	// iterate through compatible GPU libraries such as 'cuda_v12', 'cuda_v11', 'rocm', etc.
	// adding each library's respective path to the LD_LIBRARY_PATH, until finally running
	// without any LD_LIBRARY_PATH flags
	for {
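		// Ask the OS for a free ephemeral port by binding to localhost:0, then close the listener so the runner can claim that port.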
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed, using random port")
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := []string{"runner"}
		if textProcessor != nil {
			// New engine
			// TODO - if we have failure to load scenarios, add logic to retry with the old runner
			finalParams = append(finalParams, "--ollama-engine")
		}
		finalParams = append(finalParams, params...)
		finalParams = append(finalParams, "--port", strconv.Itoa(port))

		var pathEnv string
		switch runtime.GOOS {
		case "windows":
			pathEnv = "PATH"
		case "darwin":
			pathEnv = "DYLD_LIBRARY_PATH"
		default:
			pathEnv = "LD_LIBRARY_PATH"
		}

		var libraryPaths []string
		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		ggmlPaths := []string{discover.LibOllamaPath}
		if len(compatible) > 0 {
			c := compatible[0]
			if libpath, ok := libs[c]; ok {
				slog.Debug("adding gpu library", "path", libpath)
				libraryPaths = append(libraryPaths, libpath)
				ggmlPaths = append(ggmlPaths, libpath)
			}
		}

		// Note: we always put the dependency path first
		// since this was the exact version we compiled/linked against
		if gpus[0].DependencyPath != nil {
			slog.Debug("adding gpu dependency paths", "paths", gpus[0].DependencyPath)
			// assume gpus from the same library have the same dependency path
			libraryPaths = append(gpus[0].DependencyPath, libraryPaths...)
		}

		// finally, add the root library path
		libraryPaths = append(libraryPaths, discover.LibOllamaPath)

		s := &llmServer{
			port:          port,
			cmd:           exec.Command(exe, finalParams...),
			status:        NewStatusWriter(os.Stderr),
			options:       opts,
			modelPath:     modelPath,
			llamaModel:    llamaModel,
			textProcessor: textProcessor,
			estimate:      estimate,
			numParallel:   numParallel,
			sem:           semaphore.NewWeighted(int64(numParallel)),
			totalLayers:   f.KV().BlockCount() + 1,
			gpus:          gpus,
			done:          make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status
		s.cmd.SysProcAttr = LlamaServerSysProcAttr

		s.cmd.Env = append(s.cmd.Env, "OLLAMA_LIBRARY_PATH="+strings.Join(ggmlPaths, string(filepath.ListSeparator)))

		envWorkarounds := [][2]string{}
		for _, gpu := range gpus {
			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
		}
		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			} else if len(envWorkarounds) != 0 {
				for _, kv := range envWorkarounds {
					if strings.EqualFold(cmp[0], kv[0]) {
						s.cmd.Env[i] = kv[0] + "=" + kv[1]
					}
				}
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd)
		if envconfig.Debug() {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "OLLAMA_") ||
					strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCR_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "GPU_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") ||
					strings.HasPrefix(ev, "DYLD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			var msg string
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err := fmt.Errorf("error starting runner: %v %s", err, msg)
			if len(compatible) == 0 {
				if llamaModel != nil {
					llama.FreeModel(llamaModel)
				}
				return nil, err
			}

			slog.Warn("unable to start runner with compatible gpu", "error", err, "compatible", compatible)
			compatible = compatible[1:]
			continue
		}

		// reap subprocess when it exits
		go func() {
			err := s.cmd.Wait()
			// Favor a more detailed message over the process exit status
			if err != nil && s.status != nil && s.status.LastErrMsg != "" {
				slog.Error("llama runner terminated", "error", err)
				if strings.Contains(s.status.LastErrMsg, "unknown model") {
					s.status.LastErrMsg = "this model is not supported by your version of Ollama. You may need to upgrade"
				}
				s.done <- errors.New(s.status.LastErrMsg)
			} else {
				s.done <- err
			}
		}()

		return s, nil
	}
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) String() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResponse struct {
	Status   ServerStatus `json:"status"`
	Progress float32      `json:"progress"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState)
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var ssr ServerStatusResponse
	if err := json.Unmarshal(body, &ssr); err != nil {
		return ServerStatusError, fmt.Errorf("unmarshal health response: %w", err)
	}

	switch ssr.Status {
	case ServerStatusLoadingModel:
		s.loadProgress = ssr.Progress
		return ssr.Status, nil
	case ServerStatusReady, ServerStatusNoSlotsAvailable:
		return ssr.Status, nil
	default:
		return ssr.Status, fmt.Errorf("server error: %+v", ssr)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := envconfig.LoadTimeout()    // If no progress happens
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
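	// lastStatus starts at an invalid value so the first non-ready status from the runner is always logged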
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			return fmt.Errorf("llama runner process has terminated: %w", err)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status)
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status)
				stallTimer = time.Now().Add(stallDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

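// grammarJSON is a GBNF grammar that constrains generation to a valid JSON object; it is applied when a request sets format to "json".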
var grammarJSON = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws
object ::=
  "{" ws (
         string ":" ws value
    ("," ws string ":" ws value)*
  )? ws "}" 
array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? ws "]" 
string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" 
number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? 
# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data          []byte `json:"data"`
	ID            int    `json:"id"`
	AspectRatioID int    `json:"aspect_ratio_id"`
}

type CompletionRequest struct {
	Prompt  string
	Format  json.RawMessage
	Images  []ImageData
	Options *api.Options

	Grammar string // set before sending the request to the subprocess
}

// DoneReason represents the reason why a completion response is done
type DoneReason int

const (
	// DoneReasonStop indicates the completion stopped naturally
	DoneReasonStop DoneReason = iota
	// DoneReasonLength indicates the completion stopped due to length limits
	DoneReasonLength
	// DoneReasonConnectionClosed indicates the completion stopped due to the connection being closed
	DoneReasonConnectionClosed
)

func (d DoneReason) String() string {
	switch d {
	case DoneReasonLength:
		return "length"
	case DoneReasonStop:
		return "stop"
	default:
		return "" // closed
	}
}

type CompletionResponse struct {
	Content            string        `json:"content"`
	DoneReason         DoneReason    `json:"done_reason"`
	Done               bool          `json:"done"`
	PromptEvalCount    int           `json:"prompt_eval_count"`
	PromptEvalDuration time.Duration `json:"prompt_eval_duration"`
	EvalCount          int           `json:"eval_count"`
	EvalDuration       time.Duration `json:"eval_duration"`
}

func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if len(req.Format) > 0 {
		switch string(req.Format) {
		case `null`, `""`:
			// Field was set, but "missing" a value. We accept
			// these as "not set".
			break
		case `"json"`:
			req.Grammar = grammarJSON
		default:
			if req.Format[0] != '{' {
				return fmt.Errorf("invalid format: %q; expected \"json\" or a valid JSON Schema object", req.Format)
			}

			// User provided a JSON schema
			g := llama.SchemaToGrammar(req.Format)
			if g == nil {
				return fmt.Errorf("invalid JSON schema in format")
			}
			req.Grammar = string(g)
		}
	}

	if req.Options == nil {
		opts := api.DefaultOptions()
		req.Options = &opts
	}

	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting completion request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return err
	}
	defer s.sem.Release(1)

	// put an upper limit on num_predict to avoid the model running on forever
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status)
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(req); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)
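	// the runner streams back newline-delimited JSON events, optionally prefixed with "data: "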

	// keep track of the last token generated, this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				evt = line
			}

			var c CompletionResponse
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}
			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Done {
				fn(c)
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") || strings.Contains(err.Error(), "forcibly closed") {
			s.Close()
			var msg string
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			} else {
				msg = err.Error()
			}
			return fmt.Errorf("an error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float32 `json:"embedding"`
}

func (s *llmServer) Embedding(ctx context.Context, input string) ([]float32, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting embedding request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status)
	}

	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	r, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	r.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(r)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var e EmbeddingResponse
	if err := json.Unmarshal(body, &e); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return e.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	s.llamaModelLock.Lock()
	defer s.llamaModelLock.Unlock()

	if s.llamaModel != nil {
		return s.llamaModel.Tokenize(content, false, true)
	}
	if s.textProcessor != nil {
		tokens, err := s.textProcessor.Encode(content, false)
		if err != nil {
			return nil, err
		}
		toks := make([]int, len(tokens))
		for i, t := range tokens {
			toks[i] = int(t)
		}
		return toks, nil
	}
	// not reached
	return nil, fmt.Errorf("no tokenizer configured")
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	s.llamaModelLock.Lock()
	defer s.llamaModelLock.Unlock()

	if s.llamaModel != nil {
		var resp string
		for _, token := range tokens {
			resp += s.llamaModel.TokenToPiece(token)
		}
		return resp, nil
	}
	if s.textProcessor != nil {
		toks := make([]int32, len(tokens))
		for i, t := range tokens {
			toks[i] = int32(t)
		}
		content, err := s.textProcessor.Decode(toks)
		if err != nil {
			return "", err
		}
		return content, nil
	}
	// not reached
	return "", fmt.Errorf("no tokenizer configured")
}

func (s *llmServer) Close() error {
	s.llamaModelLock.Lock()
	if s.llamaModel != nil {
		llama.FreeModel(s.llamaModel)
		s.llamaModel = nil
	}
	s.llamaModelLock.Unlock()

	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			if i < len(s.estimate.GPUSizes) {
				return s.estimate.GPUSizes[i]
			}
		}
	}
	return 0
}