package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/discover"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/fs/ggml"
	"github.com/ollama/ollama/llama"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, input string) ([]float32, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
}

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port        int
	cmd         *exec.Cmd
	done        chan error // Channel to signal when the process exits
	status      *StatusWriter
	options     api.Options
	numParallel int
	modelPath   string
	modelLock   sync.Mutex   // Temporary until we switch fully to Go server
	model       *llama.Model // If non-nil, the runner is a new Go server

	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount     int
	gpus         discover.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration        // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// the maxArraySize is negative, all arrays are collected.
func LoadModel(model string, maxArraySize int) (*ggml.GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := ggml.Decode(f, maxArraySize)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus discover.GpuInfoList, model string, f *ggml.GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
	systemInfo := discover.GetSystemInfo()
	systemTotalMemory := systemInfo.System.TotalMemory
	systemFreeMemory := systemInfo.System.FreeMemory
	systemSwapFreeMemory := systemInfo.System.FreeSwap
	slog.Info("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
	if opts.NumGPU == 0 {
		gpus = discover.GetCPUInfo()
	}

	estimate := EstimateGPULayers(gpus, f, projectors, opts)
	if len(gpus) > 1 || gpus[0].Library != "cpu" {
		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			gpus = discover.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	// On linux and windows, over-allocating CPU memory will almost always result in an error
	// Darwin has fully dynamic swap so has no direct concept of free swap space
	if runtime.GOOS != "darwin" {
		systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize
		available := systemFreeMemory + systemSwapFreeMemory
		if systemMemoryRequired > available {
			slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", format.HumanBytes2(available), "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory))
			return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available))
		}
	}

	slog.Info("offload", "", estimate)

	params := []string{
		"--model", model,
		"--ctx-size", strconv.Itoa(opts.NumCtx),
		"--batch-size", strconv.Itoa(opts.NumBatch),
	}

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", strconv.Itoa(opts.NumGPU))
	}

	if envconfig.Debug() {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", strconv.Itoa(opts.MainGPU))
	}

	if len(adapters) > 0 {
		for _, adapter := range adapters {
			params = append(params, "--lora", adapter)
		}
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	defaultThreads := systemInfo.GetOptimalThreadCount()
	if opts.NumThread > 0 {
		params = append(params, "--threads", strconv.Itoa(opts.NumThread))
	} else if defaultThreads > 0 {
		params = append(params, "--threads", strconv.Itoa(defaultThreads))
	}

	fa := envconfig.FlashAttention()
	if fa && !gpus.FlashAttentionSupported() {
		slog.Warn("flash attention enabled but not supported by gpu")
		fa = false
	}

	if fa && !f.SupportsFlashAttention() {
		slog.Warn("flash attention enabled but not supported by model")
		fa = false
	}

	kvct := strings.ToLower(envconfig.KvCacheType())

	if fa {
		slog.Info("enabling flash attention")
		params = append(params, "--flash-attn")

		// Flash Attention also supports kv cache quantization
		// Enable if the requested and kv cache type is supported by the model
		if kvct != "" && f.SupportsKVCacheType(kvct) {
			params = append(params, "--kv-cache-type", kvct)
		} else {
			slog.Warn("kv cache type not supported by model", "type", kvct)
		}
	} else if kvct != "" && kvct != "f16" {
		slog.Warn("quantized kv cache requested but flash attention disabled", "type", kvct)
	}

	// mmap has issues with partial offloading on metal
	for _, g := range gpus {
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < f.KV().BlockCount()+1 {
			opts.UseMMap = new(bool)
			*opts.UseMMap = false
		}
	}

	// Windows CUDA should not use mmap for best performance
	// Linux with a model larger than free space, mmap leads to thrashing
	// For CPU loads we want the memory to be allocated, not FS cache
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) ||
		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) ||
		(gpus[0].Library == "cpu" && opts.UseMMap == nil) ||
		(opts.UseMMap != nil && !*opts.UseMMap) {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	// TODO - NUMA support currently doesn't work properly

	params = append(params, "--parallel", strconv.Itoa(numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	if envconfig.MultiUserCache() {
		params = append(params, "--multiuser-cache")
	}

	libs := make(map[string]string)
	if entries, err := os.ReadDir(discover.LibOllamaPath); err == nil {
		for _, entry := range entries {
			libs[entry.Name()] = filepath.Join(discover.LibOllamaPath, entry.Name())
		}
	}

	lib := gpus[0].RunnerName()
	requested := envconfig.LLMLibrary()
	if libs[requested] != "" {
		slog.Info("using requested gpu library", "requested", requested)
		lib = requested
	}

	var compatible []string
	for k := range libs {
		// exact match first
		if k == lib {
			compatible = append([]string{k}, compatible...)
			continue
		}

		// then match the family (e.g. 'cuda')
		if strings.Split(k, "_")[0] == strings.Split(lib, "_")[0] {
			compatible = append(compatible, k)
		}
	}
	slog.Debug("compatible gpu libraries", "compatible", compatible)

	// iterate through compatible GPU libraries such as 'cuda_v12', 'cuda_v11', 'rocm', etc.
	// adding each library's respective path to the LD_LIBRARY_PATH, until finally running
	// without any LD_LIBRARY_PATH flags
	for {
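		// Reserve an ephemeral port by briefly binding localhost:0 and releasing
		// it; the runner subprocess binds it right after. If that fails, fall
		// back to a random port in the dynamic range.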
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed, using random port")
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := []string{"runner"}
		if envconfig.NewEngine() {
			finalParams = append(finalParams, "--ollama-engine")
		}
		finalParams = append(finalParams, params...)
		finalParams = append(finalParams, "--port", strconv.Itoa(port))

		var pathEnv string
		switch runtime.GOOS {
		case "windows":
			pathEnv = "PATH"
		case "darwin":
			pathEnv = "DYLD_LIBRARY_PATH"
		default:
			pathEnv = "LD_LIBRARY_PATH"
		}

		var libraryPaths []string
		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		if len(compatible) > 0 {
			c := compatible[0]
			if libpath, ok := libs[c]; ok {
				slog.Debug("adding gpu library", "path", libpath)
				libraryPaths = append(libraryPaths, libpath)
			}
		}

		// Note: we always put the dependency path first
		// since this was the exact version we compiled/linked against
		if gpus[0].DependencyPath != nil {
			slog.Debug("adding gpu dependency paths", "paths", gpus[0].DependencyPath)
			// assume gpus from the same library have the same dependency path
			libraryPaths = append(gpus[0].DependencyPath, libraryPaths...)
		}

		// finally, add the root library path
		libraryPaths = append(libraryPaths, discover.LibOllamaPath)

		exe, err := os.Executable()
		if err != nil {
			return nil, fmt.Errorf("unable to lookup executable path: %w", err)
		}

		if eval, err := filepath.EvalSymlinks(exe); err == nil {
			exe = eval
		}

		// TODO - once fully switched to the Go runner, load the model here for tokenize/detokenize cgo access
		s := &llmServer{
			port:        port,
			cmd:         exec.Command(exe, finalParams...),
			status:      NewStatusWriter(os.Stderr),
			options:     opts,
			modelPath:   model,
			estimate:    estimate,
			numParallel: numParallel,
			sem:         semaphore.NewWeighted(int64(numParallel)),
			totalLayers: f.KV().BlockCount() + 1,
			gpus:        gpus,
			done:        make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status
		s.cmd.SysProcAttr = LlamaServerSysProcAttr

		envWorkarounds := [][2]string{}
		for _, gpu := range gpus {
			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
		}
		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			} else if len(envWorkarounds) != 0 {
				for _, kv := range envWorkarounds {
					if strings.EqualFold(cmp[0], kv[0]) {
						s.cmd.Env[i] = kv[0] + "=" + kv[1]
					}
				}
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug() {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCR_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "GPU_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") ||
					strings.HasPrefix(ev, "DYLD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			var msg string
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err := fmt.Errorf("error starting runner: %v %s", err, msg)
			if len(compatible) == 0 {
				return nil, err
			}

			slog.Warn("unable to start runner with compatible gpu", "error", err, "compatible", compatible)
			compatible = compatible[1:]
			continue
		}

		// reap subprocess when it exits
		go func() {
			err := s.cmd.Wait()
			// Favor a more detailed message over the process exit status
			if err != nil && s.status != nil && s.status.LastErrMsg != "" {
				slog.Error("llama runner terminated", "error", err)
				if strings.Contains(s.status.LastErrMsg, "unknown model") {
					s.status.LastErrMsg = "this model is not supported by your version of Ollama. You may need to upgrade"
				}
				s.done <- errors.New(s.status.LastErrMsg)
			} else {
				s.done <- err
			}
		}()

		return s, nil
	}
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string  `json:"status"`
	SlotsIdle       int     `json:"slots_idle"`
	SlotsProcessing int     `json:"slots_processing"`
	Error           string  `json:"error"`
	Progress        float32 `json:"progress"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if its exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("health unmarshal encode response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		s.loadProgress = status.Progress
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := envconfig.LoadTimeout()    // If no progress happens
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			return fmt.Errorf("llama runner process has terminated: %w", err)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
				stallTimer = time.Now().Add(stallDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

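// grammarJSON is a llama.cpp-style (GBNF) grammar that constrains sampling to
// syntactically valid JSON; it is sent to the runner when a completion request
// asks for format "json".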
var grammarJSON = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws
object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws
array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws
string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws
number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws
# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data          []byte `json:"data"`
	ID            int    `json:"id"`
	AspectRatioID int    `json:"aspect_ratio_id"`
}

type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  json.RawMessage
	Images  []ImageData
	Options *api.Options
}

type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
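	// Map the request options onto the llama.cpp server's /completion
	// parameters; the response is streamed back and parsed below.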
	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"min_p":             req.Options.MinP,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	if len(req.Format) > 0 {
		switch string(req.Format) {
		case `null`, `""`:
			// Field was set, but "missing" a value. We accept
			// these as "not set".
			break
		case `"json"`:
			request["grammar"] = grammarJSON
		default:
			if req.Format[0] != '{' {
				return fmt.Errorf("invalid format: %q; expected \"json\" or a valid JSON Schema object", req.Format)
			}

			// User provided a JSON schema
			g := llama.SchemaToGrammar(req.Format)
			if g == nil {
				return fmt.Errorf("invalid JSON schema in format")
			}
			request["grammar"] = string(g)
		}
	}

	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting completion request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return err
	}
	defer s.sem.Release(1)

	// put an upper limit on num_predict to avoid the model running on forever
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated, this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			// slog.Debug("got line", "line", string(line))
			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				evt = line
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}
			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") || strings.Contains(err.Error(), "forcibly closed") {
			s.Close()
			var msg string
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			} else {
				msg = err.Error()
			}
			return fmt.Errorf("an error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float32 `json:"embedding"`
}

func (s *llmServer) Embedding(ctx context.Context, input string) ([]float32, error) {
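	// Embedding requests share the runner's semaphore with completions,
	// bounding in-flight requests to numParallel.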
	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting embedding request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	r, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	r.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(r)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var e EmbeddingResponse
	if err := json.Unmarshal(body, &e); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return e.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	s.modelLock.Lock()
	defer s.modelLock.Unlock()
	if s.model != nil {
		return s.model.Tokenize(content, false, true)
	}

	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()
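
	// A 404 means this runner does not expose /tokenize (the new Go runner);
	// load just the vocabulary via cgo and tokenize locally instead.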
	if resp.StatusCode == http.StatusNotFound {
		if s.model == nil {
			slog.Debug("new runner detected, loading model for cgo tokenization")
			m, err := llama.LoadModelFromFile(s.modelPath, llama.ModelParams{VocabOnly: true})
			if err != nil {
				return nil, err
			}
			s.model = m
		}
		return s.model.Tokenize(content, false, true)
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	s.modelLock.Lock()
	defer s.modelLock.Unlock()
	if s.model != nil {
		var resp string
		for _, token := range tokens {
			resp += s.model.TokenToPiece(token)
		}
		return resp, nil
	}
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()
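
	// As with Tokenize, fall back to local cgo token-to-piece conversion when
	// the runner does not provide a /detokenize endpoint.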
	if resp.StatusCode == http.StatusNotFound {
		if s.model == nil {
			slog.Debug("new runner detected, loading model for cgo tokenization")
			m, err := llama.LoadModelFromFile(s.modelPath, llama.ModelParams{VocabOnly: true})
			if err != nil {
				return "", err
			}
			s.model = m
		}
		var resp string
		for _, token := range tokens {
			resp += s.model.TokenToPiece(token)
		}
		return resp, nil
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}

func (s *llmServer) Close() error {
	s.modelLock.Lock()
	if s.model != nil {
		llama.FreeModel(s.model)
		s.model = nil
	}
	s.modelLock.Unlock()

	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			if i < len(s.estimate.GPUSizes) {
				return s.estimate.GPUSizes[i]
			}
		}
	}
	return 0
}

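// parseDurationMs converts a millisecond count reported by the runner into a
// time.Duration.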
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}