package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/build"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
	"github.com/ollama/ollama/runners"
)

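// LlamaServer is the interface callers use to drive a llama.cpp runner
// subprocess: health checks, streamed completions, embeddings, and
// tokenization over its local HTTP API.
//
// A rough usage sketch (error handling elided; names illustrative):
//
//	s, _ := NewLlamaServer(gpus, modelPath, ggml, nil, nil, opts, 1)
//	defer s.Close()
//	_ = s.WaitUntilRunning(ctx)
//	_ = s.Completion(ctx, CompletionRequest{Prompt: "hi", Options: &opts}, func(r CompletionResponse) {
//		fmt.Print(r.Content)
//	})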
type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, input string) ([]float32, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
}

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port        int
	cmd         *exec.Cmd
	done        chan error // Channel to signal when the process exits
	status      *StatusWriter
	options     api.Options
	numParallel int

	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount     int
	gpus         gpu.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration   // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// the maxArraySize is negative, all arrays are collected.
func LoadModel(model string, maxArraySize int) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f, maxArraySize)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
	var err error
	var cpuRunner string
	var estimate MemoryEstimate
	var systemTotalMemory uint64
	var systemFreeMemory uint64
	var systemSwapFreeMemory uint64

	systemMemInfo, err := gpu.GetCPUMem()
	if err != nil {
		slog.Error("failed to lookup system memory", "error", err)
	} else {
		systemTotalMemory = systemMemInfo.TotalMemory
		systemFreeMemory = systemMemInfo.FreeMemory
		systemSwapFreeMemory = systemMemInfo.FreeSwap
		slog.Info("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))
	}

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
	if opts.NumGPU == 0 {
		gpus = gpu.GetCPUInfo()
	}
	if len(gpus) == 1 && gpus[0].Library == "cpu" {
		cpuRunner = runners.ServerForCpu()
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)
	} else {
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)

		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			cpuRunner = runners.ServerForCpu()
			gpus = gpu.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	// On linux and windows, over-allocating CPU memory will almost always result in an error
	// Darwin has fully dynamic swap so has no direct concept of free swap space
	if runtime.GOOS != "darwin" {
		systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize
		available := systemFreeMemory + systemSwapFreeMemory
		if systemMemoryRequired > available {
			slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", format.HumanBytes2(available), "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory))
			return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available))
		}
	}

	estimate.log()

	// Loop through potential servers
	finalErr := errors.New("no suitable llama servers found")

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	rDir, err := runners.Refresh(build.EmbedFS)
	if err != nil {
		return nil, err
	}

	availableServers := runners.GetAvailableServers(rDir)
	if len(availableServers) == 0 {
		return nil, finalErr
	}
	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = runners.ServersForGpu(gpus[0]) // All GPUs in the list match in Library and Variant
	}
	demandLib := envconfig.LLMLibrary()
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
			if strings.HasPrefix(demandLib, "cpu") {
				// Omit the GPU flag to silence the warning
				opts.NumGPU = -1
			}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}

	params := []string{
		"--model", model,
		"--ctx-size", strconv.Itoa(opts.NumCtx),
		"--batch-size", strconv.Itoa(opts.NumBatch),
		"--embedding",
	}

	params = append(params, "--log-disable")

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", strconv.Itoa(opts.NumGPU))
	}

	if envconfig.Debug() {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", strconv.Itoa(opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", strconv.Itoa(opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	flashAttnEnabled := envconfig.FlashAttention()

	for _, g := range gpus {
		// only CUDA (compute capability 7+) and Metal support flash attention
		if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) {
			flashAttnEnabled = false
		}

		// mmap has issues with partial offloading on metal
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
			opts.UseMMap = new(bool)
			*opts.UseMMap = false
		}
	}

	if flashAttnEnabled {
		params = append(params, "--flash-attn")
	}

	// Windows CUDA should not use mmap for best performance
	// On Linux, with a model larger than free space, mmap leads to thrashing
	// For CPU loads we want the memory to be allocated, not FS cache
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) ||
		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) ||
		(gpus[0].Library == "cpu" && opts.UseMMap == nil) ||
		(opts.UseMMap != nil && !*opts.UseMMap) {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if gpu.IsNUMA() && gpus[0].Library == "cpu" {
		numaMode := "distribute"
		if runtime.GOOS == "linux" {
			if _, err := exec.LookPath("numactl"); err == nil {
				numaMode = "numactl"
			}
		}
		params = append(params, "--numa", numaMode)
	}

	params = append(params, "--parallel", strconv.Itoa(numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	for i := range servers {
		dir := availableServers[servers[i]]
		if dir == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		if strings.HasPrefix(servers[i], "cpu") {
			gpus = gpu.GetCPUInfo()
		}

		// Find an available port, retrying on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// Start with the server directory for the LD_LIBRARY_PATH/PATH
		libraryPaths := []string{dir}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// favor our bundled library dependencies over system libraries
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		// Note: we always put the dependency path first
		// since this was the exact version we compiled/linked against
		if gpus[0].DependencyPath != "" {
			// assume gpus from the same library have the same dependency path
			libraryPaths = append([]string{gpus[0].DependencyPath}, libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server += ".exe"
		}

		// Detect tmp cleaners wiping out the file
		_, err := os.Stat(server)
		if errors.Is(err, os.ErrNotExist) {
			slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err)
			_, err = runners.Refresh(build.EmbedFS)
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
		}

		s := &llmServer{
			port:        port,
			cmd:         exec.Command(server, finalParams...),
			status:      NewStatusWriter(os.Stderr),
			options:     opts,
			estimate:    estimate,
			numParallel: numParallel,
			sem:         semaphore.NewWeighted(int64(numParallel)),
			totalLayers: ggml.KV().BlockCount() + 1,
			gpus:        gpus,
			done:        make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status
		s.cmd.SysProcAttr = LlamaServerSysProcAttr

		envWorkarounds := [][2]string{}
		for _, gpu := range gpus {
			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
		}
		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			} else if len(envWorkarounds) != 0 {
				for _, kv := range envWorkarounds {
					if strings.EqualFold(cmp[0], kv[0]) {
						s.cmd.Env[i] = kv[0] + "=" + kv[1]
					}
				}
			}
		}

		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}

		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug() {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCR_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "GPU_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			// Detect permission denied and augment the message about noexec
			if errors.Is(err, os.ErrPermission) {
				finalErr = fmt.Errorf("unable to start server %w. %s may have noexec set. Set OLLAMA_TMPDIR for server to a writable executable directory", err, dir)
				continue
			}
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// reap subprocess when it exits
		go func() {
			err := s.cmd.Wait()
			// Favor a more detailed message over the process exit status
			if err != nil && s.status != nil && s.status.LastErrMsg != "" {
				slog.Debug("llama runner terminated", "error", err)
				if strings.Contains(s.status.LastErrMsg, "unknown model") {
					s.status.LastErrMsg = "this model is not supported by your version of Ollama. You may need to upgrade"
				}
				s.done <- errors.New(s.status.LastErrMsg)
			} else {
				s.done <- err
			}
		}()

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}

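// projectorMemoryRequirements approximates the memory needed by a multimodal
// projector by summing the sizes of its tensor layers; it returns 0 if the
// file cannot be opened or decoded.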
func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file, 0)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}

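// ServerStatus describes the runner's health as reported by its /health
// endpoint, plus local states for a dead or unresponsive process.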
type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

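// ServerStatusResp mirrors the JSON body returned by the runner's /health
// endpoint.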
type ServerStatusResp struct {
	Status          string  `json:"status"`
	SlotsIdle       int     `json:"slots_idle"`
	SlotsProcessing int     `json:"slots_processing"`
	Error           string  `json:"error"`
	Progress        float32 `json:"progress"`
}

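// getServerStatus queries the runner's /health endpoint, failing fast if the
// subprocess has already exited.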
func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("health unmarshal encode response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		s.loadProgress = status.Progress
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

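// Ping checks that the runner process is alive and its health endpoint is
// reachable.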
func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

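// WaitUntilRunning blocks until the runner reports ready. The stall timer is
// reset whenever load progress advances, so a slow but progressing load is
// tolerated; a load that stalls past the timeout, or a runner that dies,
// returns an error.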
func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := envconfig.LoadTimeout()    // If no progress happens
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			return fmt.Errorf("llama runner process has terminated: %w", err)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
				stallTimer = time.Now().Add(stallDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

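// jsonGrammar is a GBNF grammar that constrains sampling to syntactically
// valid JSON; it is attached to a request when JSON format is asked for.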
const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

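// maxBufferSize caps a single line read from the completion stream.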
const maxBufferSize = 512 * format.KiloByte

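// ImageData is an image attached to a completion request, forwarded to the
// runner for multimodal models.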
type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

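// completion is one chunk of the runner's streamed /completion response.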
type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

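// CompletionRequest describes a prompt to run along with its sampling
// options and any attached images.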
type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options *api.Options
}

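// CompletionResponse is one streamed piece of a completion; the final piece
// has Done set and carries prompt and generation timing counts.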
type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

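// Completion streams a completion from the runner's /completion endpoint,
// calling fn once per generated chunk and a final time with Done set and
// timing counters filled in. Concurrent requests are bounded by the
// parallelism semaphore.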
func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// put an upper limit on num_predict to avoid the model running forever
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
	}

	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"min_p":             req.Options.MinP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("Prompt does not specify that the LLM should respond in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
		}
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated; this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				return fmt.Errorf("error parsing llm response stream: %s", line)
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			s.Close()
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("an unknown error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float32 `json:"embedding"`
}

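// Embedding returns the embedding vector for input from the runner's
// /embedding endpoint.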
func (s *llmServer) Embedding(ctx context.Context, input string) ([]float32, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	r, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	r.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(r)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var e EmbeddingResponse
	if err := json.Unmarshal(body, &e); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return e.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

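// Tokenize converts content into model token IDs using the runner's
// /tokenize endpoint.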
func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

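// Detokenize renders tokens back into text using the runner's /detokenize
// endpoint.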
func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}

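// Close kills the runner subprocess, if any, and waits for it to be reaped
// before returning.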
func (s *llmServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}

		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			return s.estimate.GPUSizes[i]
		}
	}
	return 0
}

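// parseDurationMs converts a millisecond count from the runner's timing
// stats into a time.Duration.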
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}