package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, prompt string) ([]float64, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
}
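
// A typical lifecycle, sketched for illustration only (error handling
// elided; gpus, modelPath, ggml, and opts are assumed to have been
// prepared by the caller):
//
//	s, err := NewLlamaServer(gpus, modelPath, ggml, nil, nil, opts)
//	if err != nil { ... }
//	defer s.Close()
//	if err := s.WaitUntilRunning(ctx); err != nil { ... }
//	_ = s.Completion(ctx, CompletionRequest{Prompt: "Why is the sky blue?"}, func(r CompletionResponse) {
//		fmt.Print(r.Content)
//	})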

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port    int
	cmd     *exec.Cmd
	done    chan error // Channel to signal when the process exits
	status  *StatusWriter
	options api.Options

	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount     int
	gpus         gpu.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration   // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted
}

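// LoadModel reads the model file at the given path and decodes its GGML
// metadata, failing fast if the file does not exist.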
func LoadModel(model string) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options) (LlamaServer, error) {
	var err error
	var cpuRunner string
	var estimate MemoryEstimate
	var systemTotalMemory uint64
	var systemFreeMemory uint64

	systemMemInfo, err := gpu.GetCPUMem()
	if err != nil {
		slog.Error("failed to lookup system memory", "error", err)
	} else {
		systemTotalMemory = systemMemInfo.TotalMemory
		systemFreeMemory = systemMemInfo.FreeMemory
		slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory))
	}

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
	if opts.NumGPU == 0 {
		gpus = gpu.GetCPUInfo()
	}
	if len(gpus) == 1 && gpus[0].Library == "cpu" {
		cpuRunner = serverForCpu()
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)
	} else {
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)

		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			cpuRunner = serverForCpu()
			gpus = gpu.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	estimate.log()

	// Loop through potential servers
	finalErr := errors.New("no suitable llama servers found")

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	availableServers := availableServers()
	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = serversForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant
	}
	demandLib := envconfig.LLMLibrary
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
			if strings.HasPrefix(demandLib, "cpu") {
				// Omit the GPU flag to silence the warning
				opts.NumGPU = -1
			}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}

	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--embedding",
	}

	params = append(params, "--log-disable")

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", fmt.Sprintf("%d", opts.NumGPU))
	}

	if envconfig.Debug {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	flashAttnEnabled := envconfig.FlashAttention

	for _, g := range gpus {
		// only cuda (compute capability 7+) and metal support flash attention
		if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) {
			flashAttnEnabled = false
		}

		// mmap has issues with partial offloading on metal
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
			opts.UseMMap = api.TriStateFalse
		}
	}

	if flashAttnEnabled {
		params = append(params, "--flash-attn")
	}

	// Windows CUDA should not use mmap for best performance
	// On Linux, with a model larger than free space, mmap leads to thrashing
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == api.TriStateUndefined) ||
		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == api.TriStateUndefined) ||
		opts.UseMMap == api.TriStateFalse {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	numParallel := envconfig.NumParallel

	// TODO (jmorganca): multimodal models don't support parallel yet
	// see https://github.com/ollama/ollama/issues/4165
	if len(projectors) > 0 {
		numParallel = 1
		slog.Warn("multimodal models don't support parallel requests yet")
	}

	params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	for i := range len(servers) {
		dir := availableServers[servers[i]]
		if dir == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		if strings.HasPrefix(servers[i], "cpu") {
			gpus = gpu.GetCPUInfo()
		}

		// Find an available port; retry on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// prepend the server directory to LD_LIBRARY_PATH/PATH and the parent dir for common dependencies
		libraryPaths := []string{dir, filepath.Dir(dir)}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// Append our runner directory to the path
			// This will favor system libraries over our bundled library dependencies
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		// Note: we always put the dependency path first
		// since this was the exact version we verified for AMD GPUs
		// and we favor what the user had in their path
		if gpus[0].DependencyPath != "" {
			// TODO refine for multi-gpu support
			libraryPaths = append([]string{gpus[0].DependencyPath}, libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server += ".exe"
		}

		// Detect tmp cleaners wiping out the file
		_, err := os.Stat(server)
		if errors.Is(err, os.ErrNotExist) {
			slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err)
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
		}

		s := &llmServer{
			port:        port,
			cmd:         exec.Command(server, finalParams...),
			status:      NewStatusWriter(os.Stderr),
			options:     opts,
			estimate:    estimate,
			sem:         semaphore.NewWeighted(int64(numParallel)),
			totalLayers: ggml.KV().BlockCount() + 1,
			gpus:        gpus,
			done:        make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status

		envWorkarounds := [][2]string{}
		for _, gpu := range gpus {
			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
		}
		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			} else if len(envWorkarounds) != 0 {
				for _, kv := range envWorkarounds {
					if strings.EqualFold(cmp[0], kv[0]) {
						s.cmd.Env[i] = kv[0] + "=" + kv[1]
					}
				}
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			// Detect permission denied and augment the message about noexec
			if errors.Is(err, os.ErrPermission) {
				finalErr = fmt.Errorf("unable to start server %w. %s may have noexec set. Set OLLAMA_TMPDIR for server to a writable executable directory", err, dir)
				continue
			}
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// reap subprocess when it exits
		go func() {
			s.done <- s.cmd.Wait()
		}()

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}

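// projectorMemoryRequirements sums the tensor sizes in a projector file as
// a rough estimate of the memory it will need; on any error it returns 0
// rather than failing.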
func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

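// ServerStatusResp mirrors the JSON body returned by the runner's /health
// endpoint.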
type ServerStatusResp struct {
	Status          string  `json:"status"`
	SlotsIdle       int     `json:"slots_idle"`
	SlotsProcessing int     `json:"slots_processing"`
	Error           string  `json:"error"`
	Progress        float32 `json:"progress"`
}

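// getServerStatus queries the runner's /health endpoint, failing fast if
// the subprocess has already exited, and records load progress as a side
// effect while the model is loading.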
func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it; log some more details to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("health unmarshal encode response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		s.loadProgress = status.Progress
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

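// Ping checks that the server subprocess is responsive, logging the
// underlying status error at debug level if it is not.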
func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

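// WaitUntilRunning blocks until the runner reports ready, the context is
// cancelled, or the load stalls. The stall timer is reset whenever load
// progress advances; once progress reaches 100% the runner gets one final
// grace period to come online.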
func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := 5 * time.Minute            // If no progress happens
	finalLoadDuration := 5 * time.Minute        // After we hit 100%, give the runner more time to come online
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
				stallTimer = time.Now().Add(finalLoadDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options api.Options
}

type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

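// Completion streams a completion from the runner, invoking fn once per
// response chunk and a final time with Done set. Concurrency is bounded by
// the parallel-slot semaphore, which is held for the whole request.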
func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// only allow a maximum of 10 "context shifts" to avoid infinite generation
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
		slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
	}

	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("Prompt does not specify that the LLM should respond in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
		}
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated; this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				return fmt.Errorf("error parsing llm response stream: %s", line)
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			s.Close()
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("an unknown error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

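// Embedding computes the embedding vector for prompt via the runner's
// /embedding endpoint, holding a parallel slot for the duration of the
// request.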
func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return embedding.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

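// Tokenize converts content into model token ids via the runner's
// /tokenize endpoint.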
func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

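// Detokenize converts token ids back into text via the runner's
// /detokenize endpoint.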
func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}

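// Close kills the runner subprocess and waits for it to exit before
// returning.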
func (s *llmServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			return s.estimate.GPUSizes[i]
		}
	}
	return 0
}

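// parseDurationMs converts a millisecond count, as reported in the
// runner's timing stats, into a time.Duration; for example
// parseDurationMs(1500) yields 1500 * time.Millisecond.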
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}