package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, prompt string) ([]float64, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64
	EstimatedTotal() uint64
}

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port    int
	cmd     *exec.Cmd
	done    chan error // Channel to signal when the process exits
	status  *StatusWriter
	options api.Options

	// TODO - this should be broken down by GPU
	estimatedVRAM  uint64 // Estimated usage of VRAM by the loaded model
	estimatedTotal uint64 // Total size of model
	totalLayers    uint64
	gpuCount       int
	loadDuration   time.Duration // Record how long it took the model to load
	loadProgress   float32

	sem *semaphore.Weighted
}

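// LoadModel verifies that the model file exists, then decodes its GGML metadata.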
func LoadModel(model string) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options) (LlamaServer, error) {
	var err error
	var cpuRunner string
	var estimatedVRAM uint64
	var estimatedTotal uint64
	var systemMemory uint64
	gpuCount := len(gpus)
	if (len(gpus) == 1 && gpus[0].Library == "cpu") || opts.NumGPU == 0 {
		// TODO evaluate system memory to see if we should block the load, or force an unload of another CPU runner
		cpuRunner = serverForCpu()
		gpuCount = 0
		_, _, estimatedTotal = EstimateGPULayers(gpus, ggml, projectors, opts)
	} else {
		if gpus[0].Library == "metal" {
			memInfo, err := gpu.GetCPUMem()
			if err != nil {
				slog.Error("failed to lookup system memory", "error", err)
			} else {
				systemMemory = memInfo.TotalMemory
				slog.Debug("system memory", "total", format.HumanBytes2(systemMemory))
			}
		}
		var layers int
		layers, estimatedVRAM, estimatedTotal = EstimateGPULayers(gpus, ggml, projectors, opts)

		switch {
		case gpus[0].Library == "metal" && estimatedVRAM > systemMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			cpuRunner = serverForCpu()
			gpuCount = 0
		case opts.NumGPU < 0 && layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = layers
		}
	}

	// Loop through potential servers
	finalErr := errors.New("no suitable llama servers found")

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	availableServers := availableServers()
	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = serversForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant
	}
	demandLib := envconfig.LLMLibrary
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
			if strings.HasPrefix(demandLib, "cpu") {
				// Omit the GPU flag to silence the warning
				opts.NumGPU = -1
			}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}

	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--embedding",
	}

	params = append(params, "--log-disable")

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", fmt.Sprintf("%d", opts.NumGPU))
	}

	if envconfig.Debug {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	flashAttnEnabled := envconfig.FlashAttention

	for _, g := range gpus {
		// only cuda (compute capability 7+) and metal support flash attention
		if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) {
			flashAttnEnabled = false
		}

		// mmap has issues with partial offloading on metal
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
			opts.UseMMap = false
		}
	}

	if flashAttnEnabled {
		params = append(params, "--flash-attn")
	}

	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	numParallel := envconfig.NumParallel

	// TODO (jmorganca): multimodal models don't support parallel yet
	// see https://github.com/ollama/ollama/issues/4165
	if len(projectors) > 0 {
		numParallel = 1
		slog.Warn("multimodal models don't support parallel requests yet")
	}

	params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))

	for i := 0; i < len(servers); i++ {
		dir := availableServers[servers[i]]
		if dir == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		if strings.HasPrefix(servers[i], "cpu") {
			// TODO if we tried a gpu runner first, and it failed, record the error and bubble that back up
			gpuCount = 0
		}

		// Find an available port, retry on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed ", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// prepend the server directory to LD_LIBRARY_PATH/PATH
		libraryPaths := []string{dir}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// Append our runner directory to the path
			// This will favor system libraries over our bundled library dependencies
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		// Note: we always put the dependency path first
		// since this was the exact version we verified for AMD GPUs
		// and we favor what the user had in their path
		if gpus[0].DependencyPath != "" {
			// TODO refine for multi-gpu support
			libraryPaths = append([]string{gpus[0].DependencyPath}, libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server += ".exe"
		}

		// Detect tmp cleaners wiping out the file
		_, err := os.Stat(server)
		if errors.Is(err, os.ErrNotExist) {
			slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err)
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
		}

		s := &llmServer{
			port:           port,
			cmd:            exec.Command(server, finalParams...),
			status:         NewStatusWriter(os.Stderr),
			options:        opts,
			estimatedVRAM:  estimatedVRAM,
			estimatedTotal: estimatedTotal,
			sem:            semaphore.NewWeighted(int64(numParallel)),
			totalLayers:    ggml.KV().BlockCount() + 1,
			gpuCount:       gpuCount,
			done:           make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status

		visibleDevicesEnv, visibleDevicesEnvVal := gpu.GpuInfoList(gpus).GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			// Detect permission denied and augment the message about noexec
			if errors.Is(err, os.ErrPermission) {
				finalErr = fmt.Errorf("unable to start server %w.  %s may have noexec set.  Set OLLAMA_TMPDIR for server to a writable executable directory", err, dir)
				continue
			}
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// reap subprocess when it exits
		go func() {
			s.done <- s.cmd.Wait()
		}()

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}

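// projectorMemoryRequirements decodes a multimodal projector file and sums its
// tensor sizes to approximate the memory needed to load it; it returns 0 if the
// file cannot be read or decoded.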
func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}

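// ServerStatus reflects the runner's health as reported by its /health endpoint.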
type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string  `json:"status"`
	SlotsIdle       int     `json:"slots_idle"`
	SlotsProcessing int     `json:"slots_processing"`
	Error           string  `json:"error"`
	Progress        float32 `json:"progress"`
}

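// getServerStatus queries the runner's /health endpoint, failing fast if the
// subprocess has already exited.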
func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("health unmarshal encode response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		s.loadProgress = status.Progress
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

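// Ping performs a single health check against the running server.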
func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

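// WaitUntilRunning blocks until the runner reports ready, resetting its stall
// timer whenever load progress advances.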
func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := 5 * time.Minute            // If no progress happens
	finalLoadDuration := 5 * time.Minute        // After we hit 100%, give the runner more time to come online
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		// Poll with a short timeout, cancelling immediately rather than deferring
		// so contexts don't accumulate across loop iterations.
		healthCtx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(healthCtx)
		cancel()
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
				stallTimer = time.Now().Add(finalLoadDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

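// jsonGrammar is a GBNF grammar passed to llama.cpp to constrain sampling to
// valid JSON when a request sets format "json".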
const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

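// completion models a single streamed event from the llama.cpp /completion endpoint.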
type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options api.Options
}

type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

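// Completion streams a prediction from the runner's /completion endpoint,
// invoking fn for each generated chunk and once more with the final timings.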
func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// only allow a maximum of 10 "context shifts" to avoid infinite generation
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
		slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
	}

	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("Prompt does not specify that the LLM should respond in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
		}
	}

	// Marshal the request to JSON, leaving special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated; this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				return fmt.Errorf("error parsing llm response stream: %s", line)
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			s.Close()
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("an unknown error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

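// Embedding posts the prompt to the runner's /embedding endpoint and returns
// the resulting vector.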
func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return embedding.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

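// Tokenize converts content into token ids via the runner's /tokenize endpoint.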
func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

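// Detokenize converts token ids back into text via the runner's /detokenize endpoint.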
func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}

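// Close kills the server subprocess and waits for it to exit.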
func (s *llmServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimatedVRAM
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimatedTotal
}

func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}