package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/discover"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/llama"
	"github.com/ollama/ollama/runners"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, input string) ([]float32, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
}

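// A minimal lifecycle sketch for a LlamaServer (hypothetical caller code;
// error handling elided, with gpus, ggml, and opts assumed to come from the
// scheduler):
//
//	srv, err := NewLlamaServer(gpus, modelPath, ggml, nil, nil, opts, 1)
//	if err == nil {
//		defer srv.Close()
//		err = srv.WaitUntilRunning(ctx)
//	}
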
// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port        int
	cmd         *exec.Cmd
	done        chan error // Channel to signal when the process exits
	status      *StatusWriter
	options     api.Options
	numParallel int
	modelPath   string
	modelLock   sync.Mutex   // Temporary until we switch fully to Go server
	model       *llama.Model // If non-nil, the runner is a new Go server

	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount     int
	gpus         discover.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration        // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// the maxArraySize is negative, all arrays are collected.
func LoadModel(model string, maxArraySize int) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f, maxArraySize)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
	var err error
	var cpuRunner string
	var estimate MemoryEstimate
	var systemTotalMemory uint64
	var systemFreeMemory uint64
	var systemSwapFreeMemory uint64

	systemInfo := discover.GetSystemInfo()
	systemTotalMemory = systemInfo.System.TotalMemory
	systemFreeMemory = systemInfo.System.FreeMemory
	systemSwapFreeMemory = systemInfo.System.FreeSwap
	slog.Info("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
	if opts.NumGPU == 0 {
		gpus = discover.GetCPUInfo()
	}
	if len(gpus) == 1 && gpus[0].Library == "cpu" {
		cpuRunner = runners.ServerForCpu()
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)
	} else {
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)

		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			cpuRunner = runners.ServerForCpu()
			gpus = discover.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	// On linux and windows, over-allocating CPU memory will almost always result in an error
	// Darwin has fully dynamic swap so has no direct concept of free swap space
	if runtime.GOOS != "darwin" {
		systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize
		available := systemFreeMemory + systemSwapFreeMemory
		if systemMemoryRequired > available {
			slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", format.HumanBytes2(available), "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory))
			return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available))
		}
	}

	estimate.log()

	// Loop through potential servers
	finalErr := errors.New("no suitable llama servers found")

	availableServers := runners.GetAvailableServers()

	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = runners.ServersForGpu(gpus[0].RunnerName()) // All GPUs in the list are matching Library and Variant
	}
	demandLib := envconfig.LLMLibrary()
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
			if strings.HasPrefix(demandLib, "cpu") || (!(runtime.GOOS == "darwin" && runtime.GOARCH == "arm64") && demandLib == runners.BuiltinName()) {
				// Omit the GPU flag to silence the warning
				opts.NumGPU = -1
			}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}

	params := []string{
		"--model", model,
		"--ctx-size", strconv.Itoa(opts.NumCtx),
		"--batch-size", strconv.Itoa(opts.NumBatch),
	}

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", strconv.Itoa(opts.NumGPU))
	}

	if envconfig.Debug() {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", strconv.Itoa(opts.MainGPU))
	}

	if len(adapters) > 0 {
		for _, adapter := range adapters {
			params = append(params, "--lora", adapter)
		}
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	defaultThreads := systemInfo.GetOptimalThreadCount()
	if opts.NumThread > 0 {
		params = append(params, "--threads", strconv.Itoa(opts.NumThread))
	} else if defaultThreads > 0 {
		params = append(params, "--threads", strconv.Itoa(defaultThreads))
	}

	fa := envconfig.FlashAttention()
	if fa && !gpus.FlashAttentionSupported() {
		slog.Warn("flash attention enabled but not supported by gpu")
		fa = false
	}

	if fa && !ggml.SupportsFlashAttention() {
		slog.Warn("flash attention enabled but not supported by model")
		fa = false
	}

	kvct := strings.ToLower(envconfig.KvCacheType())

	if fa {
		slog.Info("enabling flash attention")
		params = append(params, "--flash-attn")

		// Flash Attention also supports kv cache quantization
		// Enable if requested and the kv cache type is supported by the model
		if kvct != "" && ggml.SupportsKVCacheType(kvct) {
			params = append(params, "--kv-cache-type", kvct)
		} else if kvct != "" {
			slog.Warn("kv cache type not supported by model", "type", kvct)
		}
	} else if kvct != "" && kvct != "f16" {
		slog.Warn("quantized kv cache requested but flash attention disabled", "type", kvct)
	}

	// mmap has issues with partial offloading on metal
	for _, g := range gpus {
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
			opts.UseMMap = new(bool)
			*opts.UseMMap = false
		}
	}

	// Windows CUDA should not use mmap for best performance
	// On Linux, with a model larger than free space, mmap leads to thrashing
	// For CPU loads we want the memory to be allocated, not FS cache
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) ||
		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) ||
		(gpus[0].Library == "cpu" && opts.UseMMap == nil) ||
		(opts.UseMMap != nil && !*opts.UseMMap) {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	// TODO - NUMA support currently doesn't work properly

	params = append(params, "--parallel", strconv.Itoa(numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	if envconfig.MultiUserCache() {
		params = append(params, "--multiuser-cache")
	}

	for i := range servers {
		builtin := servers[i] == runners.BuiltinName()
		server := availableServers[servers[i]]
		if server == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		if strings.HasPrefix(servers[i], "cpu") || (builtin && !(runtime.GOOS == "darwin" && runtime.GOARCH == "arm64")) {
			gpus = discover.GetCPUInfo()
		}

		// Find an available port, retrying on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := []string{"runner"}
		finalParams = append(finalParams, params...)
		finalParams = append(finalParams, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// Start with the server directory for the LD_LIBRARY_PATH/PATH
		libraryPaths := []string{filepath.Dir(server)}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// favor our bundled library dependencies over system libraries
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		// Note: we always put the dependency path first
		// since this was the exact version we compiled/linked against
		if gpus[0].DependencyPath != nil {
			// assume gpus from the same library have the same dependency path
			libraryPaths = append(gpus[0].DependencyPath, libraryPaths...)
		}

		// TODO - once fully switched to the Go runner, load the model here for tokenize/detokenize cgo access
		s := &llmServer{
			port:        port,
			cmd:         exec.Command(server, finalParams...),
			status:      NewStatusWriter(os.Stderr),
			options:     opts,
			modelPath:   model,
			estimate:    estimate,
			numParallel: numParallel,
			sem:         semaphore.NewWeighted(int64(numParallel)),
			totalLayers: ggml.KV().BlockCount() + 1,
			gpus:        gpus,
			done:        make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status
		s.cmd.SysProcAttr = LlamaServerSysProcAttr

		envWorkarounds := [][2]string{}
		for _, gpu := range gpus {
			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
		}
		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			} else if len(envWorkarounds) != 0 {
				for _, kv := range envWorkarounds {
					if strings.EqualFold(cmp[0], kv[0]) {
						s.cmd.Env[i] = kv[0] + "=" + kv[1]
					}
				}
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug() {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCR_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "GPU_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			// Detect permission denied and augment the message about noexec
			if errors.Is(err, os.ErrPermission) {
				finalErr = fmt.Errorf("unable to start server %w. %s may have noexec set. Set OLLAMA_TMPDIR for server to a writable executable directory", err, server)
				continue
			}
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// reap subprocess when it exits
		go func() {
			err := s.cmd.Wait()
			// Favor a more detailed message over the process exit status
			if err != nil && s.status != nil && s.status.LastErrMsg != "" {
				slog.Debug("llama runner terminated", "error", err)
				if strings.Contains(s.status.LastErrMsg, "unknown model") {
					s.status.LastErrMsg = "this model is not supported by your version of Ollama. You may need to upgrade"
				}
				s.done <- errors.New(s.status.LastErrMsg)
			} else {
				s.done <- err
			}
		}()

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

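// ServerStatusResp mirrors the JSON status payload returned by the runner's /health endpoint.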
type ServerStatusResp struct {
	Status          string  `json:"status"`
	SlotsIdle       int     `json:"slots_idle"`
	SlotsProcessing int     `json:"slots_processing"`
	Error           string  `json:"error"`
	Progress        float32 `json:"progress"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("unmarshal health response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		s.loadProgress = status.Progress
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := envconfig.LoadTimeout()    // If no progress happens
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			return fmt.Errorf("llama runner process has terminated: %w", err)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
				stallTimer = time.Now().Add(stallDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

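// jsonGrammar is a GBNF grammar (the grammar format used by the llama.cpp
// server) constraining sampling to syntactically valid JSON; it is sent in
// the request's "grammar" field when format "json" is requested.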
const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws
object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws
array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws
string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws
number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws
# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

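// maxBufferSize caps the bufio.Scanner buffer used to read streamed completion lines.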
const maxBufferSize = 512 * format.KiloByte

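// ImageData carries one input image for a multimodal completion request.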
type ImageData struct {
	Data          []byte `json:"data"`
	ID            int    `json:"id"`
	AspectRatioID int    `json:"aspect_ratio_id"`
}

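// completion mirrors a single streamed JSON event from the runner's /completion endpoint.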
type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

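// CompletionRequest bundles the prompt, optional format constraint, images, and sampling options for Completion.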
type CompletionRequest struct {
	Prompt  string
	Format  json.RawMessage
	Images  []ImageData
	Options *api.Options
}

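// CompletionResponse is the streamed result passed to the Completion callback;
// the count and duration fields are only populated on the final (Done) event.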
type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

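// Completion streams generation events to fn until the runner reports
// completion. A minimal usage sketch (assuming a running server s and a
// populated req):
//
//	err := s.Completion(ctx, req, func(r CompletionResponse) {
//		fmt.Print(r.Content)
//	})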
func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting completion request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return err
	}
	defer s.sem.Release(1)

	// put an upper limit on num_predict to avoid the model running forever
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
	}

	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"min_p":             req.Options.MinP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	// TODO (parthsareen): Move conversion to grammar with sampling logic
	// API should do error handling for invalid formats
	if req.Format != nil && strings.TrimSpace(string(req.Format)) != "null" {
		if strings.ToLower(strings.TrimSpace(string(req.Format))) == `"json"` {
			request["grammar"] = jsonGrammar
			if !strings.Contains(strings.ToLower(req.Prompt), "json") {
				slog.Warn("prompt does not specify that the LLM should respond in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
			}
		} else if schema, err := func() (llama.JsonSchema, error) {
			var schema llama.JsonSchema
			err := json.Unmarshal(req.Format, &schema)
			return schema, err
		}(); err == nil {
			request["grammar"] = schema.AsGrammar()
		} else {
			slog.Warn(`format is neither a schema nor "json"`, "format", req.Format)
		}
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated; this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			// slog.Debug("got line", "line", string(line))
			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				evt = line
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}
			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") || strings.Contains(err.Error(), "forcibly closed") {
			s.Close()
			var msg string
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			} else {
				msg = err.Error()
			}
			return fmt.Errorf("an error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

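// EmbeddingRequest is the JSON body sent to the runner's /embedding endpoint.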
type EmbeddingRequest struct {
	Content string `json:"content"`
}

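// EmbeddingResponse is the JSON body returned by the runner's /embedding endpoint.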
type EmbeddingResponse struct {
	Embedding []float32 `json:"embedding"`
}

func (s *llmServer) Embedding(ctx context.Context, input string) ([]float32, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting embedding request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	r, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	r.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(r)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var e EmbeddingResponse
	if err := json.Unmarshal(body, &e); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return e.Embedding, nil
}

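// TokenizeRequest and TokenizeResponse are the JSON bodies exchanged with the runner's /tokenize endpoint.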
type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	s.modelLock.Lock()
	defer s.modelLock.Unlock()
	if s.model != nil {
		return s.model.Tokenize(content, false, true)
	}

	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound {
		if s.model == nil {
			slog.Debug("new runner detected, loading model for cgo tokenization")
			m, err := llama.LoadModelFromFile(s.modelPath, llama.ModelParams{VocabOnly: true})
			if err != nil {
				return nil, err
			}
			s.model = m
		}
		return s.model.Tokenize(content, false, true)
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

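// DetokenizeRequest and DetokenizeResponse are the JSON bodies exchanged with the runner's /detokenize endpoint.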
type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	s.modelLock.Lock()
	defer s.modelLock.Unlock()
	if s.model != nil {
		var resp string
		for _, token := range tokens {
			resp += s.model.TokenToPiece(token)
		}
		return resp, nil
	}

	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound {
		if s.model == nil {
			slog.Debug("new runner detected, loading model for cgo tokenization")
			m, err := llama.LoadModelFromFile(s.modelPath, llama.ModelParams{VocabOnly: true})
			if err != nil {
				return "", err
			}
			s.model = m
		}
		var resp string
		for _, token := range tokens {
			resp += s.model.TokenToPiece(token)
		}
		return resp, nil
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}

func (s *llmServer) Close() error {
	s.modelLock.Lock()
	if s.model != nil {
		llama.FreeModel(s.model)
		s.model = nil
	}
	s.modelLock.Unlock()

	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			if i < len(s.estimate.GPUSizes) {
				return s.estimate.GPUSizes[i]
			}
		}
	}
	return 0
}

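// parseDurationMs converts a millisecond count from llama.cpp timings into a time.Duration.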
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}