package llm

import (
	"context"
	"fmt"
	"log/slog"
	"os"
	"runtime"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/gpu"
)

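// LLM is the interface implemented by all model runners: streaming token
// prediction, embeddings, and tokenizer encode/decode.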
type LLM interface {
	Predict(context.Context, PredictOpts, func(PredictResult)) error
	Embedding(context.Context, string) ([]float64, error)
	Encode(context.Context, string) ([]int, error)
	Decode(context.Context, []int) (string, error)
	Close()
}

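// New verifies the model file, decodes its GGML metadata, clamps the
// requested context length to what the model supports, and estimates how
// much of the model can be offloaded to GPU before starting a server.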
func New(workDir, model string, adapters, projectors []string, opts api.Options) (LLM, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, err := DecodeGGML(f)
	if err != nil {
		return nil, err
	}

	if opts.NumCtx > int(ggml.NumCtx()) {
		slog.Warn(fmt.Sprintf("requested context length is greater than model's max context length (%d > %d), using %d instead", opts.NumCtx, ggml.NumCtx(), ggml.NumCtx()))
		opts.NumCtx = int(ggml.NumCtx())
	}

	if opts.NumCtx < 4 {
		opts.NumCtx = 4
	}

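	// estimate what must fit in memory: the model weights themselves, the
	// kv cache, and the compute graph overhead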
	vram, _ := gpu.CheckVRAM()
	size := ggml.Size

	// fp16 k and v caches each require n_ctx * n_layer * (n_embd / n_head) * n_head_kv * 2 bytes; times 2 for both key and value
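	// e.g. hypothetical values n_ctx=4096, n_layer=32, n_embd=4096,
	// n_head=32, n_head_kv=8 work out to 4*4096*32*4096*8/32 bytes = 512 MiB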
	kv := 2 * 2 * int64(opts.NumCtx) * int64(ggml.NumLayers()) * int64(ggml.NumEmbed()) * int64(ggml.NumHeadKv()) / int64(ggml.NumHead())

	// this amount is the overhead + tensors in memory
	// TODO: get this from llama.cpp's graph calculations instead of
	// estimating it as 1/6 * kv_cache_size * num_gqa
	graph := int64(ggml.NumGQA()) * kv / 6

	info := gpu.GetGPUInfo()
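	// pick an offload strategy per platform: on darwin the model is either
	// offloaded to Metal entirely or not at all; elsewhere, estimate how
	// many layers fit in the available vram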
	switch runtime.GOOS {
	case "darwin":
		if opts.NumGPU == 0 {
			break
		}

		if size+kv+graph > vram {
			slog.Info("not enough vram available, falling back to CPU only")
			info.Library = "cpu"
			info.Variant = gpu.GetCPUVariant()
			opts.NumGPU = 0
			break
		}

		opts.NumGPU = 1
	default:
		if info.Library == "cpu" {
			slog.Info("GPU not available, falling back to CPU")
			opts.NumGPU = 0
			break
		}

		// don't use the GPU at all if no layers are to be offloaded
		if opts.NumGPU == 0 {
			info.Library = "cpu"
			info.Variant = gpu.GetCPUVariant()
			break
		}

		// user-defined GPU count
		if opts.NumGPU != -1 {
			break
		}

		// the "main" GPU needs the most memory and determines the limit
		// of how many layers can be loaded. It needs to fit:
		// 1. the full compute graph allocation for all devices (graph)
		// 2. the proportional kv cache for all devices (kv * % layers)
		// 3. the proportional model (size * % layers / # devices)
		// This estimates the number of layers
		maxlayers := int64(ggml.NumLayers()) + 1
		devices := int64(info.DeviceCount)
		avg := vram / devices
		layers := maxlayers * (avg - graph) / (kv + size/devices)
		if layers > maxlayers {
			layers = maxlayers
		}

		// 1 + 2 must fit on the main gpu
		minVRAM := graph + kv*layers/maxlayers
		if layers <= 0 || minVRAM > avg {
			slog.Info("not enough vram available, falling back to CPU only")
			info.Library = "cpu"
			info.Variant = gpu.GetCPUVariant()
			opts.NumGPU = 0
			break
		}

		opts.NumGPU = int(layers)
	}

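	// leave rope parameters at zero so the runner falls back to the values
	// stored in the model itself (llama.cpp treats 0 as "use model default")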
	opts.RopeFrequencyBase = 0.0
	opts.RopeFrequencyScale = 0.0
	return newLlmServer(info, model, adapters, projectors, opts)
}

// Give any native cgo implementations an opportunity to initialize
func Init(workdir string) error {
	return nativeInit(workdir)
}

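// newLlmServer tries each candidate dynamic library for the detected GPU in
// turn and returns the first backend that loads successfully.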
func newLlmServer(gpuInfo gpu.GpuInfo, model string, adapters, projectors []string, opts api.Options) (LLM, error) {
	dynLibs := getDynLibs(gpuInfo)

	// Check to see if the user has requested a specific library instead of auto-detecting
	demandLib := os.Getenv("OLLAMA_LLM_LIBRARY")
	if demandLib != "" {
		libPath := availableDynLibs[demandLib]
		if libPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info(fmt.Sprintf("Loading OLLAMA_LLM_LIBRARY=%s", demandLib))
			dynLibs = []string{libPath}
		}
	}

	lastErr := fmt.Errorf("unable to locate suitable llm library")
	for _, dynLib := range dynLibs {
		srv, err := newDynExtServer(dynLib, model, adapters, projectors, opts)
		if err == nil {
			return srv, nil
		}
		slog.Warn(fmt.Sprintf("Failed to load dynamic library %s: %s", dynLib, err))
		lastErr = err
	}

	return nil, lastErr
}