package llm

/*
#cgo CFLAGS: -I${SRCDIR}/ext_server -I${SRCDIR}/llama.cpp -I${SRCDIR}/llama.cpp/common -I${SRCDIR}/llama.cpp/examples/server
#cgo CFLAGS: -DNDEBUG -DLLAMA_SERVER_LIBRARY=1 -D_XOPEN_SOURCE=600 -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64
#cgo CFLAGS: -Wmissing-noreturn -Wextra -Wcast-qual -Wno-unused-function -Wno-array-bounds
#cgo CPPFLAGS: -Ofast -Wextra -Wno-unused-function -Wno-unused-variable -Wno-deprecated-declarations -Wno-unused-but-set-variable
#cgo darwin CFLAGS: -D_DARWIN_C_SOURCE
#cgo darwin CPPFLAGS:  -DGGML_USE_ACCELERATE
#cgo darwin CPPFLAGS: -DGGML_USE_METAL -DGGML_METAL_NDEBUG
#cgo darwin LDFLAGS: -lc++ -framework Accelerate
#cgo darwin LDFLAGS: -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
#cgo linux CFLAGS: -D_GNU_SOURCE
#cgo linux LDFLAGS: -lrt -ldl -lstdc++ -lm
#cgo linux windows LDFLAGS: -lpthread

#include <stdlib.h>
#include "dyn_ext_server.h"

*/
import "C"

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"time"
	"unsafe"

	"github.com/jmorganca/ollama/api"
)

type dynExtServer struct {
	s       C.struct_dynamic_llama_server
	options api.Options
}

// Note: current implementation does not support concurrent instantiations
var mutex sync.Mutex

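// newExtServerResp allocates a C buffer of len bytes for the external server to
// write status or error text into; release it with freeExtServerResp.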
func newExtServerResp(len C.size_t) C.ext_server_resp_t {
	var resp C.ext_server_resp_t
	resp.msg_len = len
	bytes := make([]byte, len)
	resp.msg = (*C.char)(C.CBytes(bytes))
	return resp
}

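// freeExtServerResp releases the C buffer allocated by newExtServerResp.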
func freeExtServerResp(resp C.ext_server_resp_t) {
	if resp.msg_len == 0 {
		return
	}
	C.free(unsafe.Pointer(resp.msg))
}

func extServerResponseToErr(resp C.ext_server_resp_t) error {
	// Use an explicit format string so any '%' in the server message is not
	// interpreted as a formatting verb.
	return fmt.Errorf("%s", C.GoString(resp.msg))
}

// Note: current implementation does not support concurrent instantiations
var llm *dynExtServer

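// newDynExtServer loads the llama.cpp server shared library at the given path,
// initializes it with the model, LoRA adapters, and optional projector, and
// starts its main loop. The package-level mutex stays held until Close is
// called, since only one dynamic server may be active at a time.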
func newDynExtServer(library, model string, adapters, projectors []string, opts api.Options) (LLM, error) {
	if !mutex.TryLock() {
		log.Printf("concurrent llm servers not yet supported, waiting for prior server to complete")
		mutex.Lock()
	}
	updatePath(filepath.Dir(library))
	libPath := C.CString(library)
	defer C.free(unsafe.Pointer(libPath))
	resp := newExtServerResp(128)
	defer freeExtServerResp(resp)
	var srv C.struct_dynamic_llama_server
	C.dyn_init(libPath, &srv, &resp)
	if resp.id < 0 {
		mutex.Unlock()
		return nil, fmt.Errorf("Unable to load dynamic library: %s", C.GoString(resp.msg))
	}
	llm = &dynExtServer{
		s:       srv,
		options: opts,
	}
	log.Printf("Loading Dynamic llm server: %s", library)

	var sparams C.ext_server_params_t
	sparams.model = C.CString(model)
	defer C.free(unsafe.Pointer(sparams.model))

	sparams.embedding = true
	sparams.n_ctx = C.uint(opts.NumCtx)
	sparams.n_batch = C.uint(opts.NumBatch)
	sparams.n_gpu_layers = C.int(opts.NumGPU)
	sparams.main_gpu = C.int(opts.MainGPU)
	sparams.n_parallel = 1 // TODO - wire up concurrency

	// Always use the value encoded in the model
	sparams.rope_freq_base = 0.0
	sparams.rope_freq_scale = 0.0
	sparams.memory_f16 = C.bool(opts.F16KV)
	sparams.use_mlock = C.bool(opts.UseMLock)
	sparams.use_mmap = C.bool(opts.UseMMap)
	sparams.numa = C.bool(opts.UseNUMA)

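	// Build a C-side singly linked list of LoRA adapters; the server follows
	// the next pointers to apply each adapter in order.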
	sparams.lora_adapters = nil
	for i := 0; i < len(adapters); i++ {
		la := (*C.ext_server_lora_adapter_t)(C.malloc(C.sizeof_ext_server_lora_adapter_t))
		defer C.free(unsafe.Pointer(la))
		la.adapter = C.CString(adapters[i])
		defer C.free(unsafe.Pointer(la.adapter))
		la.scale = C.float(1.0) // TODO expose scale/weights up through ollama UX
		la.next = nil
		if i == 0 {
			sparams.lora_adapters = la
		} else {
			tmp := sparams.lora_adapters
			for ; tmp.next != nil; tmp = tmp.next {
			}
			tmp.next = la
		}
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		sparams.mmproj = C.CString(projectors[0])
		defer C.free(unsafe.Pointer(sparams.mmproj))
	} else {
		sparams.mmproj = nil
	}

	sparams.n_threads = C.uint(opts.NumThread)

	log.Printf("Initializing llama server")
	initResp := newExtServerResp(128)
	defer freeExtServerResp(initResp)
	C.dyn_llama_server_init(llm.s, &sparams, &initResp)
	if initResp.id < 0 {
		return nil, extServerResponseToErr(initResp)
	}

	log.Printf("Starting llama main loop")
	C.dyn_llama_server_start(llm.s)
	return llm, nil
}

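// Predict streams completion results from the external server, calling fn for
// each content chunk until the model stops, the context is canceled, or an
// error occurs.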
func (llm *dynExtServer) Predict(ctx context.Context, predict PredictOpts, fn func(PredictResult)) error {
	resp := newExtServerResp(128)
	defer freeExtServerResp(resp)
	var imageData []ImageData
	if len(predict.Images) > 0 {
		for cnt, i := range predict.Images {
			imageData = append(imageData, ImageData{Data: i, ID: cnt})
		}
	}
	log.Printf("loaded %d images", len(imageData))

	request := map[string]any{
		"prompt":            predict.Prompt,
		"stream":            true,
		"n_predict":         predict.Options.NumPredict,
		"n_keep":            predict.Options.NumKeep,
		"temperature":       predict.Options.Temperature,
		"top_k":             predict.Options.TopK,
		"top_p":             predict.Options.TopP,
		"tfs_z":             predict.Options.TFSZ,
		"typical_p":         predict.Options.TypicalP,
		"repeat_last_n":     predict.Options.RepeatLastN,
		"repeat_penalty":    predict.Options.RepeatPenalty,
		"presence_penalty":  predict.Options.PresencePenalty,
		"frequency_penalty": predict.Options.FrequencyPenalty,
		"mirostat":          predict.Options.Mirostat,
		"mirostat_tau":      predict.Options.MirostatTau,
		"mirostat_eta":      predict.Options.MirostatEta,
		"penalize_nl":       predict.Options.PenalizeNewline,
		"seed":              predict.Options.Seed,
		"stop":              predict.Options.Stop,
		"image_data":        imageData,
		"cache_prompt":      true,
	}

	if predict.Format == "json" {
		request["grammar"] = jsonGrammar
	}

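	// A completion request can fail transiently while all server slots are busy
	// ("slot unavailable"); retry with exponential backoff up to maxRetries.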
	retryDelay := 100 * time.Microsecond
	for retries := 0; retries < maxRetries; retries++ {
		if retries > 0 {
			time.Sleep(retryDelay) // wait before retrying
			retryDelay *= 2        // exponential backoff
		}

		// Handling JSON marshaling with special characters unescaped.
		buffer := &bytes.Buffer{}
		enc := json.NewEncoder(buffer)
		enc.SetEscapeHTML(false)

		if err := enc.Encode(request); err != nil {
			return fmt.Errorf("failed to marshal data: %w", err)
		}

		req := C.CString(buffer.String())
		defer C.free(unsafe.Pointer(req))

		C.dyn_llama_server_completion(llm.s, req, &resp)
		if resp.id < 0 {
			return extServerResponseToErr(resp)
		}

		retryNeeded := false
	out:
		for {
			select {
			case <-ctx.Done():
				// This handles the request cancellation
				C.dyn_llama_server_completion_cancel(llm.s, resp.id, &resp)
				if resp.id < 0 {
					return extServerResponseToErr(resp)
				} else {
					return nil
				}
			default:
				var result C.ext_server_task_result_t
				C.dyn_llama_server_completion_next_result(llm.s, resp.id, &result)
				json_resp := C.GoString(result.json_resp)
				C.dyn_llama_server_release_task_result(llm.s, &result)

				var p prediction
				if err := json.Unmarshal([]byte(json_resp), &p); err != nil {
					C.dyn_llama_server_completion_cancel(llm.s, resp.id, &resp)
					if resp.id < 0 {
						return fmt.Errorf("error unmarshaling llm prediction response: %w and cancel %s", err, C.GoString(resp.msg))
					} else {
						return fmt.Errorf("error unmarshaling llm prediction response: %w", err)
					}
				}

				if bool(result.error) && strings.Contains(json_resp, "slot unavailable") {
					retryNeeded = true
					// task will already be canceled
					break out
				}

				if p.Content != "" {
					fn(PredictResult{
						Content: p.Content,
					})
				}

				if p.Stop {
					fn(PredictResult{
						Done:               true,
						PromptEvalCount:    p.Timings.PromptN,
						PromptEvalDuration: parseDurationMs(p.Timings.PromptMS),
						EvalCount:          p.Timings.PredictedN,
						EvalDuration:       parseDurationMs(p.Timings.PredictedMS),
					})
					return nil
				}
			}
		}
		if !retryNeeded {
			return nil // success
		}
	}

	// should never reach here ideally
	return fmt.Errorf("max retries exceeded")
}

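// Encode tokenizes prompt with the loaded model's tokenizer and returns the
// resulting token IDs.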
func (llm *dynExtServer) Encode(ctx context.Context, prompt string) ([]int, error) {
	data, err := json.Marshal(TokenizeRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}
	req := C.CString(string(data))
	defer C.free(unsafe.Pointer(req))
	var json_resp *C.char
	resp := newExtServerResp(128)
	defer freeExtServerResp(resp)
	C.dyn_llama_server_tokenize(llm.s, req, &json_resp, &resp)
	if resp.id < 0 {
		return nil, extServerResponseToErr(resp)
	}
	defer C.dyn_llama_server_release_json_resp(llm.s, &json_resp)

	var encoded TokenizeResponse
	if err2 := json.Unmarshal([]byte(C.GoString(json_resp)), &encoded); err2 != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err2)
	}

	return encoded.Tokens, err
}

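// Decode converts a sequence of token IDs back into text.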
func (llm *dynExtServer) Decode(ctx context.Context, tokens []int) (string, error) {
	if len(tokens) == 0 {
		return "", nil
	}
	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req := C.CString(string(data))
	defer C.free(unsafe.Pointer(req))
	var json_resp *C.char
	resp := newExtServerResp(128)
	defer freeExtServerResp(resp)
	C.dyn_llama_server_detokenize(llm.s, req, &json_resp, &resp)
	if resp.id < 0 {
		return "", extServerResponseToErr(resp)
	}
	defer C.dyn_llama_server_release_json_resp(llm.s, &json_resp)

	var decoded DetokenizeResponse
	if err2 := json.Unmarshal([]byte(C.GoString(json_resp)), &decoded); err2 != nil {
		return "", fmt.Errorf("unmarshal encode response: %w", err2)
	}

	return decoded.Content, err
}

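// Embedding returns the embedding vector the loaded model produces for input.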
func (llm *dynExtServer) Embedding(ctx context.Context, input string) ([]float64, error) {
	data, err := json.Marshal(TokenizeRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req := C.CString(string(data))
	defer C.free(unsafe.Pointer(req))
	var json_resp *C.char
	resp := newExtServerResp(128)
	defer freeExtServerResp(resp)
	C.dyn_llama_server_embedding(llm.s, req, &json_resp, &resp)
	if resp.id < 0 {
		return nil, extServerResponseToErr(resp)
	}
	defer C.dyn_llama_server_release_json_resp(llm.s, &json_resp)

	var embedding EmbeddingResponse
	if err := json.Unmarshal([]byte(C.GoString(json_resp)), &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal tokenize response: %w", err)
	}

	return embedding.Embedding, nil
}

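// Close stops the llama server main loop and releases the lock taken in
// newDynExtServer so a new server can be instantiated.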
func (llm *dynExtServer) Close() {
	C.dyn_llama_server_stop(llm.s)
	mutex.Unlock()
}

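// updatePath prepends dir to the dynamic library search path (PATH on Windows,
// LD_LIBRARY_PATH elsewhere) so dependencies located next to the loaded server
// library can be resolved; on Windows it also drops stale entries that point
// into the same temporary parent directory.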
func updatePath(dir string) {
	if runtime.GOOS == "windows" {
		tmpDir := filepath.Dir(dir)
		pathComponents := strings.Split(os.Getenv("PATH"), ";")
		i := 0
		for _, comp := range pathComponents {
			if strings.EqualFold(comp, dir) {
				return
			}
			// Remove any other prior paths to our temp dir
			if !strings.HasPrefix(strings.ToLower(comp), strings.ToLower(tmpDir)) {
				pathComponents[i] = comp
				i++
			}
		}
		// Drop the entries filtered out above before prepending our directory.
		pathComponents = pathComponents[:i]
		newPath := strings.Join(append([]string{dir}, pathComponents...), ";")
		log.Printf("Updating PATH to %s", newPath)
		os.Setenv("PATH", newPath)
	} else {
		pathComponents := strings.Split(os.Getenv("LD_LIBRARY_PATH"), ":")
		for _, comp := range pathComponents {
			if comp == dir {
				return
			}
		}
		newPath := strings.Join(append([]string{dir}, pathComponents...), ":")
		log.Printf("Updating LD_LIBRARY_PATH to %s", newPath)
		os.Setenv("LD_LIBRARY_PATH", newPath)
	}
}