package llm

/*
#cgo CFLAGS: -I${SRCDIR}/llama.cpp/gguf -I${SRCDIR}/llama.cpp/gguf/common -I${SRCDIR}/llama.cpp/gguf/examples/server
#cgo CFLAGS: -DNDEBUG -DLLAMA_SERVER_LIBRARY=1 -D_XOPEN_SOURCE=600 -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64
#cgo CFLAGS: -Wmissing-noreturn -Wall -Wextra -Wcast-qual -Wno-unused-function -Wno-array-bounds
#cgo CPPFLAGS: -Ofast -Wall -Wextra -Wno-unused-function -Wno-unused-variable -Wno-deprecated-declarations -Wno-unused-but-set-variable
#cgo darwin CFLAGS: -D_DARWIN_C_SOURCE
#cgo darwin CPPFLAGS:  -DGGML_USE_ACCELERATE
#cgo darwin CPPFLAGS: -DGGML_USE_METAL -DGGML_METAL_NDEBUG
#cgo darwin LDFLAGS: -lc++ -framework Accelerate
#cgo darwin LDFLAGS: -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
#cgo darwin LDFLAGS: ${SRCDIR}/llama.cpp/gguf/build/metal/common/libcommon.a
#cgo darwin LDFLAGS: ${SRCDIR}/llama.cpp/gguf/build/metal/examples/server/libext_server.a
#cgo darwin LDFLAGS: ${SRCDIR}/llama.cpp/gguf/build/metal/libllama.a
#cgo darwin LDFLAGS: ${SRCDIR}/llama.cpp/gguf/build/metal/libggml_static.a
#cgo linux CFLAGS: -D_GNU_SOURCE
#cgo linux windows CFLAGS: -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_MMV_Y=1 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -DGGML_USE_CUBLAS
#cgo linux LDFLAGS: -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/local/cuda/lib64 -L/usr/local/cuda/targets/x86_64-linux/lib/stubs
#cgo linux LDFLAGS: ${SRCDIR}/llama.cpp/gguf/build/cpu/examples/server/libext_server.a
#cgo linux LDFLAGS: ${SRCDIR}/llama.cpp/gguf/build/cpu/common/libcommon.a
#cgo linux LDFLAGS: ${SRCDIR}/llama.cpp/gguf/build/cpu/libllama.a
#cgo linux LDFLAGS: ${SRCDIR}/llama.cpp/gguf/build/cpu/libggml_static.a
#cgo linux LDFLAGS: -lrt -lpthread -ldl -lstdc++ -lm
#cgo windows LDFLAGS: -L${SRCDIR}/llama.cpp/gguf/build/wincpu/dist/lib
#cgo windows LDFLAGS: -lcpu_server -lpthread

#include <stdlib.h>
#include "server.h"

*/
import "C"
import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"log"
	"os"
	"strings"
	"sync"
	"time"
	"unsafe"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/gpu"
)

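// newExtServerResp allocates a C-side buffer for messages coming back from
// the ext server; pair every call with freeExtServerResp so the C allocation
// is released once the response has been consumed.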
func newExtServerResp(len C.size_t) C.ext_server_resp_t {
	var resp C.ext_server_resp_t
	resp.msg_len = len
	bytes := make([]byte, len)
	resp.msg = (*C.char)(C.CBytes(bytes))
	return resp
}

func freeExtServerResp(resp C.ext_server_resp_t) {
	if resp.msg_len == 0 {
		return
	}
	C.free(unsafe.Pointer(resp.msg))
}

func extServerResponseToErr(resp C.ext_server_resp_t) error {
	return fmt.Errorf("%s", C.GoString(resp.msg))
}

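// extServer mirrors the C ext_server API; the method names intentionally
// match the underlying C functions so the cgo calls read one-to-one.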
type extServer interface {
	LLM
	llama_server_init(sparams *C.ext_server_params_t, err *C.ext_server_resp_t)
	llama_server_start()
	llama_server_stop()
	llama_server_completion(json_req *C.char, resp *C.ext_server_resp_t)
	llama_server_completion_next_result(task_id C.int, resp *C.ext_server_task_result_t)
	llama_server_completion_cancel(task_id C.int, err *C.ext_server_resp_t)
	llama_server_release_task_result(result *C.ext_server_task_result_t)
	llama_server_tokenize(json_req *C.char, json_resp **C.char, err *C.ext_server_resp_t)
	llama_server_detokenize(json_req *C.char, json_resp **C.char, err *C.ext_server_resp_t)
	llama_server_embedding(json_req *C.char, json_resp **C.char, err *C.ext_server_resp_t)
	llama_server_release_json_resp(json_resp **C.char)
}

type llamaExtServer struct {
	api.Options
}

// Note: current implementation does not support concurrent instantiations
var mutex sync.Mutex

func (llm *llamaExtServer) llama_server_init(sparams *C.ext_server_params_t, err *C.ext_server_resp_t) {
	C.llama_server_init(sparams, err)
}
func (llm *llamaExtServer) llama_server_start() {
	C.llama_server_start()
}
func (llm *llamaExtServer) llama_server_stop() {
	C.llama_server_stop()
}

func (llm *llamaExtServer) llama_server_completion(json_req *C.char, resp *C.ext_server_resp_t) {
	C.llama_server_completion(json_req, resp)
}
func (llm *llamaExtServer) llama_server_completion_next_result(task_id C.int, resp *C.ext_server_task_result_t) {
	C.llama_server_completion_next_result(task_id, resp)
}
func (llm *llamaExtServer) llama_server_completion_cancel(task_id C.int, err *C.ext_server_resp_t) {
	C.llama_server_completion_cancel(task_id, err)
}
func (llm *llamaExtServer) llama_server_release_task_result(result *C.ext_server_task_result_t) {
	C.llama_server_release_task_result(result)
}

func (llm *llamaExtServer) llama_server_tokenize(json_req *C.char, json_resp **C.char, err *C.ext_server_resp_t) {
	C.llama_server_tokenize(json_req, json_resp, err)
}
func (llm *llamaExtServer) llama_server_detokenize(json_req *C.char, json_resp **C.char, err *C.ext_server_resp_t) {
	C.llama_server_detokenize(json_req, json_resp, err)
}
func (llm *llamaExtServer) llama_server_embedding(json_req *C.char, json_resp **C.char, err *C.ext_server_resp_t) {
	C.llama_server_embedding(json_req, json_resp, err)
}
func (llm *llamaExtServer) llama_server_release_json_resp(json_resp **C.char) {
	C.llama_server_release_json_resp(json_resp)
}

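// newDefaultExtServer creates an extServer backed by the llama.cpp server
// library linked into this binary (see the cgo LDFLAGS above).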
func newDefaultExtServer(model string, adapters, projectors []string, numLayers int64, opts api.Options) (extServer, error) {
	server := &llamaExtServer{opts}
	return newExtServer(server, model, adapters, projectors, numLayers, opts)
}

func newExtServer(server extServer, model string, adapters, projectors []string, numLayers int64, opts api.Options) (extServer, error) {
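	// Only one ext server may run per process; block here until any prior
	// instance has been closed.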
	if !mutex.TryLock() {
		log.Printf("concurrent llm servers not yet supported, waiting for prior server to complete")
		mutex.Lock()
	}
	fileInfo, err := os.Stat(model)
	if err != nil {
		return nil, err
	}
	var sparams C.ext_server_params_t
	sparams.model = C.CString(model)
	defer C.free(unsafe.Pointer(sparams.model))

	numGPU := gpu.NumGPU(numLayers, fileInfo.Size(), opts)

	sparams.embedding = true
	sparams.n_ctx = C.uint(opts.NumCtx)
	sparams.n_batch = C.uint(opts.NumBatch)
	sparams.n_gpu_layers = C.int(numGPU)
	sparams.main_gpu = C.int(opts.MainGPU)
	sparams.n_parallel = 1 // TODO - wire up concurrency

	// Always use the value encoded in the model
	sparams.rope_freq_base = 0.0
	sparams.rope_freq_scale = 0.0
	sparams.memory_f16 = C.bool(opts.F16KV)
	sparams.use_mlock = C.bool(opts.UseMLock)
	sparams.use_mmap = C.bool(opts.UseMMap)
	sparams.numa = C.bool(opts.UseNUMA)

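	// Build the adapters into a C-side singly linked list, appending each
	// new adapter at the tail.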
	sparams.lora_adapters = nil
	for i := 0; i < len(adapters); i++ {
		la := (*C.ext_server_lora_adapter_t)(C.malloc(C.sizeof_ext_server_lora_adapter_t))
		defer C.free(unsafe.Pointer(la))
		la.adapter = C.CString(adapters[i])
		defer C.free(unsafe.Pointer(la.adapter))
		la.scale = C.float(1.0) // TODO expose scale/weights up through ollama UX
		la.next = nil
		if i == 0 {
			sparams.lora_adapters = la
		} else {
			tmp := sparams.lora_adapters
			for ; tmp.next != nil; tmp = tmp.next {
			}
			tmp.next = la
		}
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		sparams.mmproj = C.CString(projectors[0])
		defer C.free(unsafe.Pointer(sparams.mmproj))
	} else {
		sparams.mmproj = nil
	}

	sparams.n_threads = C.uint(opts.NumThread)

	log.Printf("Initializing internal llama server")
	resp := newExtServerResp(128)
	defer freeExtServerResp(resp)
	server.llama_server_init(&sparams, &resp)
	if resp.id < 0 {
		return nil, extServerResponseToErr(resp)
	}

	log.Printf("Starting internal llama main loop")
	server.llama_server_start()
	return server, nil
}

func (llm *llamaExtServer) Predict(ctx context.Context, pred PredictOpts, fn func(PredictResult)) error {
	return predict(llm, llm.Options, ctx, pred, fn)
}

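// predict streams completion results from the ext server, invoking fn once
// per generated chunk and a final time with timing stats when the model stops.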
func predict(llm extServer, opts api.Options, ctx context.Context, predict PredictOpts, fn func(PredictResult)) error {
	resp := newExtServerResp(128)
	defer freeExtServerResp(resp)
	var imageData []ImageData
	for cnt, i := range predict.Images {
		imageData = append(imageData, ImageData{Data: i, ID: cnt})
	}
	log.Printf("loaded %d images", len(imageData))

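	// Field names follow the llama.cpp server /completion JSON API.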
	request := map[string]any{
		"prompt":            predict.Prompt,
		"stream":            true,
		"n_predict":         opts.NumPredict,
		"n_keep":            opts.NumKeep,
		"temperature":       opts.Temperature,
		"top_k":             opts.TopK,
		"top_p":             opts.TopP,
		"tfs_z":             opts.TFSZ,
		"typical_p":         opts.TypicalP,
		"repeat_last_n":     opts.RepeatLastN,
		"repeat_penalty":    opts.RepeatPenalty,
		"presence_penalty":  opts.PresencePenalty,
		"frequency_penalty": opts.FrequencyPenalty,
		"mirostat":          opts.Mirostat,
		"mirostat_tau":      opts.MirostatTau,
		"mirostat_eta":      opts.MirostatEta,
		"penalize_nl":       opts.PenalizeNewline,
		"seed":              opts.Seed,
		"stop":              opts.Stop,
		"image_data":        imageData,
	}

	if predict.Format == "json" {
		request["grammar"] = jsonGrammar
	}

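	// Retry with exponential backoff when the server reports that no
	// parallel slot is available yet.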
	retryDelay := 100 * time.Microsecond
	for retries := 0; retries < maxRetries; retries++ {
		if retries > 0 {
			time.Sleep(retryDelay) // wait before retrying
			retryDelay *= 2        // exponential backoff
		}

		// Marshal the request without escaping HTML characters so prompt
		// text containing <, >, or & reaches the model unmodified.
		buffer := &bytes.Buffer{}
		enc := json.NewEncoder(buffer)
		enc.SetEscapeHTML(false)

		if err := enc.Encode(request); err != nil {
			return fmt.Errorf("failed to marshal data: %w", err)
		}

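		// Note: deferred frees from each retry accumulate until predict returns.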
		req := C.CString(buffer.String())
		defer C.free(unsafe.Pointer(req))

		llm.llama_server_completion(req, &resp)
		if resp.id < 0 {
			return extServerResponseToErr(resp)
		}

		retryNeeded := false
	out:
		for {
			select {
			case <-ctx.Done():
				// This handles the request cancellation
				llm.llama_server_completion_cancel(resp.id, &resp)
				if resp.id < 0 {
					return extServerResponseToErr(resp)
				} else {
					return nil
				}
			default:
				var result C.ext_server_task_result_t
				llm.llama_server_completion_next_result(resp.id, &result)
				json_resp := C.GoString(result.json_resp)
				llm.llama_server_release_task_result(&result)

				var p prediction
				if err := json.Unmarshal([]byte(json_resp), &p); err != nil {
					llm.llama_server_completion_cancel(resp.id, &resp)
					if resp.id < 0 {
						return fmt.Errorf("error unmarshaling llm prediction response: %w (cancel also failed: %s)", err, C.GoString(resp.msg))
					} else {
						return fmt.Errorf("error unmarshaling llm prediction response: %w", err)
					}
				}

				if bool(result.error) && strings.Contains(json_resp, "slot unavailable") {
					retryNeeded = true
					// task will already be canceled
					break out
				}

				if p.Content != "" {
					fn(PredictResult{
						Content: p.Content,
					})
				}

				if p.Stop {
					fn(PredictResult{
						Done:               true,
						PromptEvalCount:    p.Timings.PromptN,
						PromptEvalDuration: parseDurationMs(p.Timings.PromptMS),
						EvalCount:          p.Timings.PredictedN,
						EvalDuration:       parseDurationMs(p.Timings.PredictedMS),
					})
					return nil
				}
			}
		}
		if !retryNeeded {
			return nil // success
		}
	}

	// only reached if every attempt failed with "slot unavailable"
	return fmt.Errorf("max retries exceeded")
}
func (llm *llamaExtServer) Encode(ctx context.Context, prompt string) ([]int, error) {
	return encode(llm, ctx, prompt)
}

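// encode round-trips the prompt through the ext server's tokenize endpoint.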
func encode(llm extServer, ctx context.Context, prompt string) ([]int, error) {
	data, err := json.Marshal(TokenizeRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}
	req := C.CString(string(data))
	defer C.free(unsafe.Pointer(req))
	var json_resp *C.char
	resp := newExtServerResp(128)
	defer freeExtServerResp(resp)
	llm.llama_server_tokenize(req, &json_resp, &resp)
	if resp.id < 0 {
		return nil, extServerResponseToErr(resp)
	}
	defer llm.llama_server_release_json_resp(&json_resp)

	var encoded TokenizeResponse
	if err2 := json.Unmarshal([]byte(C.GoString(json_resp)), &encoded); err2 != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err2)
	}

	return encoded.Tokens, nil
}

func (llm *llamaExtServer) Decode(ctx context.Context, tokens []int) (string, error) {
	return decode(llm, ctx, tokens)
}

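// decode converts tokens back into text via the detokenize endpoint.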
func decode(llm extServer, ctx context.Context, tokens []int) (string, error) {
	if len(tokens) == 0 {
		return "", nil
	}
	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req := C.CString(string(data))
	defer C.free(unsafe.Pointer(req))
	var json_resp *C.char
	resp := newExtServerResp(128)
	defer freeExtServerResp(resp)
	llm.llama_server_detokenize(req, &json_resp, &resp)
	if resp.id < 0 {
		return "", extServerResponseToErr(resp)
	}
	defer llm.llama_server_release_json_resp(&json_resp)

	var decoded DetokenizeResponse
	if err2 := json.Unmarshal([]byte(C.GoString(json_resp)), &decoded); err2 != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err2)
	}

	return decoded.Content, nil
}

func (llm *llamaExtServer) Embedding(ctx context.Context, input string) ([]float64, error) {
	return embedding(llm, ctx, input)
}
func embedding(llm extServer, ctx context.Context, input string) ([]float64, error) {
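	// The embedding endpoint accepts the same {"content": ...} payload shape
	// as tokenize, so TokenizeRequest is reused here.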
	data, err := json.Marshal(TokenizeRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("marshaling embed data: %w", err)
	}

	req := C.CString(string(data))
	defer C.free(unsafe.Pointer(req))
	var json_resp *C.char
	resp := newExtServerResp(128)
	defer freeExtServerResp(resp)
	llm.llama_server_embedding(req, &json_resp, &resp)
	if resp.id < 0 {
		return nil, extServerResponseToErr(resp)
	}
	defer llm.llama_server_release_json_resp(&json_resp)

	var embedding EmbeddingResponse
	if err := json.Unmarshal([]byte(C.GoString(json_resp)), &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return embedding.Embedding, nil
}

func (llm *llamaExtServer) Close() {
	close(llm)
}

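// close stops the server's main loop and releases the process-wide lock
// taken in newExtServer.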
func close(llm extServer) {
	llm.llama_server_stop()
	mutex.Unlock()
}