0001-Expose-callable-API-for-server.patch
From 90c332fe2ef61149b38561d02836e66715df214d Mon Sep 17 00:00:00 2001
From: Daniel Hiltgen <daniel@ollama.com>
Date: Mon, 13 Nov 2023 12:25:58 -0800
Subject: [PATCH] Expose callable API for server

This adds an extern "C" interface within the example server
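
A minimal sketch of how a caller might drive these entrypoints, assuming the
library is built with LLAMA_SERVER_LIBRARY defined and linked as ext_server;
the model path, JSON body, and parameter values are illustrative placeholders:

    // Hypothetical caller of the extern "C" API declared in examples/server/server.h
    #define LLAMA_SERVER_LIBRARY 1   // server.h is empty unless this is defined
    #include <stdio.h>
    #include <string.h>
    #include "server.h"

    int main(void) {
        char msg[2048];                            // caller-owned error buffer
        ext_server_resp_t err = { 0, sizeof(msg), msg };

        char model_path[] = "model.gguf";          // placeholder model path
        ext_server_params_t sparams;
        memset(&sparams, 0, sizeof(sparams));
        sparams.model      = model_path;
        sparams.n_ctx      = 2048;
        sparams.n_batch    = 512;
        sparams.n_threads  = 4;
        sparams.n_parallel = 1;
        sparams.memory_f16 = true;
        sparams.use_mmap   = true;

        llama_server_init(&sparams, &err);
        if (err.id != 0) {
            fprintf(stderr, "init failed: %s\n", err.msg);
            return 1;
        }
        llama_server_start();

        // Submit a completion; resp.id is the task id on success.
        ext_server_resp_t resp = { 0, sizeof(msg), msg };
        llama_server_completion("{\"prompt\": \"Hello\", \"n_predict\": 16}", &resp);
        if (resp.id < 0) {
            fprintf(stderr, "completion failed: %s\n", resp.msg);
        } else {
            ext_server_task_result_t result;
            do {
                llama_server_completion_next_result(resp.id, &result);
                printf("%s\n", result.json_resp);          // one JSON payload per result
                llama_server_release_task_result(&result); // caller frees json_resp
            } while (!result.stop && !result.error);
        }

        llama_server_stop();
        return 0;
    }

The JSON request and result payloads are handed straight to the existing
completion code paths, so they follow the same shape the HTTP server already
uses.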
---
 examples/server/CMakeLists.txt |  27 ++++
 examples/server/server.cpp     | 280 +++++++++++++++++++++++++++++++++
 examples/server/server.h       |  89 +++++++++++
 ggml-cuda.cu                   |   1 +
 4 files changed, 397 insertions(+)
 create mode 100644 examples/server/server.h

diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt
index 859cd12..da2b9bf 100644
--- a/examples/server/CMakeLists.txt
+++ b/examples/server/CMakeLists.txt
@@ -11,3 +11,30 @@ if (WIN32)
     TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
 endif()
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
+
+set(TARGET ext_server)
+option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
+add_library(${TARGET} STATIC server.cpp)
+target_include_directories(${TARGET} PRIVATE ../../common)
+target_include_directories(${TARGET} PRIVATE ../..)
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_definitions(${TARGET} PUBLIC LLAMA_SERVER_LIBRARY=1)
+target_link_libraries(${TARGET} PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT})
+target_compile_definitions(${TARGET} PRIVATE
+    SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
+)
+
+if (BUILD_SHARED_LIBS)
+    set_target_properties(ext_server PROPERTIES POSITION_INDEPENDENT_CODE ON)
+    target_compile_definitions(ext_server PRIVATE LLAMA_SHARED LLAMA_BUILD)
+    add_library(ext_server_shared SHARED $<TARGET_OBJECTS:ext_server>)
+    target_link_libraries(ext_server_shared PRIVATE ggml llama llava common ${CMAKE_THREAD_LIBS_INIT})
+    install(TARGETS ext_server_shared LIBRARY)
+endif()
+
+if (CUDAToolkit_FOUND)
+    target_include_directories(${TARGET} PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
+    if (WIN32)
+        target_link_libraries(ext_server_shared PRIVATE nvml)
+    endif()
+endif()
\ No newline at end of file
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 0403853..07fb05c 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -5,6 +5,9 @@
 #include "../llava/clip.h"
 
 #include "stb_image.h"
+#if defined(LLAMA_SERVER_LIBRARY)
+#include "server.h"
+#endif
 
 #ifndef NDEBUG
 // crash the server in debug mode, otherwise send an http 500 error
@@ -2643,6 +2646,7 @@ static void append_to_generated_text_from_generated_token_probs(llama_server_con
     }
 }
 
+#ifndef LLAMA_SERVER_LIBRARY
 int main(int argc, char **argv)
 {
 #if SERVER_VERBOSE != 1
@@ -3123,3 +3127,279 @@ int main(int argc, char **argv)
     llama_backend_free();
     return 0;
 }
+
+#else // LLAMA_SERVER_LIBRARY
+// Expose the llama server as a callable extern "C" API
+llama_server_context *llama = NULL;
+std::atomic<bool> ext_server_running(false);
+std::thread ext_server_thread;
+
+void llama_server_init(ext_server_params *sparams, ext_server_resp_t *err)
+{
+#if SERVER_VERBOSE != 1
+    LOG_TEE("disabling verbose llm logging\n");
+    log_disable();
+#endif
+    assert(err != NULL && sparams != NULL);
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        llama = new llama_server_context;
+        log_set_target(stdout);
+        gpt_params params;
+        params.n_ctx = sparams->n_ctx;
+        params.n_batch = sparams->n_batch;
+        if (sparams->n_threads > 0) {
+            params.n_threads = sparams->n_threads;
+        }
+        params.n_parallel = sparams->n_parallel;
+        params.rope_freq_base = sparams->rope_freq_base;
+        params.rope_freq_scale = sparams->rope_freq_scale;
+
+        if (sparams->memory_f16)  {
+            params.cache_type_k = "f16";
+            params.cache_type_v = "f16";
+        } else {
+            params.cache_type_k = "f32";
+            params.cache_type_v = "f32";
+        }
+
+        params.n_gpu_layers = sparams->n_gpu_layers;
+        params.main_gpu = sparams->main_gpu;
+        params.use_mlock = sparams->use_mlock;
+        params.use_mmap = sparams->use_mmap;
+        params.numa = sparams->numa;
+        params.embedding = sparams->embedding;
+        if (sparams->model != NULL) {
+            params.model = sparams->model;
+        }
+
+        for (ext_server_lora_adapter *la = sparams->lora_adapters; la != NULL; la = la->next) {
+            params.lora_adapter.push_back(std::make_tuple(la->adapter, la->scale));
+        }
+
+        if (sparams->mmproj != NULL) {
+            params.mmproj = std::string(sparams->mmproj);
+        }
+           
+        llama_backend_init(params.numa);
+
+        // load the model
+        if (!llama->load_model(params))
+        {
+            // TODO - consider modifying the logging logic or patching load_model so we can capture more detailed error messages
+            // and pass them back to the caller for better UX
+            err->id = -1;
+            snprintf(err->msg, err->msg_len, "error loading model %s", params.model.c_str());
+            return;
+        }
+
+        llama->initialize();
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception initializing llama server");
+    }
+}
+
+void llama_server_start()
+{
+    assert(llama != NULL);
+     // TODO mutex to protect thread creation
+    ext_server_thread = std::thread([&]()
+    {
+        ext_server_running = true;
+        try {
+            LOG_TEE("llama server main loop starting\n");
+            ggml_time_init();
+            while (ext_server_running.load())
+            {
+                if (!llama->update_slots()) {
+                    LOG_TEE("unexpected error in llama server update_slots - exiting main loop\n");
+                    break;
+                }
+            }
+        } catch (std::exception &e) {
+            LOG_TEE("caught exception in llama server main loop: %s\n", e.what());
+        } catch (...) {
+            LOG_TEE("caught unknown exception in llama server main loop\n");
+        }
+        LOG_TEE("\nllama server shutting down\n");
+        llama_backend_free();
+    });
+}
+
+void llama_server_stop() {
+    assert(llama != NULL);
+    // TODO - too verbose, remove once things are solid
+    LOG_TEE("requesting llama server shutdown\n");
+    ext_server_running = false;
+    ext_server_thread.join();
+    delete llama;
+    llama = NULL;
+    LOG_TEE("llama server shutdown complete\n");
+}
+
+void llama_server_completion(const char *json_req, ext_server_resp_t *resp) {
+    assert(llama != NULL && json_req != NULL && resp != NULL);
+    resp->id = -1;
+    resp->msg[0] = '\0';
+    try {
+        json data = json::parse(json_req);
+        resp->id = llama->request_completion(data, false, false, -1);
+    } catch (std::exception &e) {
+        snprintf(resp->msg, resp->msg_len, "exception %s", e.what());
+    } catch (...) {
+        snprintf(resp->msg, resp->msg_len, "Unknown exception during completion");
+    }
+}
+
+void llama_server_completion_next_result(const int task_id, ext_server_task_result_t *resp) {
+    assert(llama != NULL && resp != NULL);
+    std::string msg;
+    resp->id = -1;
+    resp->stop = false;
+    resp->error = false;
+    resp->json_resp = NULL;
+    std::string result_json;
+    try {
+        task_result result = llama->next_result(task_id);
+        result_json = result.result_json.dump(-1, ' ', false, json::error_handler_t::replace);
+        resp->id = result.id;
+        resp->stop = result.stop;
+        resp->error = result.error;
+        if (result.error) {
+            llama->request_cancel(task_id);
+        } else if (result.stop) {
+            llama->request_cancel(task_id);
+        }
+    } catch (std::exception &e) {
+        resp->error = true;
+        resp->id = -1;
+        result_json = "{\"error\":\"exception " + std::string(e.what()) + "\"}";
+    } catch (...) {
+        resp->error = true;
+        resp->id = -1;
+        result_json = "{\"error\":\"Unknown exception during completion\"}";
+    }
+    const std::string::size_type size = result_json.size() + 1;
+    resp->json_resp = new char[size];
+    snprintf(resp->json_resp, size, "%s", result_json.c_str());
+}
+
+void llama_server_release_task_result(ext_server_task_result_t *result) {
+    if (result == NULL || result->json_resp == NULL) {
+        return;
+    }
+    delete[] result->json_resp;
+}
+
+void llama_server_completion_cancel(const int task_id, ext_server_resp_t *err) {
+    assert(llama != NULL && err != NULL);
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        llama->request_cancel(task_id);
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception completion cancel in llama server");
+    }
+}
+
+void llama_server_tokenize(const char *json_req, char **json_resp, ext_server_resp_t *err) {
+    assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
+    *json_resp = NULL;
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        const json body = json::parse(json_req);
+        std::vector<llama_token> tokens;
+        if (body.count("content") != 0)
+        {
+            tokens = llama->tokenize(body["content"], false);
+        }
+        const json data = format_tokenizer_response(tokens);
+        std::string result_json = data.dump();
+        const std::string::size_type size = result_json.size() + 1;
+        *json_resp = new char[size];
+        snprintf(*json_resp, size, "%s", result_json.c_str());
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception during tokenize");
+    }
+}
+
+void llama_server_release_json_resp(char **json_resp) {
+    if (json_resp == NULL || *json_resp == NULL) {
+        return;
+    }
+    delete[] *json_resp;
+}
+
+void llama_server_detokenize(const char *json_req, char **json_resp, ext_server_resp_t *err) {
+    assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
+    *json_resp = NULL;
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        const json body = json::parse(json_req);
+        std::string content;
+        if (body.count("tokens") != 0)
+        {
+            const std::vector<llama_token> tokens = body["tokens"];
+            content = tokens_to_str(llama->ctx, tokens.cbegin(), tokens.cend());
+        }
+        const json data = format_detokenized_response(content);
+        std::string result_json = data.dump();
+        const std::string::size_type size = result_json.size() + 1;
+        *json_resp = new char[size];
+        snprintf(*json_resp, size, "%s", result_json.c_str());
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception during detokenize");
+    }
+}
+
+void llama_server_embedding(const char *json_req, char** json_resp, ext_server_resp_t *err) {
+    assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
+    *json_resp = NULL;
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        const json body = json::parse(json_req);
+        json prompt;
+        if (body.count("content") != 0)
+        {
+            prompt = body["content"];
+        }
+        else
+        {
+            prompt = "";
+        }
+        const int task_id = llama->request_completion({ {"prompt", prompt}, { "n_predict", 0} }, false, true, -1);
+        task_result result = llama->next_result(task_id);
+        std::string result_json = result.result_json.dump();
+        const std::string::size_type size = result_json.size() + 1;
+        *json_resp = new char[size];
+        snprintf(*json_resp, size, "%s", result_json.c_str());
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception during embedding");
+    }
+}
+
+#endif // LLAMA_SERVER_LIBRARY
\ No newline at end of file
diff --git a/examples/server/server.h b/examples/server/server.h
new file mode 100644
index 0000000..d22f1b6
--- /dev/null
+++ b/examples/server/server.h
@@ -0,0 +1,89 @@
+#if defined(LLAMA_SERVER_LIBRARY)
+#ifndef LLAMA_SERVER_H
+#define LLAMA_SERVER_H
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+// This exposes extern "C" entrypoints into the llama server.
+// To enable them, compile the server with LLAMA_SERVER_LIBRARY defined.
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+    typedef struct ext_server_resp {
+        int id; // < 0 on error
+        size_t msg_len; // caller must allocate msg and set msg_len
+        char *msg;
+    } ext_server_resp_t;
+
+    // Allocated and freed by caller
+    typedef struct ext_server_lora_adapter {
+        char *adapter;
+        float scale;
+        struct ext_server_lora_adapter *next;
+    } ext_server_lora_adapter_t;
+
+    // Allocated and freed by caller
+    typedef struct ext_server_params
+    {
+        char *model;            
+        uint32_t n_ctx;         // text context, 0 = from model
+        uint32_t n_batch;       // prompt processing maximum batch size
+        uint32_t n_threads;     // number of threads to use for generation
+        int32_t n_parallel;     // number of parallel sequences to decode
+        float rope_freq_base;   // RoPE base frequency, 0 = from model
+        float rope_freq_scale;  // RoPE frequency scaling factor, 0 = from model
+        bool memory_f16;        // use f16 instead of f32 for memory kv
+        int32_t n_gpu_layers;   // number of layers to store in VRAM (-1 - use default)
+        int32_t main_gpu;       // the GPU that is used for scratch and small tensors
+        bool use_mlock;         // force system to keep model in RAM
+        bool use_mmap;          // use mmap if possible
+        bool numa;              // attempt optimizations that help on some NUMA systems
+        bool embedding;         // get only sentence embedding
+        ext_server_lora_adapter_t* lora_adapters;
+        char *mmproj;
+    } ext_server_params_t;
+
+    typedef struct ext_server_task_result
+    {
+        int id;
+        bool stop;
+        bool error;
+        char* json_resp; // null terminated, memory managed by ext_server
+    } ext_server_task_result_t;
+
+    // Initialize the server once per process
+    // err->id = 0 for success and err->msg[0] = '\0'
+    // err->id != 0 for failure, and err->msg contains error message
+    void llama_server_init(ext_server_params_t *sparams, ext_server_resp_t *err);
+
+    // Run the main loop, called once per init
+    void llama_server_start();
+    // Stop the main loop and free up resources allocated in init and start.  Init must be called again to reuse
+    void llama_server_stop();
+
+    // json_req null terminated string, memory managed by caller
+    // resp->id >= 0 on success (task ID)
+    // resp->id < 0 on error, and resp->msg contains error message
+    void llama_server_completion(const char *json_req, ext_server_resp_t *resp);
+
+    // Caller must call llama_server_release_task_result to free resp->json_resp
+    void llama_server_completion_next_result(const int task_id, ext_server_task_result_t *result);
+    void llama_server_completion_cancel(const int task_id, ext_server_resp_t *err);
+    void llama_server_release_task_result(ext_server_task_result_t *result);
+
+    // Caller must call llama_server_release_json_resp to free json_resp when it is non-NULL
+    void llama_server_tokenize(const char *json_req, char **json_resp, ext_server_resp_t *err);
+    void llama_server_detokenize(const char *json_req, char **json_resp, ext_server_resp_t *err);
+    void llama_server_embedding(const char *json_req, char** json_resp, ext_server_resp_t *err);
+    void llama_server_release_json_resp(char **json_resp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+#endif // LLAMA_SERVER_LIBRARY
\ No newline at end of file
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index f20846f..9640cf3 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -6757,6 +6757,7 @@ static cudaError_t ggml_cuda_cpy_tensor_2d(
         CUDA_CHECK(cudaGetDevice(&id));
         src_ptr = (char *) extra->data_device[id];
     } else {
+        fprintf(stderr, "ggml_cuda_cpy_tensor_2d assert: backend: %d\n", src->backend);
         GGML_ASSERT(false);
     }
     char * dst_ptr = (char *) dst;
-- 
2.39.3 (Apple Git-145)