Commit 35934b2e authored by Daniel Hiltgen

Adapted rocm support to cgo based llama.cpp

parent f8ef4439
@@ -49,3 +49,7 @@ git_module_setup
apply_patches
build
install
# TODO - implement ROCm support on windows
md gguf/build/winrocm/lib -ea 0
echo $null >> gguf/build/winrocm/lib/.generated
package llm
-//go:generate sh ./gen_linux.sh
+//go:generate bash ./gen_linux.sh
//go:build cuda
package llm
//go:generate git submodule init
//go:generate git submodule update --force ggml
//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
//go:generate git -C ggml apply ../patches/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch
//go:generate git -C ggml apply ../patches/0001-copy-cuda-runtime-libraries.patch
//go:generate rm -rf ggml/build/cuda
//go:generate cmake -S ggml -B ggml/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
//go:generate cmake --build ggml/build/cuda --target server --config Release
//go:generate mv ggml/build/cuda/bin/server ggml/build/cuda/bin/ollama-runner
//go:generate git -C gguf apply ../patches/0001-copy-cuda-runtime-libraries.patch
//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
//go:generate rm -rf gguf/build/cuda
//go:generate cmake -S gguf -B gguf/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off
//go:generate cmake --build gguf/build/cuda --target server --config Release
//go:generate mv gguf/build/cuda/bin/server gguf/build/cuda/bin/ollama-runner
//go:build rocm
package llm
//go:generate git submodule init
//go:generate git submodule update --force ggml
//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
//go:generate git -C ggml apply ../patches/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch
//go:generate git -C ggml apply ../patches/0001-copy-cuda-runtime-libraries.patch
//go:generate git submodule update --force gguf
//go:generate git -C gguf apply ../patches/0001-copy-cuda-runtime-libraries.patch
//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
//go:generate rm -rf ggml/build/rocm
//go:generate cmake -S ggml -B ggml/build/rocm -DLLAMA_CLBLAST=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
//go:generate cmake --build ggml/build/rocm --target server --config Release
//go:generate mv ggml/build/rocm/bin/server ggml/build/rocm/bin/ollama-runner
//go:generate rm -rf gguf/build/rocm
//go:generate cmake -S gguf -B gguf/build/rocm -DLLAMA_HIPBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS='gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102' -DGPU_TARGETS='gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102'
//go:generate cmake --build gguf/build/rocm --target server --config Release
//go:generate mv gguf/build/rocm/bin/server gguf/build/rocm/bin/ollama-runner
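Note: these directives are only compiled in under the rocm build tag (see the //go:build rocm line above). A minimal sketch of a local invocation, assuming ROCm is installed under /opt/rocm; both the path and this exact command line are assumptions, not something this commit spells out:
# hypothetical invocation from the repository root
export ROCM_PATH=/opt/rocm
go generate -tags rocm ./...
go build .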
-From 64b3fbb150d12b3ca63ac2fb4e57bc46f41d2ccd Mon Sep 17 00:00:00 2001
+From 087cf3300e973d7790db8f7cad01d2a790de38be Mon Sep 17 00:00:00 2001
From: Daniel Hiltgen <daniel@ollama.com>
Date: Mon, 13 Nov 2023 12:25:58 -0800
Subject: [PATCH] Expose callable API for server
This adds an extern "C" interface within the example server
---
-examples/server/CMakeLists.txt | 24 ++++
+examples/server/CMakeLists.txt | 24 +++
-examples/server/server.cpp | 247 +++++++++++++++++++++++++++++++++
+examples/server/server.cpp | 274 +++++++++++++++++++++++++++++++++
-examples/server/server.h | 83 +++++++++++
+examples/server/server.h | 89 +++++++++++
ggml-cuda.cu | 1 +
-4 files changed, 355 insertions(+)
+4 files changed, 388 insertions(+)
create mode 100644 examples/server/server.h
diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt
@@ -46,7 +46,7 @@ index 859cd12..4ea47a7 100644
+endif()
\ No newline at end of file
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
-index 895f751..f939590 100644
+index d0cd8e1..5f5d4c5 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -5,6 +5,9 @@
@@ -59,7 +59,7 @@ index 895f751..f939590 100644
#ifndef NDEBUG
// crash the server in debug mode, otherwise send an http 500 error
-@@ -2631,6 +2634,7 @@ static void append_to_generated_text_from_generated_token_probs(llama_server_con
+@@ -2632,6 +2635,7 @@ static void append_to_generated_text_from_generated_token_probs(llama_server_con
}
}
@@ -67,31 +67,24 @@ index 895f751..f939590 100644
int main(int argc, char **argv)
{
// own arguments required by this example
-@@ -3065,3 +3069,246 @@ int main(int argc, char **argv)
+@@ -3066,3 +3070,273 @@ int main(int argc, char **argv)
llama_backend_free();
return 0;
}
+
+#else // LLAMA_SERVER_LIBRARY
+// Expose the llama server as a callable extern "C" API
+llama_server_context llama; +llama_server_context *llama = NULL;
+std::atomic<bool> ext_server_running(false); +std::atomic<bool> ext_server_running(false);
+std::thread ext_server_thread; +std::thread ext_server_thread;
+inline ext_server_err makeErr(uint32_t code, std::string msg) {
+ if (code == 0) {
+ return ext_server_err{0, NULL};
+ }
+ const std::string::size_type size = msg.size();
+ ext_server_err ret = {
+ code,
+ new char[size + 1],
+ };
+ memcpy(ret.err, msg.c_str(), size + 1);
+ return ret;
+}
+ +
+ext_server_err llama_server_init(ext_server_params *sparams) +void llama_server_init(ext_server_params *sparams, ext_server_resp_t *err)
+{ +{
+ assert(err != NULL && sparams != NULL);
+ err->id = 0;
+ err->msg[0] = '\0';
+ try {
+ llama = new llama_server_context;
+ log_set_target(stdout); + log_set_target(stdout);
+ gpt_params params; + gpt_params params;
+ params.n_ctx = sparams->n_ctx; + params.n_ctx = sparams->n_ctx;
@@ -123,28 +116,35 @@ index 895f751..f939590 100644
+ params.lora_adapter.push_back(std::make_tuple(la->adapter, la->scale)); + params.lora_adapter.push_back(std::make_tuple(la->adapter, la->scale));
+ } + }
+ +
+ try { + if (sparams->mmproj != NULL) {
+ params.mmproj = std::string(sparams->mmproj);
+ }
+
+ llama_backend_init(params.numa); + llama_backend_init(params.numa);
+ +
+ // load the model + // load the model
+ if (!llama.load_model(params)) + if (!llama->load_model(params))
+ { + {
+ // TODO - consider modifying the logging logic or patching load_model so we can capture more detailed error messages + // TODO - consider modifying the logging logic or patching load_model so we can capture more detailed error messages
+ // and pass them back to the caller for better UX + // and pass them back to the caller for better UX
+ return makeErr(1, "error loading model " + params.model); + err->id = -1;
+ snprintf(err->msg, err->msg_len, "error loading model %s", params.model.c_str());
+ return;
+ } + }
+ +
+ llama.initialize(); + llama->initialize();
+ } catch (std::exception &e) { + } catch (std::exception &e) {
+ return makeErr(1, e.what()); + err->id = -1;
+ snprintf(err->msg, err->msg_len, "exception %s", e.what());
+ } catch (...) { + } catch (...) {
+ return makeErr(1, "Unknown Exception initializing llama server"); + err->id = -1;
+ snprintf(err->msg, err->msg_len, "Unknown exception initializing llama server");
+ } + }
+ return makeErr(0, "");
+} +}
+ +
+void llama_server_start() +void llama_server_start()
+{ +{
+ assert(llama != NULL);
+ // TODO mutex to protect thread creation + // TODO mutex to protect thread creation
+ ext_server_thread = std::thread([&]() + ext_server_thread = std::thread([&]()
+ { + {
@@ -154,7 +154,7 @@ index 895f751..f939590 100644
+ ggml_time_init(); + ggml_time_init();
+ while (ext_server_running.load()) + while (ext_server_running.load())
+ { + {
+ if (!llama.update_slots()) { + if (!llama->update_slots()) {
+ LOG_TEE("unexpected error in llama server update_slots - exiting main loop\n"); + LOG_TEE("unexpected error in llama server update_slots - exiting main loop\n");
+ break; + break;
+ } + }
@@ -170,124 +170,150 @@ index 895f751..f939590 100644
+} +}
+ +
+void llama_server_stop() { +void llama_server_stop() {
+ assert(llama != NULL);
+ // TODO - too verbose, remove once things are solid + // TODO - too verbose, remove once things are solid
+ LOG_TEE("requesting llama server shutdown\n"); + LOG_TEE("requesting llama server shutdown\n");
+ ext_server_running = false; + ext_server_running = false;
+ ext_server_thread.join(); + ext_server_thread.join();
+ delete llama;
+ llama = NULL;
+ LOG_TEE("llama server shutdown complete\n"); + LOG_TEE("llama server shutdown complete\n");
+} +}
+ +
+ext_server_completion_resp llama_server_completion(const char *json_req) { +void llama_server_completion(const char *json_req, ext_server_resp_t *resp) {
+ std::string msg; + assert(llama != NULL && json_req != NULL && resp != NULL);
+ ext_server_completion_resp resp = { + resp->id = -1;
+ 0, + resp->msg[0] = '\0';
+ NULL,
+ };
+ try { + try {
+ json data = json::parse(json_req); + json data = json::parse(json_req);
+ resp.task_id = llama.request_completion(data, false, false, -1); + resp->id = llama->request_completion(data, false, false, -1);
+ return resp;
+ } catch (std::exception &e) { + } catch (std::exception &e) {
+ msg = e.what(); + snprintf(resp->msg, resp->msg_len, "exception %s", e.what());
+ } catch (...) { + } catch (...) {
+ msg = "Unknown Exception during completion"; + snprintf(resp->msg, resp->msg_len, "Unknown exception during completion");
+ } + }
+ const std::string::size_type size = msg.size();
+ resp.task_id = 0;
+ resp.err = new char[size + 1];
+ memcpy(resp.err, msg.c_str(), size + 1);
+ return resp;
+} +}
+ +
+ext_task_result llama_server_completion_next_result(const int task_id) { +void llama_server_completion_next_result(const int task_id, ext_server_task_result_t *resp) {
+ assert(llama != NULL && resp != NULL);
+ std::string msg; + std::string msg;
+ ext_task_result resp = {-1,false,false,NULL}; + resp->id = -1;
+ resp->stop = false;
+ resp->error = false;
+ resp->json_resp = NULL;
+ std::string result_json;
+ try { + try {
+ task_result result = llama.next_result(task_id); + task_result result = llama->next_result(task_id);
+ std::string result_json = result.result_json.dump(-1, ' ', false, json::error_handler_t::replace); + result_json = result.result_json.dump(-1, ' ', false, json::error_handler_t::replace);
+ const std::string::size_type size = result_json.size(); + resp->id = result.id;
+ resp.id = result.id; + resp->stop = result.stop;
+ resp.stop = result.stop; + resp->error = result.error;
+ resp.error = result.error;
+ resp.result_json = new char[size + 1];
+ memcpy(resp.result_json, result_json.c_str(), size + 1);
+ if (result.error) { + if (result.error) {
+ llama.request_cancel(task_id); + llama->request_cancel(task_id);
+ } else if (result.stop) { + } else if (result.stop) {
+ llama.request_cancel(task_id); + llama->request_cancel(task_id);
+ } + }
+ return resp;
+ } catch (std::exception &e) { + } catch (std::exception &e) {
+ msg = e.what(); // TODO - json? + resp->error = true;
+ resp->id = -1;
+ result_json = "{\"error\":\"exception " + std::string(e.what()) + "\"}";
+ } catch (...) { + } catch (...) {
+ msg = "Unknown Exception during completion"; + resp->error = true;
+ resp->id = -1;
+ result_json = "{\"error\":\"Unknown exception during completion\"}";
+ } + }
+ resp.error = true; + const std::string::size_type size = result_json.size() + 1;
+ const std::string::size_type size = msg.size(); + resp->json_resp = new char[size];
+ resp.result_json = new char[size + 1]; + snprintf(resp->json_resp, size, "%s", result_json.c_str());
+ memcpy(resp.result_json, msg.c_str(), size + 1);
+ return resp;
+} +}
+ +
+ext_server_err llama_server_completion_cancel(const int task_id) { +void llama_server_release_task_result(ext_server_task_result_t *result) {
+ if (result == NULL || result->json_resp == NULL) {
+ return;
+ }
+ delete[] result->json_resp;
+}
+
+void llama_server_completion_cancel(const int task_id, ext_server_resp_t *err) {
+ assert(llama != NULL && err != NULL);
+ err->id = 0;
+ err->msg[0] = '\0';
+ try { + try {
+ llama.request_cancel(task_id); + llama->request_cancel(task_id);
+ } catch (std::exception &e) { + } catch (std::exception &e) {
+ return makeErr(1, e.what()); + err->id = -1;
+ snprintf(err->msg, err->msg_len, "exception %s", e.what());
+ } catch (...) { + } catch (...) {
+ return makeErr(1, "Unknown Exception running llama server"); + err->id = -1;
+ snprintf(err->msg, err->msg_len, "Unknown exception completion cancel in llama server");
+ } + }
+ return makeErr(0, "");
+} +}
+ +
+ +void llama_server_tokenize(const char *json_req, char **json_resp, ext_server_resp_t *err) {
+ext_server_err llama_server_tokenize(const char *json_req, ext_server_resp *resp) { + assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
+ resp->json_resp = NULL; + *json_resp = NULL;
+ err->id = 0;
+ err->msg[0] = '\0';
+ try { + try {
+ const json body = json::parse(json_req); + const json body = json::parse(json_req);
+ std::vector<llama_token> tokens; + std::vector<llama_token> tokens;
+ if (body.count("content") != 0) + if (body.count("content") != 0)
+ { + {
+ tokens = llama.tokenize(body["content"], false); + tokens = llama->tokenize(body["content"], false);
+ } + }
+ const json data = format_tokenizer_response(tokens); + const json data = format_tokenizer_response(tokens);
+ std::string result_json = data.dump(); + std::string result_json = data.dump();
+ const std::string::size_type size = result_json.size(); + const std::string::size_type size = result_json.size() + 1;
+ resp->json_resp = new char[size + 1]; + *json_resp = new char[size];
+ memcpy(resp->json_resp, result_json.c_str(), size + 1); + snprintf(*json_resp, size, "%s", result_json.c_str());
+ } catch (std::exception &e) { + } catch (std::exception &e) {
+ return makeErr(1, e.what()); + err->id = -1;
+ snprintf(err->msg, err->msg_len, "exception %s", e.what());
+ } catch (...) { + } catch (...) {
+ return makeErr(1, "Unknown Exception during tokenize"); + err->id = -1;
+ snprintf(err->msg, err->msg_len, "Unknown exception during tokenize");
+ } + }
+ return makeErr(0, "");
+} +}
+ +
+ext_server_err llama_server_detokenize(const char *json_req, ext_server_resp *resp) { +void llama_server_release_json_resp(char **json_resp) {
+ resp->json_resp = NULL; + if (json_resp == NULL || *json_resp == NULL) {
+ return;
+ }
+ delete[] *json_resp;
+}
+
+void llama_server_detokenize(const char *json_req, char **json_resp, ext_server_resp_t *err) {
+ assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
+ *json_resp = NULL;
+ err->id = 0;
+ err->msg[0] = '\0';
+ try { + try {
+ const json body = json::parse(json_req); + const json body = json::parse(json_req);
+ std::string content; + std::string content;
+ if (body.count("tokens") != 0) + if (body.count("tokens") != 0)
+ { + {
+ const std::vector<llama_token> tokens = body["tokens"]; + const std::vector<llama_token> tokens = body["tokens"];
+ content = tokens_to_str(llama.ctx, tokens.cbegin(), tokens.cend()); + content = tokens_to_str(llama->ctx, tokens.cbegin(), tokens.cend());
+ } + }
+ const json data = format_detokenized_response(content); + const json data = format_detokenized_response(content);
+ std::string result_json = data.dump(); + std::string result_json = data.dump();
+ const std::string::size_type size = result_json.size(); + const std::string::size_type size = result_json.size() + 1;
+ resp->json_resp = new char[size + 1]; + *json_resp = new char[size];
+ memcpy(resp->json_resp, result_json.c_str(), size + 1); + snprintf(*json_resp, size, "%s", result_json.c_str());
+ } catch (std::exception &e) { + } catch (std::exception &e) {
+ return makeErr(1, e.what()); + err->id = -1;
+ snprintf(err->msg, err->msg_len, "exception %s", e.what());
+ } catch (...) { + } catch (...) {
+ return makeErr(1, "Unknown Exception during detokenize"); + err->id = -1;
+ snprintf(err->msg, err->msg_len, "Unknown exception during detokenize");
+ } + }
+ return makeErr(0, "");
+} +}
+ +
+ext_server_err llama_server_embedding(const char *json_req, ext_server_resp *resp) { +void llama_server_embedding(const char *json_req, char** json_resp, ext_server_resp_t *err) {
+ resp->json_resp = NULL; + assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
+ *json_resp = NULL;
+ err->id = 0;
+ err->msg[0] = '\0';
+ try { + try {
+ const json body = json::parse(json_req); + const json body = json::parse(json_req);
+ json prompt; + json prompt;
@@ -299,28 +325,29 @@ index 895f751..f939590 100644
+ { + {
+ prompt = ""; + prompt = "";
+ } + }
+ const int task_id = llama.request_completion({ {"prompt", prompt}, { "n_predict", 0} }, false, true, -1); + const int task_id = llama->request_completion({ {"prompt", prompt}, { "n_predict", 0} }, false, true, -1);
+ task_result result = llama.next_result(task_id); + task_result result = llama->next_result(task_id);
+ std::string result_json = result.result_json.dump(); + std::string result_json = result.result_json.dump();
+ const std::string::size_type size = result_json.size(); + const std::string::size_type size = result_json.size() + 1;
+ resp->json_resp = new char[size + 1]; + *json_resp = new char[size];
+ memcpy(resp->json_resp, result_json.c_str(), size + 1); + snprintf(*json_resp, size, "%s", result_json.c_str());
+ } catch (std::exception &e) { + } catch (std::exception &e) {
+ return makeErr(1, e.what()); + err->id = -1;
+ snprintf(err->msg, err->msg_len, "exception %s", e.what());
+ } catch (...) { + } catch (...) {
+ return makeErr(1, "Unknown Exception during detokenize"); + err->id = -1;
+ snprintf(err->msg, err->msg_len, "Unknown exception during embedding");
+ } + }
+ return makeErr(0, "");
+} +}
+
+#endif // LLAMA_SERVER_LIBRARY
\ No newline at end of file
diff --git a/examples/server/server.h b/examples/server/server.h
new file mode 100644
-index 0000000..4d03b1e
+index 0000000..d22f1b6
--- /dev/null
+++ b/examples/server/server.h
-@@ -0,0 +1,83 @@
+@@ -0,0 +1,89 @@
+#if defined(LLAMA_SERVER_LIBRARY)
+#ifndef LLAMA_SERVER_H
+#define LLAMA_SERVER_H
@@ -336,17 +363,20 @@ index 0000000..4d03b1e
+extern "C"
+{
+#endif
+ // TODO - clean the type def's up a bit for better consistency + typedef struct ext_server_resp {
+ typedef struct ext_server_err { + int id; // < 0 on error
+ uint32_t code; // 0 on success, > 0 on error + size_t msg_len; // caller must allocate msg and set msg_len
+ char *err; // null if code == 0; else contains error message. Caller responsible for freeing memory + char *msg;
+ } ext_server_err; + } ext_server_resp_t;
+ +
+ // Allocated and freed by caller
+ typedef struct ext_server_lora_adapter { + typedef struct ext_server_lora_adapter {
+ char *adapter; + char *adapter;
+ float scale; + float scale;
+ struct ext_server_lora_adapter *next; + struct ext_server_lora_adapter *next;
+ } ext_server_lora_adapter; + } ext_server_lora_adapter_t;
+
+ // Allocated and freed by caller
+ typedef struct ext_server_params + typedef struct ext_server_params
+ { + {
+ char *model; + char *model;
@@ -363,40 +393,43 @@ index 0000000..4d03b1e
+ bool use_mmap; // use mmap if possible + bool use_mmap; // use mmap if possible
+ bool numa; // attempt optimizations that help on some NUMA systems + bool numa; // attempt optimizations that help on some NUMA systems
+ bool embedding; // get only sentence embedding + bool embedding; // get only sentence embedding
+ ext_server_lora_adapter* lora_adapters; + ext_server_lora_adapter_t* lora_adapters;
+ } ext_server_params; + char *mmproj;
+ } ext_server_params_t;
+
+ typedef struct ext_server_task_result
+ {
+ int id;
+ bool stop;
+ bool error;
+ char* json_resp; // null terminated, memory managed by ext_server
+ } ext_server_task_result_t;
+ +
+ // Initialize the server once per process + // Initialize the server once per process
+ ext_server_err llama_server_init(ext_server_params *sparams); + // err->id = 0 for success and err->msg[0] = NULL
+ // err->id != 0 for failure, and err->msg contains error message
+ void llama_server_init(ext_server_params_t *sparams, ext_server_resp_t *err);
+ +
+ // Run the main loop + // Run the main loop, called once per init
+ void llama_server_start(); + void llama_server_start();
+ // Stop the main loop + // Stop the main loop and free up resources allocated in init and start. Init must be called again to reuse
+ void llama_server_stop(); + void llama_server_stop();
+ +
+ typedef struct ext_task_result + // json_req null terminated string, memory managed by caller
+ { + // resp->id >= 0 on success (task ID)
+ int id; + // resp->id < 0 on error, and resp->msg contains error message
+ bool stop; + void llama_server_completion(const char *json_req, ext_server_resp_t *resp);
+ bool error; +
+ char* result_json; // caller responsible to free this memory + // Caller must call llama_server_release_task_result to free resp->json_resp
+ } ext_task_result; + void llama_server_completion_next_result(const int task_id, ext_server_task_result_t *result);
+ + void llama_server_completion_cancel(const int task_id, ext_server_resp_t *err);
+ typedef struct ext_server_completion_resp { + void llama_server_release_task_result(ext_server_task_result_t *result);
+ int task_id; // < 0 on error, >= 0 on success +
+ char *err; // null if task_id >= 0; else contains error message. Caller responsible for freeing memory + // Caller must call llama_server_releaes_json_resp to free json_resp if err.id < 0
+ } ext_server_completion_resp; + void llama_server_tokenize(const char *json_req, char **json_resp, ext_server_resp_t *err);
+ ext_server_completion_resp llama_server_completion(const char *json_req); + void llama_server_detokenize(const char *json_req, char **json_resp, ext_server_resp_t *err);
+ ext_task_result llama_server_completion_next_result(const int task_id); + void llama_server_embedding(const char *json_req, char** json_resp, ext_server_resp_t *err);
+ ext_server_err llama_server_completion_cancel(const int task_id); + void llama_server_release_json_resp(char **json_resp);
+
+ // Caller responsible for freeing json_resp
+ typedef struct ext_server_resp {
+ char *json_resp; // Caller responsible for freeing string
+ } ext_server_resp;
+ ext_server_err llama_server_tokenize(const char *json_req, ext_server_resp *resp);
+ ext_server_err llama_server_detokenize(const char *json_req, ext_server_resp *resp);
+ ext_server_err llama_server_embedding(const char *json_req, ext_server_resp *resp);
+ +
+#ifdef __cplusplus
+}
@@ -406,10 +439,10 @@ index 0000000..4d03b1e
+#endif // LLAMA_SERVER_LIBRARY
\ No newline at end of file
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
-index 85f7a29..ce51364 100644
+index 9e1acd3..ea64b55 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
-@@ -6410,6 +6410,7 @@ static cudaError_t ggml_cuda_cpy_tensor_2d(
+@@ -6505,6 +6505,7 @@ static cudaError_t ggml_cuda_cpy_tensor_2d(
CUDA_CHECK(cudaGetDevice(&id));
src_ptr = (char *) extra->data_device[id];
} else {
@@ -3,6 +3,7 @@ package llm
import (
"bytes"
"context"
_ "embed"
"errors"
"fmt"
"os"
@@ -112,12 +113,6 @@ type ImageData struct {
ID int `json:"id"`
}
type llama struct {
api.Options
ImageData []ImageData
Running
}
var (
errNvidiaSMI = errors.New("warning: gpu support may not be enabled, check that you have installed GPU drivers: nvidia-smi command failed")
errAvailableVRAM = errors.New("not enough VRAM available, falling back to CPU only")
@@ -166,7 +161,8 @@ type prediction struct {
}
const maxBufferSize = 512 * format.KiloByte
-const maxRetries = 6
+const maxRetries = 3
const retryDelay = 1 * time.Second
type PredictOpts struct {
Prompt string
@@ -11,6 +11,7 @@ import (
"github.com/jmorganca/ollama/api"
"github.com/jmorganca/ollama/format"
"github.com/jmorganca/ollama/gpu"
)
type LLM interface {
@@ -19,7 +20,6 @@ type LLM interface {
Encode(context.Context, string) ([]int, error)
Decode(context.Context, []int) (string, error)
Close()
Ping(context.Context) error
}
func New(workDir, model string, adapters, projectors []string, opts api.Options) (LLM, error) {
@@ -78,5 +78,17 @@ func New(workDir, model string, adapters, projectors []string, opts api.Options)
opts.NumGQA = 0
opts.RopeFrequencyBase = 0.0
opts.RopeFrequencyScale = 0.0
gpuInfo := gpu.GetGPUInfo()
switch gpuInfo.Driver {
case "ROCM":
return newRocmShimExtServer(model, adapters, projectors, ggml.NumLayers(), opts)
default:
// Rely on the built-in CUDA based server which will fall back to CPU
return newLlamaExtServer(model, adapters, projectors, ggml.NumLayers(), opts)
}
}
// Give any native cgo implementations an opportunity to initialize
func Init(workdir string) error {
return nativeInit(workdir)
} }
#include "rocm_shim.h"
#include <stdio.h>
#include <string.h>
#ifndef _WIN32
#include <dlfcn.h>
#define LOAD_LIBRARY(lib, flags) dlopen(lib, flags)
#define LOAD_SYMBOL(handle, sym) dlsym(handle, sym)
#define LOAD_ERR() dlerror()
#define UNLOAD_LIBRARY(handle) dlclose(handle)
#else
#include <windows.h>
#define LOAD_LIBRARY(lib, flags) LoadLibrary(lib)
#define LOAD_SYMBOL(handle, sym) GetProcAddress(handle, sym)
#define UNLOAD_LIBRARY(handle) FreeLibrary(handle)
// TODO - refactor this with proper error message handling on windows
inline static char *LOAD_ERR() {
static char errbuf[8];
snprintf(errbuf, 8, "0x%lx", GetLastError());
return errbuf;
}
#endif
void rocm_shim_init(const char *libPath, struct rocm_llama_server *s,
ext_server_resp_t *err) {
int i = 0;
struct lookup {
char *s;
void **p;
} l[] = {
{"llama_server_init", (void *)&s->llama_server_init},
{"llama_server_start", (void *)&s->llama_server_start},
{"llama_server_stop", (void *)&s->llama_server_stop},
{"llama_server_completion", (void *)&s->llama_server_completion},
{"llama_server_completion_next_result",
(void *)&s->llama_server_completion_next_result},
{"llama_server_completion_cancel",
(void *)&s->llama_server_completion_cancel},
{"llama_server_release_task_result",
(void *)&s->llama_server_release_task_result},
{"llama_server_tokenize", (void *)&s->llama_server_tokenize},
{"llama_server_detokenize", (void *)&s->llama_server_detokenize},
{"llama_server_embedding", (void *)&s->llama_server_embedding},
{"llama_server_release_json_resp",
(void *)&s->llama_server_release_json_resp},
{"", NULL},
};
printf("Lazy loading %s library\n", libPath);
s->handle = LOAD_LIBRARY(libPath, RTLD_LAZY);
if (!s->handle) {
err->id = -1;
snprintf(
err->msg, err->msg_len,
"Unable to load rocm server library: %s (If you have a Radeon card, "
"did you install the ROCM libraries?)",
LOAD_ERR());
return;
}
for (i = 0; l[i].p != NULL; i++) {
*l[i].p = LOAD_SYMBOL(s->handle, l[i].s);
if (!*l[i].p) {
UNLOAD_LIBRARY(s->handle);
err->id = -1;
snprintf(err->msg, err->msg_len, "symbol lookup for %s failed: %s",
l[i].s, LOAD_ERR());
return;
}
}
}
inline void rocm_shim_llama_server_init(struct rocm_llama_server s,
ext_server_params_t *sparams,
ext_server_resp_t *err) {
s.llama_server_init(sparams, err);
}
inline void rocm_shim_llama_server_start(struct rocm_llama_server s) {
s.llama_server_start();
}
inline void rocm_shim_llama_server_stop(struct rocm_llama_server s) {
s.llama_server_stop();
}
inline void rocm_shim_llama_server_completion(struct rocm_llama_server s,
const char *json_req,
ext_server_resp_t *resp) {
s.llama_server_completion(json_req, resp);
}
inline void rocm_shim_llama_server_completion_next_result(
struct rocm_llama_server s, const int task_id,
ext_server_task_result_t *result) {
s.llama_server_completion_next_result(task_id, result);
}
inline void rocm_shim_llama_server_completion_cancel(struct rocm_llama_server s,
const int task_id,
ext_server_resp_t *err) {
s.llama_server_completion_cancel(task_id, err);
}
inline void rocm_shim_llama_server_release_task_result(
struct rocm_llama_server s, ext_server_task_result_t *result) {
s.llama_server_release_task_result(result);
}
inline void rocm_shim_llama_server_tokenize(struct rocm_llama_server s,
const char *json_req,
char **json_resp,
ext_server_resp_t *err) {
s.llama_server_tokenize(json_req, json_resp, err);
}
inline void rocm_shim_llama_server_detokenize(struct rocm_llama_server s,
const char *json_req,
char **json_resp,
ext_server_resp_t *err) {
s.llama_server_detokenize(json_req, json_resp, err);
}
inline void rocm_shim_llama_server_embedding(struct rocm_llama_server s,
const char *json_req,
char **json_resp,
ext_server_resp_t *err) {
s.llama_server_embedding(json_req, json_resp, err);
}
inline void rocm_shim_llama_server_release_json_resp(struct rocm_llama_server s,
char **json_resp) {
s.llama_server_release_json_resp(json_resp);
}
#include <stdlib.h>
#include "server.h"
#ifdef __cplusplus
extern "C" {
#endif
struct rocm_llama_server {
void *handle;
void (*llama_server_init)(ext_server_params_t *sparams,
ext_server_resp_t *err);
void (*llama_server_start)();
void (*llama_server_stop)();
void (*llama_server_completion)(const char *json_req,
ext_server_resp_t *resp);
void (*llama_server_completion_next_result)(const int task_id,
ext_server_task_result_t *result);
void (*llama_server_completion_cancel)(const int task_id,
ext_server_resp_t *err);
void (*llama_server_release_task_result)(ext_server_task_result_t *result);
void (*llama_server_tokenize)(const char *json_req, char **json_resp,
ext_server_resp_t *err);
void (*llama_server_detokenize)(const char *json_req, char **json_resp,
ext_server_resp_t *err);
void (*llama_server_embedding)(const char *json_req, char **json_resp,
ext_server_resp_t *err);
void (*llama_server_release_json_resp)(char **json_resp);
};
void rocm_shim_init(const char *libPath, struct rocm_llama_server *s,
ext_server_resp_t *err);
// No good way to call C function pointers from Go so inline the indirection
void rocm_shim_llama_server_init(struct rocm_llama_server s,
ext_server_params_t *sparams,
ext_server_resp_t *err);
void rocm_shim_llama_server_start(struct rocm_llama_server s);
void rocm_shim_llama_server_stop(struct rocm_llama_server s);
void rocm_shim_llama_server_completion(struct rocm_llama_server s,
const char *json_req,
ext_server_resp_t *resp);
void rocm_shim_llama_server_completion_next_result(
struct rocm_llama_server s, const int task_id,
ext_server_task_result_t *result);
void rocm_shim_llama_server_completion_cancel(struct rocm_llama_server s,
const int task_id,
ext_server_resp_t *err);
void rocm_shim_llama_server_release_task_result(
struct rocm_llama_server s, ext_server_task_result_t *result);
void rocm_shim_llama_server_tokenize(struct rocm_llama_server s,
const char *json_req, char **json_resp,
ext_server_resp_t *err);
void rocm_shim_llama_server_detokenize(struct rocm_llama_server s,
const char *json_req, char **json_resp,
ext_server_resp_t *err);
void rocm_shim_llama_server_embedding(struct rocm_llama_server s,
const char *json_req, char **json_resp,
ext_server_resp_t *err);
void rocm_shim_llama_server_release_json_resp(struct rocm_llama_server s,
char **json_resp);
#ifdef __cplusplus
}
#endif
\ No newline at end of file
package llm
import (
"fmt"
"github.com/jmorganca/ollama/api"
)
// no-op stubs for mac
func newRocmShimExtServer(model string, adapters, projectors []string, numLayers int64, opts api.Options) (extServer, error) {
// should never happen...
return nil, fmt.Errorf("ROCM GPUs not supported on Mac")
}
func nativeInit(workDir string) error {
return nil
}
//go:build !darwin
package llm
/*
#include <stdlib.h>
#include "rocm_shim.h"
*/
import "C"
import (
"context"
"embed"
"errors"
"fmt"
"io"
"io/fs"
"log"
"os"
"path/filepath"
"runtime"
"sync"
"unsafe"
"github.com/jmorganca/ollama/api"
)
//go:embed llama.cpp/gguf/build/*/lib/*
var libEmbed embed.FS
var RocmShimMissing = fmt.Errorf("ROCm shim library not included in this build of ollama. Radeon GPUs are not supported")
var NoShim = true
type shimExtServer struct {
s C.struct_rocm_llama_server
options api.Options
}
// Note: current implementation does not support concurrent instantiations
var shimMutex sync.Mutex
var llm *shimExtServer
func (llm *shimExtServer) llama_server_init(sparams *C.ext_server_params_t, err *C.ext_server_resp_t) {
C.rocm_shim_llama_server_init(llm.s, sparams, err)
}
func (llm *shimExtServer) llama_server_start() {
C.rocm_shim_llama_server_start(llm.s)
}
func (llm *shimExtServer) llama_server_stop() {
C.rocm_shim_llama_server_stop(llm.s)
}
func (llm *shimExtServer) llama_server_completion(json_req *C.char, resp *C.ext_server_resp_t) {
C.rocm_shim_llama_server_completion(llm.s, json_req, resp)
}
func (llm *shimExtServer) llama_server_completion_next_result(task_id C.int, resp *C.ext_server_task_result_t) {
C.rocm_shim_llama_server_completion_next_result(llm.s, task_id, resp)
}
func (llm *shimExtServer) llama_server_completion_cancel(task_id C.int, err *C.ext_server_resp_t) {
C.rocm_shim_llama_server_completion_cancel(llm.s, task_id, err)
}
func (llm *shimExtServer) llama_server_release_task_result(result *C.ext_server_task_result_t) {
C.rocm_shim_llama_server_release_task_result(llm.s, result)
}
func (llm *shimExtServer) llama_server_tokenize(json_req *C.char, json_resp **C.char, err *C.ext_server_resp_t) {
C.rocm_shim_llama_server_tokenize(llm.s, json_req, json_resp, err)
}
func (llm *shimExtServer) llama_server_detokenize(json_req *C.char, json_resp **C.char, err *C.ext_server_resp_t) {
C.rocm_shim_llama_server_detokenize(llm.s, json_req, json_resp, err)
}
func (llm *shimExtServer) llama_server_embedding(json_req *C.char, json_resp **C.char, err *C.ext_server_resp_t) {
C.rocm_shim_llama_server_embedding(llm.s, json_req, json_resp, err)
}
func (llm *shimExtServer) llama_server_release_json_resp(json_resp **C.char) {
C.rocm_shim_llama_server_release_json_resp(llm.s, json_resp)
}
func newRocmShimExtServer(model string, adapters, projectors []string, numLayers int64, opts api.Options) (extServer, error) {
if NoShim {
return nil, RocmShimMissing
}
log.Printf("Loading ROCM llm server")
if llm == nil {
return nil, fmt.Errorf("nativeInit wasn't called or library load failed")
}
llm.options = opts
return newExtServer(llm, model, adapters, projectors, numLayers, opts)
}
func (llm *shimExtServer) Predict(ctx context.Context, pred PredictOpts, fn func(PredictResult)) error {
return predict(llm, llm.options, ctx, pred, fn)
}
func (llm *shimExtServer) Encode(ctx context.Context, prompt string) ([]int, error) {
return encode(llm, ctx, prompt)
}
func (llm *shimExtServer) Decode(ctx context.Context, tokens []int) (string, error) {
return decode(llm, ctx, tokens)
}
func (llm *shimExtServer) Embedding(ctx context.Context, input string) ([]float64, error) {
return embedding(llm, ctx, input)
}
func (llm *shimExtServer) Close() {
close(llm)
}
func nativeInit(workdir string) error {
err := extractLib(workdir)
if err != nil {
if err == RocmShimMissing {
log.Printf("%s", err)
return nil
}
return err
}
// Verify we have permissions - either running as root, or we have group access to the driver
fd, err := os.OpenFile("/dev/kfd", os.O_RDWR, 0666)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
log.Fatalf("Radeon card detected, but permissions not set up properly. Either run ollama as root, or add your user account to the render group.")
return err
} else if errors.Is(err, fs.ErrNotExist) {
// expected behavior without a radeon card
return nil
}
return fmt.Errorf("failed to check permission on /dev/kfd: %w", err)
}
fd.Close()
shimMutex.Lock()
defer shimMutex.Unlock()
if llm != nil {
return nil
}
var libName string
switch runtime.GOOS {
case "darwin":
// shouldn't happen
return nil
case "linux":
libName = "librocm_server.so"
case "windows":
libName = "rocm_server.dll"
default:
// shouldn't happen
return nil
}
libPath := C.CString(filepath.Join(workdir, libName))
defer C.free(unsafe.Pointer(libPath))
resp := newExtServerResp(128)
defer freeExtServerResp(resp)
var srv C.struct_rocm_llama_server
C.rocm_shim_init(libPath, &srv, &resp)
if resp.id < 0 {
// TODO - consider softening this failure mode to allow fall-back to the CUDA based built-in llm
// and run against CPU
return fmt.Errorf("Unable to load AMD GPU library: %s", C.GoString(resp.msg))
}
llm = &shimExtServer{
s: srv,
options: api.DefaultOptions(),
}
return nil
}
func extractLib(workDir string) error {
files, err := fs.Glob(libEmbed, "llama.cpp/gguf/build/*/lib/*rocm_server*")
if err != nil || len(files) == 0 {
// this is expected, ollama may be compiled without shim library packed in
return RocmShimMissing
}
if len(files) != 1 {
// Shouldn't happen, but just use the first one we find
log.Printf("WARNING: multiple rocm libraries detected - using %s", files[0])
}
srcFile, err := libEmbed.Open(files[0])
if err != nil {
return fmt.Errorf("read ROCm shim %s: %v", files[0], err)
}
defer srcFile.Close()
if err := os.MkdirAll(workDir, 0o755); err != nil {
return fmt.Errorf("create ROCm shim temp dir %s: %v", workDir, err)
}
destFile := filepath.Join(workDir, filepath.Base(files[0]))
_, err = os.Stat(destFile)
switch {
case errors.Is(err, os.ErrNotExist):
destFile, err := os.OpenFile(destFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
if err != nil {
return fmt.Errorf("write ROCm shim %s: %v", files[0], err)
}
defer destFile.Close()
if _, err := io.Copy(destFile, srcFile); err != nil {
return fmt.Errorf("copy ROCm shim %s: %v", files[0], err)
}
case err != nil:
return fmt.Errorf("stat ROCm shim %s: %v", files[0], err)
}
NoShim = false
return nil
}
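The /dev/kfd check in nativeInit above expects read/write access to the AMD KFD device, which normally means membership in the render group (and on some distros the video group as well). A hedged sketch of the typical setup, assuming a standard ROCm driver install:
# grant the current user access to /dev/kfd and the render nodes (log out/in afterwards)
sudo usermod -a -G render $USER
sudo usermod -a -G video $USER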
@@ -8,7 +8,7 @@ export GOFLAGS="'-ldflags=-w -s \"-X=github.com/jmorganca/ollama/version.Version
mkdir -p dist
for TARGETARCH in amd64 arm64; do
-docker buildx build --load --platform=linux/$TARGETARCH --build-arg=VERSION --build-arg=GOFLAGS -f Dockerfile.build -t builder:$TARGETARCH .
+docker buildx build --load --progress=plain --platform=linux/$TARGETARCH --build-arg=VERSION --build-arg=GOFLAGS -f Dockerfile.build -t builder:$TARGETARCH .
docker create --platform linux/$TARGETARCH --name builder-$TARGETARCH builder:$TARGETARCH
docker cp builder-$TARGETARCH:/go/src/github.com/jmorganca/ollama/ollama ./dist/ollama-linux-$TARGETARCH
docker rm builder-$TARGETARCH
#!/usr/bin/env python3
import subprocess
import sys
from urllib.parse import urlparse
from git import Repo
# Helper script to be able to build on remote repos using git to push local changes
# (e.g. particularly helpful to target a remote windows build system)
#
# Typical windows remote git config looks like this:
#
#[remote "windows-pa"]
# url = jdoe@desktop-foo:C:/Users/Jdoe/code/ollama
# fetch = +refs/heads/*:refs/remotes/windows-pa/*
# uploadpack = powershell git upload-pack
# receivepack = powershell git receive-pack
#
# TODO - add argparse and make this more configurable
# - force flag becomes optional
# - generate, build or test ...
# Note: remote repo will need this run once:
# git config --local receive.denyCurrentBranch updateInstead
repo = Repo(".")
# On linux, add links in /usr/local/bin to the go binaries to avoid needing this
# GoCmd = "/usr/local/go/bin/go"
GoCmd = "go"
if repo.is_dirty():
print("Tree is dirty. Commit your changes before running this script")
sys.exit(1)
if len(sys.argv) != 2:
print("Please specify the remote name: " + ', '.join([r.name for r in repo.remotes]))
sys.exit(1)
remote_name = sys.argv[1]
remote = {r.name: r for r in repo.remotes}[remote_name]
raw_url = list(remote.urls)[0]
url = urlparse(raw_url)
# Windows urls don't quite parse properly
if url.scheme == "" and url.netloc == "":
url = urlparse("ssh://" + raw_url)
print("URL: " + str(url))
netloc = url.netloc.split(":")[0]
path = url.path
branch_name = repo.active_branch.name
print("Force pushing content to remote...")
# Use with care given the force push
remote.push(force=True).raise_if_error()
print("Ensuring correct branch checked out on remote via ssh...")
subprocess.check_call(['ssh', netloc, 'cd', path, ';', 'git', 'checkout', branch_name])
# TODO - add some hardening to try to figure out how to set up the path properly
# subprocess.check_call(['ssh', netloc, 'cd', path, ';', 'env'])
# TODO - or consider paramiko maybe
print("Performing generate")
subprocess.check_call(['ssh', netloc, 'cd', path, ';', GoCmd, 'generate', './...'])
print("Building")
subprocess.check_call(['ssh', netloc, 'cd', path, ';', GoCmd, 'build', '.'])
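A hedged usage sketch for this helper; the scripts/remote_build.py path is an assumption, and the remote name comes from the sample git config in the comments above:
# the script depends on GitPython (imported as `git`)
pip install GitPython
python3 scripts/remote_build.py windows-pa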
@@ -2,14 +2,17 @@ package server
import (
"context"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/jmorganca/ollama/api"
"github.com/jmorganca/ollama/llm"
)
// TODO - this would ideally be in the llm package, but that would require some refactoring of interfaces in the server
@@ -33,12 +36,16 @@ var (
}
resp = [2]string{
"once upon a time",
-"fourth thursday",
+"united states thanksgiving",
}
)
func TestIntegrationSimpleOrcaMini(t *testing.T) {
SkipIFNoTestData(t)
workDir, err := os.MkdirTemp("", "ollama")
require.NoError(t, err)
defer os.RemoveAll(workDir)
require.NoError(t, llm.Init(workDir))
ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
defer cancel()
opts := api.DefaultOptions()
@@ -56,7 +63,13 @@ func TestIntegrationSimpleOrcaMini(t *testing.T) {
// get true concurrency working with n_parallel support in the backend
func TestIntegrationConcurrentPredictOrcaMini(t *testing.T) {
SkipIFNoTestData(t)
t.Skip("concurrent prediction on single runner not currently supported")
workDir, err := os.MkdirTemp("", "ollama")
require.NoError(t, err)
defer os.RemoveAll(workDir)
require.NoError(t, llm.Init(workDir))
ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
defer cancel()
opts := api.DefaultOptions()
@@ -79,6 +92,10 @@ func TestIntegrationConcurrentPredictOrcaMini(t *testing.T) {
func TestIntegrationConcurrentRunnersOrcaMini(t *testing.T) {
SkipIFNoTestData(t)
workDir, err := os.MkdirTemp("", "ollama")
require.NoError(t, err)
defer os.RemoveAll(workDir)
require.NoError(t, llm.Init(workDir))
ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
defer cancel()
opts := api.DefaultOptions()
@@ -87,6 +104,7 @@ func TestIntegrationConcurrentRunnersOrcaMini(t *testing.T) {
var wg sync.WaitGroup
wg.Add(len(req))
t.Logf("Running %d concurrently", len(req))
for i := 0; i < len(req); i++ {
go func(i int) {
defer wg.Done()
@@ -25,6 +25,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/jmorganca/ollama/api"
"github.com/jmorganca/ollama/gpu"
"github.com/jmorganca/ollama/llm"
"github.com/jmorganca/ollama/parser"
"github.com/jmorganca/ollama/version"
@@ -81,20 +82,6 @@ func load(c *gin.Context, modelName string, reqOpts map[string]interface{}, sess
return nil, err
}
ctx := c.Request.Context()
// check if the loaded model is still running in a subprocess, in case something unexpected happened
if loaded.runner != nil {
if err := loaded.runner.Ping(ctx); err != nil {
log.Print("loaded llm process not responding, closing now")
// the subprocess is no longer running, so close it
loaded.runner.Close()
loaded.runner = nil
loaded.Model = nil
loaded.Options = nil
}
}
needLoad := loaded.runner == nil || // is there a model loaded?
loaded.ModelPath != model.ModelPath || // has the base model changed?
!reflect.DeepEqual(loaded.AdapterPaths, model.AdapterPaths) || // have the adapters changed?
@@ -905,9 +892,12 @@ func Serve(ln net.Listener) error {
os.Exit(0)
}()
-if runtime.GOOS == "linux" {
+if err := llm.Init(s.WorkDir); err != nil {
return fmt.Errorf("unable to initialize llm library %w", err)
}
if runtime.GOOS == "linux" { // TODO - windows too
// check compatibility to log warnings
-if _, err := llm.CheckVRAM(); err != nil {
+if _, err := gpu.CheckVRAM(); err != nil {
log.Print(err.Error())
}
}