Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
ollama
Commits
c9167494
Commit
c9167494
authored
Oct 23, 2023
by
Michael Yang
Browse files
update default log target
parent
ba2da6ce
Changes
6
Hide whitespace changes
Inline
Side-by-side
Showing
6 changed files
with
29 additions
and
29 deletions
+29
-29
llm/llama.cpp/generate_darwin_amd64.go
llm/llama.cpp/generate_darwin_amd64.go
+1
-1
llm/llama.cpp/generate_darwin_arm64.go
llm/llama.cpp/generate_darwin_arm64.go
+1
-1
llm/llama.cpp/generate_linux.go
llm/llama.cpp/generate_linux.go
+1
-1
llm/llama.cpp/generate_windows.go
llm/llama.cpp/generate_windows.go
+1
-1
llm/llama.cpp/patches/0001-remove-warm-up-logging.patch
llm/llama.cpp/patches/0001-remove-warm-up-logging.patch
+0
-25
llm/llama.cpp/patches/0001-update-default-log-target.patch
llm/llama.cpp/patches/0001-update-default-log-target.patch
+25
-0
No files found.
llm/llama.cpp/generate_darwin_amd64.go
View file @
c9167494
...
@@ -12,7 +12,7 @@ package llm
...
@@ -12,7 +12,7 @@ package llm
//go:generate mv ggml/build/cpu/bin/server ggml/build/cpu/bin/ollama-runner
//go:generate mv ggml/build/cpu/bin/server ggml/build/cpu/bin/ollama-runner
//go:generate git submodule update --force gguf
//go:generate git submodule update --force gguf
//go:generate git -C gguf apply ../patches/0001-remove-warm-up-logging.patch
//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
//go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=x86_64 -DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
//go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=x86_64 -DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
//go:generate cmake --build gguf/build/cpu --target server --config Release
//go:generate cmake --build gguf/build/cpu --target server --config Release
//go:generate mv gguf/build/cpu/bin/server gguf/build/cpu/bin/ollama-runner
//go:generate mv gguf/build/cpu/bin/server gguf/build/cpu/bin/ollama-runner
llm/llama.cpp/generate_darwin_arm64.go
View file @
c9167494
...
@@ -12,7 +12,7 @@ package llm
...
@@ -12,7 +12,7 @@ package llm
//go:generate mv ggml/build/metal/bin/server ggml/build/metal/bin/ollama-runner
//go:generate mv ggml/build/metal/bin/server ggml/build/metal/bin/ollama-runner
//go:generate git submodule update --force gguf
//go:generate git submodule update --force gguf
//go:generate git -C gguf apply ../patches/0001-remove-warm-up-logging.patch
//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
//go:generate cmake -S gguf -B gguf/build/metal -DLLAMA_METAL=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=arm64 -DCMAKE_OSX_ARCHITECTURES=arm64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
//go:generate cmake -S gguf -B gguf/build/metal -DLLAMA_METAL=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=arm64 -DCMAKE_OSX_ARCHITECTURES=arm64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
//go:generate cmake --build gguf/build/metal --target server --config Release
//go:generate cmake --build gguf/build/metal --target server --config Release
//go:generate mv gguf/build/metal/bin/server gguf/build/metal/bin/ollama-runner
//go:generate mv gguf/build/metal/bin/server gguf/build/metal/bin/ollama-runner
llm/llama.cpp/generate_linux.go
View file @
c9167494
...
@@ -13,7 +13,7 @@ package llm
...
@@ -13,7 +13,7 @@ package llm
//go:generate git submodule update --force gguf
//go:generate git submodule update --force gguf
//go:generate git -C gguf apply ../patches/0001-copy-cuda-runtime-libraries.patch
//go:generate git -C gguf apply ../patches/0001-copy-cuda-runtime-libraries.patch
//go:generate git -C gguf apply ../patches/0001-remove-warm-up-logging.patch
//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
//go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_K_QUANTS=on
//go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_K_QUANTS=on
//go:generate cmake --build gguf/build/cpu --target server --config Release
//go:generate cmake --build gguf/build/cpu --target server --config Release
//go:generate mv gguf/build/cpu/bin/server gguf/build/cpu/bin/ollama-runner
//go:generate mv gguf/build/cpu/bin/server gguf/build/cpu/bin/ollama-runner
...
...
llm/llama.cpp/generate_windows.go
View file @
c9167494
...
@@ -10,7 +10,7 @@ package llm
...
@@ -10,7 +10,7 @@ package llm
//go:generate cmd /c move ggml\build\cpu\bin\Release\server.exe ggml\build\cpu\bin\Release\ollama-runner.exe
//go:generate cmd /c move ggml\build\cpu\bin\Release\server.exe ggml\build\cpu\bin\Release\ollama-runner.exe
//go:generate git submodule update --force gguf
//go:generate git submodule update --force gguf
//go:generate git -C gguf apply ../patches/0001-remove-warm-up-logging.patch
//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
//go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_K_QUANTS=on
//go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_K_QUANTS=on
//go:generate cmake --build gguf/build/cpu --target server --config Release
//go:generate cmake --build gguf/build/cpu --target server --config Release
//go:generate cmd /c move gguf\build\cpu\bin\Release\server.exe gguf\build\cpu\bin\Release\ollama-runner.exe
//go:generate cmd /c move gguf\build\cpu\bin\Release\server.exe gguf\build\cpu\bin\Release\ollama-runner.exe
llm/llama.cpp/patches/0001-remove-warm-up-logging.patch
deleted
100644 → 0
View file @
ba2da6ce
From 8dbb5449db259a9c24796e7927d89bee98b6c8f5 Mon Sep 17 00:00:00 2001
From: Bruce MacDonald <brucewmacdonald@gmail.com>
Date: Thu, 5 Oct 2023 11:21:12 -0400
Subject: [PATCH] remove warm up logging
---
common/common.cpp | 2 --
1 file changed, 2 deletions(-)
diff --git a/common/common.cpp b/common/common.cpp
index 7370017..c4433fe 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -839,8 +839,6 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
}
{
- LOG("warming up the model with an empty run\n");
-
std::vector<llama_token> tmp = { llama_token_bos(lctx), llama_token_eos(lctx), };
llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, 0));
llama_kv_cache_tokens_rm(lctx, -1, -1);
--
2.39.2 (Apple Git-143)
llm/llama.cpp/patches/0001-update-default-log-target.patch
0 → 100644
View file @
c9167494
From 6465fec6290f0a7f5d4d0fbe6bcf634e4810dde6 Mon Sep 17 00:00:00 2001
From: Michael Yang <mxyng@pm.me>
Date: Mon, 23 Oct 2023 10:39:34 -0700
Subject: [PATCH] default log stderr
---
common/log.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/common/log.h b/common/log.h
index b8953fd..25522cd 100644
--- a/common/log.h
+++ b/common/log.h
@@ -90,7 +90,7 @@
// }
//
#ifndef LOG_TARGET
- #define LOG_TARGET log_handler()
+ #define LOG_TARGET nullptr
#endif
#ifndef LOG_TEE_TARGET
--
2.42.0
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment