"tests/vscode:/vscode.git/clone" did not exist on "a6d9f6a1a9a9ede2c64972d83ccee192b801c4a0"
Commit 0ce8bcfd authored by xuxzh1

init

parent b0135f4b
@@ -140,7 +140,7 @@ func PullIfMissing(ctx context.Context, client *api.Client, modelName string) er
	showCtx, cancel := context.WithDeadlineCause(
		ctx,
-		time.Now().Add(5*time.Second),
+		time.Now().Add(10*time.Second),
		fmt.Errorf("show for existing model %s took too long", modelName),
	)
	defer cancel()
@@ -162,7 +162,7 @@ func PullIfMissing(ctx context.Context, client *api.Client, modelName string) er
	fn := func(resp api.ProgressResponse) error {
		// fmt.Print(".")
		if !stallTimer.Reset(stallDuration) {
-			return fmt.Errorf("stall was detected, aborting status reporting")
+			return errors.New("stall was detected, aborting status reporting")
		}
		return nil
	}
@@ -180,7 +180,7 @@ func PullIfMissing(ctx context.Context, client *api.Client, modelName string) er
	select {
	case <-stallTimer.C:
-		return fmt.Errorf("download stalled")
+		return errors.New("download stalled")
	case <-done:
		return pullError
	}
@@ -243,7 +243,7 @@ func DoGenerate(ctx context.Context, t *testing.T, client *api.Client, genReq ap
		// fmt.Print(".")
		buf.Write([]byte(response.Response))
		if !stallTimer.Reset(streamTimeout) {
-			return fmt.Errorf("stall was detected while streaming response, aborting")
+			return errors.New("stall was detected while streaming response, aborting")
		}
		return nil
	}
@@ -287,41 +287,46 @@ func DoGenerate(ctx context.Context, t *testing.T, client *api.Client, genReq ap
func GenerateRequests() ([]api.GenerateRequest, [][]string) {
	return []api.GenerateRequest{
		{
			Model:     "orca-mini",
			Prompt:    "why is the ocean blue?",
			Stream:    &stream,
+			KeepAlive: &api.Duration{Duration: 10 * time.Second},
			Options: map[string]interface{}{
				"seed":        42,
				"temperature": 0.0,
			},
		}, {
			Model:     "orca-mini",
			Prompt:    "why is the color of dirt brown?",
			Stream:    &stream,
+			KeepAlive: &api.Duration{Duration: 10 * time.Second},
			Options: map[string]interface{}{
				"seed":        42,
				"temperature": 0.0,
			},
		}, {
			Model:     "orca-mini",
			Prompt:    "what is the origin of the us thanksgiving holiday?",
			Stream:    &stream,
+			KeepAlive: &api.Duration{Duration: 10 * time.Second},
			Options: map[string]interface{}{
				"seed":        42,
				"temperature": 0.0,
			},
		}, {
			Model:     "orca-mini",
			Prompt:    "what is the origin of independence day?",
			Stream:    &stream,
+			KeepAlive: &api.Duration{Duration: 10 * time.Second},
			Options: map[string]interface{}{
				"seed":        42,
				"temperature": 0.0,
			},
		}, {
			Model:     "orca-mini",
			Prompt:    "what is the composition of air?",
			Stream:    &stream,
+			KeepAlive: &api.Duration{Duration: 10 * time.Second},
			Options: map[string]interface{}{
				"seed":        42,
				"temperature": 0.0,
@@ -329,10 +334,10 @@ func GenerateRequests() ([]api.GenerateRequest, [][]string) {
			},
		},
		[][]string{
-			[]string{"sunlight"},
-			[]string{"soil", "organic", "earth", "black", "tan"},
-			[]string{"england", "english", "massachusetts", "pilgrims"},
-			[]string{"fourth", "july", "declaration", "independence"},
-			[]string{"nitrogen", "oxygen", "carbon", "dioxide"},
+			{"sunlight"},
+			{"soil", "organic", "earth", "black", "tan"},
+			{"england", "english", "massachusetts", "pilgrims", "british"},
+			{"fourth", "july", "declaration", "independence"},
+			{"nitrogen", "oxygen", "carbon", "dioxide"},
		}
}
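Each keyword slice above pairs positionally with the request at the same index; a generated response is presumably accepted if it mentions at least one of the expected words. A hedged Go sketch of that kind of check (the helper name and shape are illustrative, not the repository's test code):

package keywords

import "strings"

// containsAny reports whether resp mentions at least one of the expected
// keywords, case-insensitively.
func containsAny(resp string, expected []string) bool {
	resp = strings.ToLower(resp)
	for _, k := range expected {
		if strings.Contains(resp, strings.ToLower(k)) {
			return true
		}
	}
	return false
}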
set(TARGET ollama_llama_server)
option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
@@ -7,7 +6,7 @@ install(TARGETS ${TARGET} RUNTIME)
target_compile_definitions(${TARGET} PRIVATE
	SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
)
-target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
+target_link_libraries(${TARGET} PRIVATE ggml llama common llava ${CMAKE_THREAD_LIBS_INIT})
if (WIN32)
	TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
endif()
......
@@ -41,8 +41,10 @@
#if defined(_WIN32)
#include <windows.h>
+#include <errhandlingapi.h>
#endif
+#include <algorithm>
#include <cstddef>
#include <thread>
#include <chrono>
@@ -56,7 +58,6 @@ struct server_params {
    std::string hostname = "127.0.0.1";
    std::vector<std::string> api_keys;
    std::string public_path = "examples/server/public";
-   std::string chat_template = "";
    int32_t port = 8080;
    int32_t read_timeout = 600;
    int32_t write_timeout = 600;
@@ -359,7 +360,6 @@ struct llama_server_context
    // slots / clients
    std::vector<server_slot> slots;
-   json default_generation_settings_for_props;
    llama_server_queue queue_tasks;
    llama_server_response queue_results;
@@ -403,7 +403,9 @@ struct llama_server_context
            }
        }
-       std::tie(model, ctx) = llama_init_from_gpt_params(params);
+       auto init_result = llama_init_from_gpt_params(params);
+       model = init_result.model;
+       ctx = init_result.context;
        if (model == nullptr)
        {
            LOG_ERROR("unable to load model", {{"model", params.model}});
@@ -428,16 +430,6 @@ struct llama_server_context
        return true;
    }
void validate_model_chat_template(server_params & sparams) {
llama_chat_message chat[] = {{"user", "test"}};
std::vector<char> buf(1);
int res = llama_chat_apply_template(model, nullptr, chat, 1, true, buf.data(), buf.size());
if (res < 0) {
LOG_ERROR("The chat template comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses", {});
sparams.chat_template = "chatml";
}
}
void initialize() { void initialize() {
// create slots // create slots
all_slots_are_idle = true; all_slots_are_idle = true;
...@@ -483,9 +475,6 @@ struct llama_server_context ...@@ -483,9 +475,6 @@ struct llama_server_context
slots.push_back(slot); slots.push_back(slot);
} }
default_generation_settings_for_props = get_formated_generation(slots.front());
default_generation_settings_for_props["seed"] = -1;
batch = llama_batch_init(n_ctx, 0, params.n_parallel); batch = llama_batch_init(n_ctx, 0, params.n_parallel);
} }
@@ -584,7 +573,7 @@ struct llama_server_context
        slot->sparams.mirostat_eta = json_value(data, "mirostat_eta", default_sparams.mirostat_eta);
        slot->sparams.penalize_nl = json_value(data, "penalize_nl", default_sparams.penalize_nl);
        slot->params.n_keep = json_value(data, "n_keep", slot->params.n_keep);
-       slot->params.seed = json_value(data, "seed", default_params.seed);
+       slot->sparams.seed = json_value(data, "seed", default_params.seed);
        slot->sparams.grammar = json_value(data, "grammar", default_sparams.grammar);
        slot->sparams.n_probs = json_value(data, "n_probs", default_sparams.n_probs);
        slot->sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep);
@@ -811,7 +800,6 @@ struct llama_server_context
            llama_sampling_free(slot->ctx_sampling);
        }
        slot->ctx_sampling = llama_sampling_init(slot->sparams);
-       llama_set_rng_seed(ctx, slot->params.seed);
        slot->command = LOAD_PROMPT;
        all_slots_are_idle = false;
@@ -835,7 +823,7 @@ struct llama_server_context
        system_tokens.clear();
        if (!system_prompt.empty()) {
-           system_tokens = ::llama_tokenize(ctx, system_prompt, add_bos_token);
+           system_tokens = ::llama_tokenize(ctx, system_prompt, true);
            llama_batch_clear(batch);
@@ -1398,12 +1386,50 @@ struct llama_server_context
        }
    }
std::string common_prefix(const std::string& str1, const std::string& str2) {
auto mismatch_pair = std::mismatch(str1.begin(), str1.end(), str2.begin());
return std::string(str1.begin(), mismatch_pair.first);
}
// Find the slot that has the greatest common prefix
server_slot *prefix_slot(const json &prompt) {
if (!prompt.is_string()) {
return nullptr;
}
std::string prompt_str = prompt.get<std::string>();
server_slot *slot = nullptr;
size_t longest = 0;
for (server_slot &s : slots) {
if (s.available() && s.prompt.is_string()) {
std::string s_prompt = s.prompt.get<std::string>();
std::string prefix = common_prefix(s_prompt, prompt_str);
if (prefix.size() > longest) {
slot = &s;
longest = prefix.size();
}
}
}
if (!slot) {
return get_slot(-1);
}
LOG_DEBUG("slot with common prefix found", {{
"slot_id", slot->id,
"characters", longest
}});
return slot;
}
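The new prefix_slot helper above routes a completion to the idle slot whose cached prompt shares the longest common prefix with the incoming one, so more of that slot's KV cache can be reused. A rough Go sketch of the same selection idea (illustrative only, not the server's actual data structures):

package slots

// commonPrefixLen returns the length of the longest common prefix of a and b.
func commonPrefixLen(a, b string) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

// pickSlot returns the index of the cached prompt sharing the longest prefix
// with prompt, or -1 when no cached prompt shares anything; a caller would
// then fall back to any free slot, as the diff does with get_slot(-1).
func pickSlot(cached []string, prompt string) int {
	best, bestLen := -1, 0
	for i, c := range cached {
		if l := commonPrefixLen(c, prompt); l > bestLen {
			best, bestLen = i, l
		}
	}
	return best
}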
    void process_single_task(task_server& task)
    {
        switch (task.type)
        {
            case TASK_TYPE_COMPLETION: {
-               server_slot *slot = get_slot(json_value(task.data, "slot_id", -1));
+               server_slot *slot = prefix_slot(task.data["prompt"]);
                if (slot == nullptr)
                {
                    // if no slot is available, we defer this task for processing later
@@ -1656,7 +1682,7 @@ struct llama_server_context
                slot.t_start_process_prompt = ggml_time_us();
                slot.t_start_genereration = 0;
-               prompt_tokens = tokenize(slot.prompt, system_prompt.empty() && add_bos_token); // add BOS if there isn't system prompt
+               prompt_tokens = tokenize(slot.prompt, system_prompt.empty()); // add BOS if there isn't system prompt
                slot.n_prompt_tokens = prompt_tokens.size();
@@ -1670,22 +1696,23 @@ struct llama_server_context
                if (slot.ga_n == 1 && slot.n_prompt_tokens >= slot.n_ctx)
                {
                    const int n_left = slot.n_ctx - slot.params.n_keep;
-                   const int n_block_size = n_left / 2;
-                   const int erased_blocks = (slot.n_prompt_tokens - slot.params.n_keep - n_block_size) / n_block_size;
+                   const int n_shift = n_left / 2;
+                   const int n_erase = slot.n_prompt_tokens - slot.params.n_keep - n_shift;
                    std::vector<llama_token> new_tokens(
                        prompt_tokens.begin(),
                        prompt_tokens.begin() + slot.params.n_keep);
                    new_tokens.insert(
                        new_tokens.end(),
-                       prompt_tokens.begin() + slot.params.n_keep + erased_blocks * n_block_size,
+                       prompt_tokens.begin() + slot.params.n_keep + n_erase,
                        prompt_tokens.end());
-                   LOG_VERBOSE("input truncated", {
+                   LOG_INFO("input truncated", {
                        {"n_ctx", slot.n_ctx},
                        {"n_keep", slot.params.n_keep},
                        {"n_left", n_left},
-                       {"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
+                       {"n_shift", n_shift},
+                       {"n_erase", n_erase},
                    });
                    slot.truncated = true;
                    prompt_tokens = new_tokens;
@@ -1720,7 +1747,7 @@ struct llama_server_context
                    slot.n_past -= 1;
                }
-               slot.n_prompt_tokens_processed = slot.n_prompt_tokens - slot.n_past;
+               slot.n_prompt_tokens_processed = slot.n_prompt_tokens;
                if (slot.ga_n != 1)
                {
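A quick worked example of the new truncation arithmetic (numbers are illustrative): with n_ctx = 8, n_keep = 2 and a 10-token prompt, n_left = 6, n_shift = 3 and n_erase = 10 - 2 - 3 = 5, so the middle five tokens are dropped and the truncated prompt is the 2 kept tokens plus the trailing 3.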
...@@ -2340,9 +2367,9 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g ...@@ -2340,9 +2367,9 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g
invalid_param = true; invalid_param = true;
break; break;
} }
-#ifndef GGML_USE_CUBLAS
-               fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Setting the split mode has no effect.\n");
-#endif // GGML_USE_CUBLAS
+#ifndef GGML_USE_CUDA
+               fprintf(stderr, "warning: llama.cpp was compiled without CUDA. Setting the split mode has no effect.\n");
+#endif // GGML_USE_CUDA
} }
else if (arg == "--tensor-split" || arg == "-ts") else if (arg == "--tensor-split" || arg == "-ts")
{ {
...@@ -2351,7 +2378,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g ...@@ -2351,7 +2378,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g
invalid_param = true; invalid_param = true;
break; break;
} }
-#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)
+#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
std::string arg_next = argv[i]; std::string arg_next = argv[i];
// split string by , and / // split string by , and /
...@@ -2372,8 +2399,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g ...@@ -2372,8 +2399,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g
} }
} }
#else
-               LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n", {});
-#endif // GGML_USE_CUBLAS
+               LOG_WARNING("llama.cpp was compiled without CUDA. It is not possible to set a tensor split.\n", {});
+#endif // GGML_USE_CUDA
} }
else if (arg == "--main-gpu" || arg == "-mg") else if (arg == "--main-gpu" || arg == "-mg")
{ {
...@@ -2382,7 +2409,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g ...@@ -2382,7 +2409,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g
invalid_param = true; invalid_param = true;
break; break;
} }
-#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)
+#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
params.main_gpu = std::stoi(argv[i]); params.main_gpu = std::stoi(argv[i]);
#else #else
LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.", {}); LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.", {});
...@@ -2395,7 +2422,10 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g ...@@ -2395,7 +2422,10 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g
invalid_param = true; invalid_param = true;
break; break;
} }
-               params.lora_adapter.emplace_back(argv[i], 1.0f);
+               params.lora_adapters.push_back({
+                   std::string(argv[i]),
+                   1.0,
+               });
params.use_mmap = false; params.use_mmap = false;
} }
else if (arg == "--lora-scaled") else if (arg == "--lora-scaled")
...@@ -2411,18 +2441,12 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g ...@@ -2411,18 +2441,12 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g
invalid_param = true; invalid_param = true;
break; break;
} }
-               params.lora_adapter.emplace_back(lora_adapter, std::stof(argv[i]));
+               params.lora_adapters.push_back({
+                   lora_adapter,
+                   std::stof(argv[i])
+               });
params.use_mmap = false; params.use_mmap = false;
} }
else if (arg == "--lora-base")
{
if (++i >= argc)
{
invalid_param = true;
break;
}
params.lora_base = argv[i];
}
else if (arg == "-v" || arg == "--verbose") else if (arg == "-v" || arg == "--verbose")
{ {
server_verbose = true; server_verbose = true;
...@@ -2540,7 +2564,6 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g ...@@ -2540,7 +2564,6 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g
invalid_param = true; invalid_param = true;
break; break;
} }
sparams.chat_template = argv[i];
} }
else if (arg == "--override-kv") else if (arg == "--override-kv")
{ {
...@@ -2715,6 +2738,9 @@ int wmain(int argc, wchar_t **wargv) { ...@@ -2715,6 +2738,9 @@ int wmain(int argc, wchar_t **wargv) {
for (int i = 0; i < argc; ++i) { for (int i = 0; i < argc; ++i) {
argv[i] = wchar_to_char(wargv[i]); argv[i] = wchar_to_char(wargv[i]);
} }
// Adjust error mode to avoid error dialog after we start.
SetErrorMode(SEM_FAILCRITICALERRORS);
#else #else
int main(int argc, char **argv) { int main(int argc, char **argv) {
#endif #endif
...@@ -3013,11 +3039,6 @@ int main(int argc, char **argv) { ...@@ -3013,11 +3039,6 @@ int main(int argc, char **argv) {
} }
const auto model_meta = llama.model_meta(); const auto model_meta = llama.model_meta();
if (sparams.chat_template.empty()) { // custom chat template is not supplied
// check if the template comes with the model is supported by us
llama.validate_model_chat_template(sparams);
}
// Middleware for API key validation // Middleware for API key validation
auto validate_api_key = [&sparams](const httplib::Request &req, httplib::Response &res) -> bool { auto validate_api_key = [&sparams](const httplib::Request &req, httplib::Response &res) -> bool {
// If API key is not set, skip validation // If API key is not set, skip validation
...@@ -3171,19 +3192,10 @@ int main(int argc, char **argv) { ...@@ -3171,19 +3192,10 @@ int main(int argc, char **argv) {
prompt = ""; prompt = "";
} }
json image_data;
if (body.count("image_data") != 0) {
image_data = body["image_data"];
}
else
{
image_data = "";
}
            // create and queue the task
            const int task_id = llama.queue_tasks.get_new_id();
            llama.queue_results.add_waiting_task_id(task_id);
-           llama.request_completion(task_id, { {"prompt", prompt}, { "n_predict", 0}, {"image_data", image_data} }, true, -1);
+           llama.request_completion(task_id, {{"prompt", prompt}}, true, -1);
            // get the result
            task_result result = llama.queue_results.recv(task_id);
......
# common logic across linux and darwin # common logic across linux and darwin
#
init_vars() { init_vars() {
case "${GOARCH}" in case "${GOARCH}" in
...@@ -9,13 +8,12 @@ init_vars() { ...@@ -9,13 +8,12 @@ init_vars() {
"arm64") "arm64")
ARCH="arm64" ARCH="arm64"
;; ;;
*) *)
ARCH=$(uname -m | sed -e "s/aarch64/arm64/g") ARCH=$(uname -m | sed -e "s/aarch64/arm64/g")
esac esac
LLAMACPP_DIR=../llama.cpp LLAMACPP_DIR=../llama.cpp
CMAKE_DEFS="" CMAKE_DEFS=""
    # This differs from llama.cpp
CMAKE_TARGETS="--target ollama_llama_server" CMAKE_TARGETS="--target ollama_llama_server"
if echo "${CGO_CFLAGS}" | grep -- '-g' >/dev/null; then if echo "${CGO_CFLAGS}" | grep -- '-g' >/dev/null; then
CMAKE_DEFS="-DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_VERBOSE_MAKEFILE=on -DLLAMA_GPROF=on -DLLAMA_SERVER_VERBOSE=on ${CMAKE_DEFS}" CMAKE_DEFS="-DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_VERBOSE_MAKEFILE=on -DLLAMA_GPROF=on -DLLAMA_SERVER_VERBOSE=on ${CMAKE_DEFS}"
...@@ -56,8 +54,8 @@ git_module_setup() { ...@@ -56,8 +54,8 @@ git_module_setup() {
echo "Cleaning up old submodule" echo "Cleaning up old submodule"
rm -rf ${LLAMACPP_DIR} rm -rf ${LLAMACPP_DIR}
fi fi
-    # git submodule init
-    # git submodule update --force ${LLAMACPP_DIR}
+    git submodule init
+    git submodule update --force ${LLAMACPP_DIR}
} }
......
...@@ -18,16 +18,16 @@ sign() { ...@@ -18,16 +18,16 @@ sign() {
fi fi
} }
-COMMON_DARWIN_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_METAL_MACOSX_VERSION_MIN=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DLLAMA_METAL_EMBED_LIBRARY=on"
+COMMON_DARWIN_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_METAL_MACOSX_VERSION_MIN=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DGGML_METAL_EMBED_LIBRARY=on -DGGML_OPENMP=off"
case "${GOARCH}" in case "${GOARCH}" in
"amd64") "amd64")
COMMON_CPU_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=off -DLLAMA_NATIVE=off" COMMON_CPU_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DGGML_METAL=off -DGGML_NATIVE=off"
# Static build for linking into the Go binary # Static build for linking into the Go binary
init_vars init_vars
CMAKE_TARGETS="--target llama --target ggml" CMAKE_TARGETS="--target llama --target ggml"
CMAKE_DEFS="${COMMON_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_BLAS=off -DGGML_ACCELERATE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
BUILD_DIR="../build/darwin/${ARCH}_static" BUILD_DIR="../build/darwin/${ARCH}_static"
echo "Building static library" echo "Building static library"
build build
...@@ -37,7 +37,7 @@ case "${GOARCH}" in ...@@ -37,7 +37,7 @@ case "${GOARCH}" in
# CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta) # CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
# #
init_vars init_vars
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=off -DGGML_BLAS=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
BUILD_DIR="../build/darwin/${ARCH}/cpu" BUILD_DIR="../build/darwin/${ARCH}/cpu"
echo "Building LCD CPU" echo "Building LCD CPU"
build build
...@@ -49,7 +49,7 @@ case "${GOARCH}" in ...@@ -49,7 +49,7 @@ case "${GOARCH}" in
# Approximately 400% faster than LCD on same CPU # Approximately 400% faster than LCD on same CPU
# #
init_vars init_vars
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=off -DGGML_BLAS=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
BUILD_DIR="../build/darwin/${ARCH}/cpu_avx" BUILD_DIR="../build/darwin/${ARCH}/cpu_avx"
echo "Building AVX CPU" echo "Building AVX CPU"
build build
...@@ -61,7 +61,7 @@ case "${GOARCH}" in ...@@ -61,7 +61,7 @@ case "${GOARCH}" in
# Approximately 10% faster than AVX on same CPU # Approximately 10% faster than AVX on same CPU
# #
init_vars init_vars
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=on -DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_AVX512=off -DLLAMA_FMA=on -DLLAMA_F16C=on ${CMAKE_DEFS}" CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=on -DGGML_BLAS=off -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}"
BUILD_DIR="../build/darwin/${ARCH}/cpu_avx2" BUILD_DIR="../build/darwin/${ARCH}/cpu_avx2"
echo "Building AVX2 CPU" echo "Building AVX2 CPU"
EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation" EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation"
...@@ -75,14 +75,14 @@ case "${GOARCH}" in ...@@ -75,14 +75,14 @@ case "${GOARCH}" in
# Static build for linking into the Go binary # Static build for linking into the Go binary
init_vars init_vars
CMAKE_TARGETS="--target llama --target ggml" CMAKE_TARGETS="--target llama --target ggml"
CMAKE_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DBUILD_SHARED_LIBS=off -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=off -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}"
BUILD_DIR="../build/darwin/${ARCH}_static" BUILD_DIR="../build/darwin/${ARCH}_static"
echo "Building static library" echo "Building static library"
build build
if [ -z "$OLLAMA_SKIP_METAL_GENERATE" ]; then if [ -z "$OLLAMA_SKIP_METAL_GENERATE" ]; then
init_vars init_vars
CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DLLAMA_ACCELERATE=on -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=on ${CMAKE_DEFS}" CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}"
BUILD_DIR="../build/darwin/${ARCH}/metal" BUILD_DIR="../build/darwin/${ARCH}/metal"
EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders" EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders"
build build
......
...@@ -26,8 +26,6 @@ amdGPUs() { ...@@ -26,8 +26,6 @@ amdGPUs() {
"gfx908:xnack-" "gfx908:xnack-"
"gfx90a:xnack+" "gfx90a:xnack+"
"gfx90a:xnack-" "gfx90a:xnack-"
"gfx906"
"gfx928"
"gfx940" "gfx940"
"gfx941" "gfx941"
"gfx942" "gfx942"
...@@ -53,7 +51,7 @@ if [ -z "${CUDACXX}" ]; then ...@@ -53,7 +51,7 @@ if [ -z "${CUDACXX}" ]; then
export CUDACXX=$(command -v nvcc) export CUDACXX=$(command -v nvcc)
fi fi
fi fi
-COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off"
+COMMON_CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off"
source $(dirname $0)/gen_common.sh source $(dirname $0)/gen_common.sh
init_vars init_vars
# git_module_setup # git_module_setup
...@@ -66,7 +64,7 @@ if [ -z "${OLLAMA_SKIP_STATIC_GENERATE}" -o "${OLLAMA_CPU_TARGET}" = "static" ]; ...@@ -66,7 +64,7 @@ if [ -z "${OLLAMA_SKIP_STATIC_GENERATE}" -o "${OLLAMA_CPU_TARGET}" = "static" ];
# Static build for linking into the Go binary # Static build for linking into the Go binary
init_vars init_vars
CMAKE_TARGETS="--target llama --target ggml" CMAKE_TARGETS="--target llama --target ggml"
CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DGGML_NATIVE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off ${CMAKE_DEFS}"
BUILD_DIR="../build/linux/${ARCH}_static" BUILD_DIR="../build/linux/${ARCH}_static"
echo "Building static library" echo "Building static library"
build build
...@@ -79,29 +77,29 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then ...@@ -79,29 +77,29 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
if [ -n "${OLLAMA_CUSTOM_CPU_DEFS}" ]; then if [ -n "${OLLAMA_CUSTOM_CPU_DEFS}" ]; then
init_vars init_vars
echo "OLLAMA_CUSTOM_CPU_DEFS=\"${OLLAMA_CUSTOM_CPU_DEFS}\"" echo "OLLAMA_CUSTOM_CPU_DEFS=\"${OLLAMA_CUSTOM_CPU_DEFS}\""
CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}" CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
BUILD_DIR="../build/linux/${ARCH}/cpu" BUILD_DIR="../build/linux/${ARCH}/cpu"
echo "Building custom CPU" echo "Building custom CPU"
build build
compress compress
else else
# Darwin Rosetta x86 emulation does NOT support AVX, AVX2, AVX512 # Darwin Rosetta x86 emulation does NOT support AVX, AVX2, AVX512
# -DLLAMA_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer # -DGGML_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer
# -DLLAMA_F16C -- 2012 Intel Ivy Bridge & AMD 2011 Bulldozer (No significant improvement over just AVX) # -DGGML_F16C -- 2012 Intel Ivy Bridge & AMD 2011 Bulldozer (No significant improvement over just AVX)
# -DLLAMA_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen # -DGGML_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen
# -DLLAMA_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver # -DGGML_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver
# Note: the following seem to yield slower results than AVX2 - ymmv # Note: the following seem to yield slower results than AVX2 - ymmv
# -DLLAMA_AVX512 -- 2017 Intel Skylake and High End DeskTop (HEDT) # -DGGML_AVX512 -- 2017 Intel Skylake and High End DeskTop (HEDT)
# -DLLAMA_AVX512_VBMI -- 2018 Intel Cannon Lake # -DGGML_AVX512_VBMI -- 2018 Intel Cannon Lake
# -DLLAMA_AVX512_VNNI -- 2021 Intel Alder Lake # -DGGML_AVX512_VNNI -- 2021 Intel Alder Lake
COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off" COMMON_CPU_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off"
if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then
# #
# CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta) # CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
# #
init_vars init_vars
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
BUILD_DIR="../build/linux/${ARCH}/cpu" BUILD_DIR="../build/linux/${ARCH}/cpu"
echo "Building LCD CPU" echo "Building LCD CPU"
build build
...@@ -118,7 +116,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then ...@@ -118,7 +116,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
# Approximately 400% faster than LCD on same CPU # Approximately 400% faster than LCD on same CPU
# #
init_vars init_vars
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
BUILD_DIR="../build/linux/${ARCH}/cpu_avx" BUILD_DIR="../build/linux/${ARCH}/cpu_avx"
echo "Building AVX CPU" echo "Building AVX CPU"
build build
...@@ -131,7 +129,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then ...@@ -131,7 +129,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
# Approximately 10% faster than AVX on same CPU # Approximately 10% faster than AVX on same CPU
# #
init_vars init_vars
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_AVX512=off -DLLAMA_FMA=on -DLLAMA_F16C=on ${CMAKE_DEFS}" CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}"
BUILD_DIR="../build/linux/${ARCH}/cpu_avx2" BUILD_DIR="../build/linux/${ARCH}/cpu_avx2"
echo "Building AVX2 CPU" echo "Building AVX2 CPU"
build build
...@@ -172,15 +170,15 @@ if [ -z "${OLLAMA_SKIP_CUDA_GENERATE}" -a -d "${CUDA_LIB_DIR}" ]; then ...@@ -172,15 +170,15 @@ if [ -z "${OLLAMA_SKIP_CUDA_GENERATE}" -a -d "${CUDA_LIB_DIR}" ]; then
# #
# CUDA compute < 6.0 lacks proper FP16 support on ARM. # CUDA compute < 6.0 lacks proper FP16 support on ARM.
# Disabling has minimal performance effect while maintaining compatibility. # Disabling has minimal performance effect while maintaining compatibility.
ARM64_DEFS="-DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_CUDA_F16=off" ARM64_DEFS="-DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_CUDA_F16=off"
fi fi
# Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp # Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp
if [ -n "${OLLAMA_CUSTOM_CUDA_DEFS}" ]; then if [ -n "${OLLAMA_CUSTOM_CUDA_DEFS}" ]; then
echo "OLLAMA_CUSTOM_CUDA_DEFS=\"${OLLAMA_CUSTOM_CUDA_DEFS}\"" echo "OLLAMA_CUSTOM_CUDA_DEFS=\"${OLLAMA_CUSTOM_CUDA_DEFS}\""
CMAKE_CUDA_DEFS="-DLLAMA_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}" CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}"
echo "Building custom CUDA GPU" echo "Building custom CUDA GPU"
else else
CMAKE_CUDA_DEFS="-DLLAMA_CUDA=on -DLLAMA_CUDA_FORCE_MMQ=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES}" CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES}"
fi fi
CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS} ${CMAKE_CUDA_DEFS}" CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS} ${CMAKE_CUDA_DEFS}"
BUILD_DIR="../build/linux/${ARCH}/cuda${CUDA_VARIANT}" BUILD_DIR="../build/linux/${ARCH}/cuda${CUDA_VARIANT}"
...@@ -213,12 +211,12 @@ if [ -z "${ONEAPI_ROOT}" ]; then ...@@ -213,12 +211,12 @@ if [ -z "${ONEAPI_ROOT}" ]; then
ONEAPI_ROOT=/opt/intel/oneapi ONEAPI_ROOT=/opt/intel/oneapi
fi fi
if [ -d "${ONEAPI_ROOT}" ]; then if [ -z "${OLLAMA_SKIP_ONEAPI_GENERATE}" -a -d "${ONEAPI_ROOT}" ]; then
echo "OneAPI libraries detected - building dynamic OneAPI library" echo "OneAPI libraries detected - building dynamic OneAPI library"
init_vars init_vars
source ${ONEAPI_ROOT}/setvars.sh --force # set up environment variables for oneAPI source ${ONEAPI_ROOT}/setvars.sh --force # set up environment variables for oneAPI
CC=icx CC=icx
CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL=ON -DLLAMA_SYCL_F16=OFF" CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL=ON -DGGML_SYCL_F16=OFF"
BUILD_DIR="../build/linux/${ARCH}/oneapi" BUILD_DIR="../build/linux/${ARCH}/oneapi"
EXTRA_LIBS="-fsycl -Wl,-rpath,${ONEAPI_ROOT}/compiler/latest/lib,-rpath,${ONEAPI_ROOT}/mkl/latest/lib,-rpath,${ONEAPI_ROOT}/tbb/latest/lib,-rpath,${ONEAPI_ROOT}/compiler/latest/opt/oclfpga/linux64/lib -lOpenCL -lmkl_core -lmkl_sycl_blas -lmkl_intel_ilp64 -lmkl_tbb_thread -ltbb" EXTRA_LIBS="-fsycl -Wl,-rpath,${ONEAPI_ROOT}/compiler/latest/lib,-rpath,${ONEAPI_ROOT}/mkl/latest/lib,-rpath,${ONEAPI_ROOT}/tbb/latest/lib,-rpath,${ONEAPI_ROOT}/compiler/latest/opt/oclfpga/linux64/lib -lOpenCL -lmkl_core -lmkl_sycl_blas -lmkl_intel_ilp64 -lmkl_tbb_thread -ltbb"
DEBUG_FLAGS="" # icx compiles with -O0 if we pass -g, so we must remove it DEBUG_FLAGS="" # icx compiles with -O0 if we pass -g, so we must remove it
...@@ -256,9 +254,7 @@ if [ -z "${OLLAMA_SKIP_ROCM_GENERATE}" -a -d "${ROCM_PATH}" ]; then ...@@ -256,9 +254,7 @@ if [ -z "${OLLAMA_SKIP_ROCM_GENERATE}" -a -d "${ROCM_PATH}" ]; then
ROCM_VARIANT=_v$(ls ${ROCM_PATH}/lib/librocblas.so.*.*.????? | cut -f5 -d. || true) ROCM_VARIANT=_v$(ls ${ROCM_PATH}/lib/librocblas.so.*.*.????? | cut -f5 -d. || true)
fi fi
init_vars init_vars
-    echo $(amdGPUs)
-    CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DLLAMA_HIPBLAS=on -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS=$(amdGPUs)"
+    CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DGGML_HIPBLAS=on -DLLAMA_CUDA_NO_PEER_COPY=on -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS=$(amdGPUs) -DGPU_TARGETS=$(amdGPUs)"
# Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp # Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp
if [ -n "${OLLAMA_CUSTOM_ROCM_DEFS}" ]; then if [ -n "${OLLAMA_CUSTOM_ROCM_DEFS}" ]; then
echo "OLLAMA_CUSTOM_ROCM_DEFS=\"${OLLAMA_CUSTOM_ROCM_DEFS}\"" echo "OLLAMA_CUSTOM_ROCM_DEFS=\"${OLLAMA_CUSTOM_ROCM_DEFS}\""
...@@ -266,7 +262,7 @@ if [ -z "${OLLAMA_SKIP_ROCM_GENERATE}" -a -d "${ROCM_PATH}" ]; then ...@@ -266,7 +262,7 @@ if [ -z "${OLLAMA_SKIP_ROCM_GENERATE}" -a -d "${ROCM_PATH}" ]; then
echo "Building custom ROCM GPU" echo "Building custom ROCM GPU"
fi fi
BUILD_DIR="../build/linux/${ARCH}/rocm${ROCM_VARIANT}" BUILD_DIR="../build/linux/${ARCH}/rocm${ROCM_VARIANT}"
EXTRA_LIBS="-L${ROCM_PATH}/lib -Wl,-rpath,\$ORIGIN/../../rocm/ -lhipblas -lrocblas -lamdhip64 -lrocsolver -lamd_comgr -lhsa-runtime64 -lrocsparse -ldrm -ldrm_amdgpu" EXTRA_LIBS="-L${ROCM_PATH}/lib -L/opt/amdgpu/lib/x86_64-linux-gnu/ -Wl,-rpath,\$ORIGIN/../../rocm/ -lhipblas -lrocblas -lamdhip64 -lrocsolver -lamd_comgr -lhsa-runtime64 -lrocsparse -ldrm -ldrm_amdgpu"
build build
# Record the ROCM dependencies # Record the ROCM dependencies
...@@ -276,12 +272,12 @@ if [ -z "${OLLAMA_SKIP_ROCM_GENERATE}" -a -d "${ROCM_PATH}" ]; then ...@@ -276,12 +272,12 @@ if [ -z "${OLLAMA_SKIP_ROCM_GENERATE}" -a -d "${ROCM_PATH}" ]; then
echo "${dep}" >> "${BUILD_DIR}/bin/deps.txt" echo "${dep}" >> "${BUILD_DIR}/bin/deps.txt"
done done
# bomb out if for some reason we didn't get a few deps # bomb out if for some reason we didn't get a few deps
#if [ $(cat "${BUILD_DIR}/bin/deps.txt" | wc -l ) -lt 8 ] ; then # if [ $(cat "${BUILD_DIR}/bin/deps.txt" | wc -l ) -lt 8 ] ; then
# cat "${BUILD_DIR}/bin/deps.txt" # cat "${BUILD_DIR}/bin/deps.txt"
# echo "ERROR: deps file short" # echo "ERROR: deps file short"
# exit 1 # exit 1
#fi # fi
-    # compress
+    compress
fi fi
cleanup cleanup
......
...@@ -6,18 +6,9 @@ function amdGPUs { ...@@ -6,18 +6,9 @@ function amdGPUs {
if ($env:AMDGPU_TARGETS) { if ($env:AMDGPU_TARGETS) {
return $env:AMDGPU_TARGETS return $env:AMDGPU_TARGETS
} }
# TODO - load from some common data file for linux + windows build consistency # Current supported rocblas list from ROCm v6.1.2 on windows
# https://rocm.docs.amd.com/projects/install-on-windows/en/latest/reference/system-requirements.html#windows-supported-gpus
$GPU_LIST = @( $GPU_LIST = @(
"gfx900"
"gfx906:xnack-"
"gfx908:xnack-"
"gfx90a:xnack+"
"gfx90a:xnack-"
"gfx940"
"gfx941"
"gfx942"
"gfx1010"
"gfx1012"
"gfx1030" "gfx1030"
"gfx1100" "gfx1100"
"gfx1101" "gfx1101"
...@@ -39,7 +30,8 @@ function init_vars { ...@@ -39,7 +30,8 @@ function init_vars {
} }
    $script:cmakeDefs = @(
        "-DBUILD_SHARED_LIBS=on",
-       "-DLLAMA_NATIVE=off"
+       "-DGGML_NATIVE=off",
+       "-DGGML_OPENMP=off"
        )
$script:commonCpuDefs = @("-DCMAKE_POSITION_INDEPENDENT_CODE=on") $script:commonCpuDefs = @("-DCMAKE_POSITION_INDEPENDENT_CODE=on")
$script:ARCH = $Env:PROCESSOR_ARCHITECTURE.ToLower() $script:ARCH = $Env:PROCESSOR_ARCHITECTURE.ToLower()
...@@ -122,8 +114,13 @@ function build { ...@@ -122,8 +114,13 @@ function build {
& cmake --version & cmake --version
& cmake -S "${script:llamacppDir}" -B $script:buildDir $script:cmakeDefs & cmake -S "${script:llamacppDir}" -B $script:buildDir $script:cmakeDefs
if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
-    write-host "building with: cmake --build $script:buildDir --config $script:config $($script:cmakeTargets | ForEach-Object { `"--target`", $_ })"
-    & cmake --build $script:buildDir --config $script:config ($script:cmakeTargets | ForEach-Object { "--target", $_ })
+    if ($cmakeDefs -contains "-G") {
+        $extra=@("-j8")
+    } else {
+        $extra= @("--", "/p:CL_MPcount=8")
+    }
+    write-host "building with: cmake --build $script:buildDir --config $script:config $($script:cmakeTargets | ForEach-Object { `"--target`", $_ }) $extra"
+    & cmake --build $script:buildDir --config $script:config ($script:cmakeTargets | ForEach-Object { "--target", $_ }) $extra
if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
# Rearrange output to be consistent between different generators # Rearrange output to be consistent between different generators
if ($null -ne ${script:config} -And (test-path -path "${script:buildDir}/bin/${script:config}" ) ) { if ($null -ne ${script:config} -And (test-path -path "${script:buildDir}/bin/${script:config}" ) ) {
...@@ -176,9 +173,9 @@ function cleanup { ...@@ -176,9 +173,9 @@ function cleanup {
} }
# -DLLAMA_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer # -DGGML_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer
# -DLLAMA_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen # -DGGML_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen
# -DLLAMA_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver # -DGGML_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver
function build_static() { function build_static() {
...@@ -198,12 +195,13 @@ function build_static() { ...@@ -198,12 +195,13 @@ function build_static() {
"-DCMAKE_C_COMPILER=gcc.exe", "-DCMAKE_C_COMPILER=gcc.exe",
"-DCMAKE_CXX_COMPILER=g++.exe", "-DCMAKE_CXX_COMPILER=g++.exe",
"-DBUILD_SHARED_LIBS=off", "-DBUILD_SHARED_LIBS=off",
"-DLLAMA_NATIVE=off", "-DGGML_NATIVE=off",
"-DLLAMA_AVX=off", "-DGGML_AVX=off",
"-DLLAMA_AVX2=off", "-DGGML_AVX2=off",
"-DLLAMA_AVX512=off", "-DGGML_AVX512=off",
"-DLLAMA_F16C=off", "-DGGML_F16C=off",
"-DLLAMA_FMA=off") "-DGGML_FMA=off",
"-DGGML_OPENMP=off")
$script:buildDir="../build/windows/${script:ARCH}_static" $script:buildDir="../build/windows/${script:ARCH}_static"
write-host "Building static library" write-host "Building static library"
build build
...@@ -217,7 +215,7 @@ function build_cpu($gen_arch) { ...@@ -217,7 +215,7 @@ function build_cpu($gen_arch) {
if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu"))) { if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu"))) {
# remaining llama.cpp builds use MSVC # remaining llama.cpp builds use MSVC
init_vars init_vars
$script:cmakeDefs = $script:commonCpuDefs + @("-A", $gen_arch, "-DLLAMA_AVX=off", "-DLLAMA_AVX2=off", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=off", "-DLLAMA_F16C=off") + $script:cmakeDefs $script:cmakeDefs = $script:commonCpuDefs + @("-A", $gen_arch, "-DGGML_AVX=off", "-DGGML_AVX2=off", "-DGGML_AVX512=off", "-DGGML_FMA=off", "-DGGML_F16C=off") + $script:cmakeDefs
$script:buildDir="../build/windows/${script:ARCH}/cpu" $script:buildDir="../build/windows/${script:ARCH}/cpu"
$script:distDir="$script:DIST_BASE\cpu" $script:distDir="$script:DIST_BASE\cpu"
write-host "Building LCD CPU" write-host "Building LCD CPU"
...@@ -232,7 +230,7 @@ function build_cpu($gen_arch) { ...@@ -232,7 +230,7 @@ function build_cpu($gen_arch) {
function build_cpu_avx() { function build_cpu_avx() {
if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx"))) { if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx"))) {
init_vars init_vars
$script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DLLAMA_AVX=on", "-DLLAMA_AVX2=off", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=off", "-DLLAMA_F16C=off") + $script:cmakeDefs $script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DGGML_AVX=on", "-DGGML_AVX2=off", "-DGGML_AVX512=off", "-DGGML_FMA=off", "-DGGML_F16C=off") + $script:cmakeDefs
$script:buildDir="../build/windows/${script:ARCH}/cpu_avx" $script:buildDir="../build/windows/${script:ARCH}/cpu_avx"
$script:distDir="$script:DIST_BASE\cpu_avx" $script:distDir="$script:DIST_BASE\cpu_avx"
write-host "Building AVX CPU" write-host "Building AVX CPU"
...@@ -247,7 +245,7 @@ function build_cpu_avx() { ...@@ -247,7 +245,7 @@ function build_cpu_avx() {
function build_cpu_avx2() { function build_cpu_avx2() {
if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx2"))) { if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx2"))) {
init_vars init_vars
$script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DLLAMA_AVX=on", "-DLLAMA_AVX2=on", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=on", "-DLLAMA_F16C=on") + $script:cmakeDefs $script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DGGML_AVX=on", "-DGGML_AVX2=on", "-DGGML_AVX512=off", "-DGGML_FMA=on", "-DGGML_F16C=on") + $script:cmakeDefs
$script:buildDir="../build/windows/${script:ARCH}/cpu_avx2" $script:buildDir="../build/windows/${script:ARCH}/cpu_avx2"
$script:distDir="$script:DIST_BASE\cpu_avx2" $script:distDir="$script:DIST_BASE\cpu_avx2"
write-host "Building AVX2 CPU" write-host "Building AVX2 CPU"
...@@ -270,7 +268,15 @@ function build_cuda() { ...@@ -270,7 +268,15 @@ function build_cuda() {
init_vars init_vars
$script:buildDir="../build/windows/${script:ARCH}/cuda$script:CUDA_VARIANT" $script:buildDir="../build/windows/${script:ARCH}/cuda$script:CUDA_VARIANT"
$script:distDir="$script:DIST_BASE\cuda$script:CUDA_VARIANT" $script:distDir="$script:DIST_BASE\cuda$script:CUDA_VARIANT"
-        $script:cmakeDefs += @("-A", "x64", "-DLLAMA_CUDA=ON", "-DLLAMA_AVX=on", "-DLLAMA_AVX2=off", "-DCUDAToolkit_INCLUDE_DIR=$script:CUDA_INCLUDE_DIR", "-DCMAKE_CUDA_ARCHITECTURES=${script:CMAKE_CUDA_ARCHITECTURES}")
+        $script:cmakeDefs += @(
+            "-A", "x64",
+            "-DGGML_CUDA=ON",
+            "-DGGML_AVX=on",
+            "-DGGML_AVX2=off",
+            "-DCUDAToolkit_INCLUDE_DIR=$script:CUDA_INCLUDE_DIR",
+            "-DCMAKE_CUDA_FLAGS=-t8",
+            "-DCMAKE_CUDA_ARCHITECTURES=${script:CMAKE_CUDA_ARCHITECTURES}"
+            )
if ($null -ne $env:OLLAMA_CUSTOM_CUDA_DEFS) { if ($null -ne $env:OLLAMA_CUSTOM_CUDA_DEFS) {
write-host "OLLAMA_CUSTOM_CUDA_DEFS=`"${env:OLLAMA_CUSTOM_CUDA_DEFS}`"" write-host "OLLAMA_CUSTOM_CUDA_DEFS=`"${env:OLLAMA_CUSTOM_CUDA_DEFS}`""
$script:cmakeDefs +=@("${env:OLLAMA_CUSTOM_CUDA_DEFS}") $script:cmakeDefs +=@("${env:OLLAMA_CUSTOM_CUDA_DEFS}")
...@@ -280,17 +286,19 @@ function build_cuda() { ...@@ -280,17 +286,19 @@ function build_cuda() {
sign sign
install install
write-host "copying CUDA dependencies to ${script:SRC_DIR}\dist\windows-${script:ARCH}\" rm -ea 0 -recurse -force -path "${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\"
cp "${script:CUDA_LIB_DIR}\cudart64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\" md "${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\" -ea 0 > $null
cp "${script:CUDA_LIB_DIR}\cublas64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\" write-host "copying CUDA dependencies to ${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\"
cp "${script:CUDA_LIB_DIR}\cublasLt64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\" cp "${script:CUDA_LIB_DIR}\cudart64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\"
cp "${script:CUDA_LIB_DIR}\cublas64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\"
cp "${script:CUDA_LIB_DIR}\cublasLt64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\"
} else { } else {
write-host "Skipping CUDA generation step" write-host "Skipping CUDA generation step"
} }
} }
function build_oneapi() { function build_oneapi() {
-    if ((-not "${env:OLLAMA_SKIP_CUDA_GENERATE}") -and ("${env:ONEAPI_ROOT}")) {
+    if ((-not "${env:OLLAMA_SKIP_ONEAPI_GENERATE}") -and ("${env:ONEAPI_ROOT}")) {
# Get oneAPI version # Get oneAPI version
$script:ONEAPI_VERSION = icpx --version $script:ONEAPI_VERSION = icpx --version
$script:ONEAPI_VERSION = [regex]::Match($script:ONEAPI_VERSION, '(?<=oneAPI DPC\+\+/C\+\+ Compiler )(?<version>\d+\.\d+\.\d+)').Value $script:ONEAPI_VERSION = [regex]::Match($script:ONEAPI_VERSION, '(?<=oneAPI DPC\+\+/C\+\+ Compiler )(?<version>\d+\.\d+\.\d+)').Value
...@@ -302,7 +310,7 @@ function build_oneapi() { ...@@ -302,7 +310,7 @@ function build_oneapi() {
$script:distDir ="$script:DIST_BASE\oneapi$script:ONEAPI_VARIANT" $script:distDir ="$script:DIST_BASE\oneapi$script:ONEAPI_VARIANT"
$script:cmakeDefs += @( $script:cmakeDefs += @(
"-G", "MinGW Makefiles", "-G", "MinGW Makefiles",
"-DLLAMA_SYCL=ON", "-DGGML_SYCL=ON",
"-DCMAKE_C_COMPILER=icx", "-DCMAKE_C_COMPILER=icx",
"-DCMAKE_CXX_COMPILER=icx", "-DCMAKE_CXX_COMPILER=icx",
"-DCMAKE_BUILD_TYPE=Release" "-DCMAKE_BUILD_TYPE=Release"
...@@ -317,16 +325,18 @@ function build_oneapi() { ...@@ -317,16 +325,18 @@ function build_oneapi() {
sign sign
install install
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\libirngmd.dll" "${script:distDir}" rm -ea 0 -recurse -force -path "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\libmmd.dll" "${script:distDir}" md "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\" -ea 0 > $null
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_level_zero.dll" "${script:distDir}" cp "${env:ONEAPI_ROOT}\compiler\latest\bin\libirngmd.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_unified_runtime.dll" "${script:distDir}" cp "${env:ONEAPI_ROOT}\compiler\latest\bin\libmmd.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_win_proxy_loader.dll" "${script:distDir}" cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_level_zero.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\svml_dispmd.dll" "${script:distDir}" cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_unified_runtime.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\sycl7.dll" "${script:distDir}" cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_win_proxy_loader.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_core.2.dll" "${script:distDir}" cp "${env:ONEAPI_ROOT}\compiler\latest\bin\svml_dispmd.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_sycl_blas.4.dll" "${script:distDir}" cp "${env:ONEAPI_ROOT}\compiler\latest\bin\sycl7.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_tbb_thread.2.dll" "${script:distDir}" cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_core.2.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_sycl_blas.4.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_tbb_thread.2.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
} else { } else {
Write-Host "Skipping oneAPI generation step" Write-Host "Skipping oneAPI generation step"
} }
...@@ -346,10 +356,11 @@ function build_rocm() { ...@@ -346,10 +356,11 @@ function build_rocm() {
"-G", "Ninja", "-G", "Ninja",
"-DCMAKE_C_COMPILER=clang.exe", "-DCMAKE_C_COMPILER=clang.exe",
"-DCMAKE_CXX_COMPILER=clang++.exe", "-DCMAKE_CXX_COMPILER=clang++.exe",
"-DLLAMA_HIPBLAS=on", "-DGGML_HIPBLAS=on",
"-DLLAMA_CUDA_NO_PEER_COPY=on",
"-DHIP_PLATFORM=amd", "-DHIP_PLATFORM=amd",
"-DLLAMA_AVX=on", "-DGGML_AVX=on",
"-DLLAMA_AVX2=off", "-DGGML_AVX2=off",
"-DCMAKE_POSITION_INDEPENDENT_CODE=on", "-DCMAKE_POSITION_INDEPENDENT_CODE=on",
"-DAMDGPU_TARGETS=$(amdGPUs)", "-DAMDGPU_TARGETS=$(amdGPUs)",
"-DGPU_TARGETS=$(amdGPUs)" "-DGPU_TARGETS=$(amdGPUs)"
...@@ -375,7 +386,6 @@ function build_rocm() { ...@@ -375,7 +386,6 @@ function build_rocm() {
sign sign
install install
# Assumes v5.7, may need adjustments for v6
rm -ea 0 -recurse -force -path "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\" rm -ea 0 -recurse -force -path "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\"
md "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\rocblas\library\" -ea 0 > $null md "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\rocblas\library\" -ea 0 > $null
cp "${env:HIP_PATH}\bin\hipblas.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\" cp "${env:HIP_PATH}\bin\hipblas.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\"
......
...@@ -36,6 +36,8 @@ type ggla struct { ...@@ -36,6 +36,8 @@ type ggla struct {
kv KV kv KV
tensors []*Tensor tensors []*Tensor
tensorOffset uint64
} }
func newGGLA(container *containerGGLA) *ggla { func newGGLA(container *containerGGLA) *ggla {
...@@ -50,10 +52,13 @@ func (llm *ggla) KV() KV { ...@@ -50,10 +52,13 @@ func (llm *ggla) KV() KV {
} }
func (llm *ggla) Tensors() Tensors {
-	return llm.tensors
+	return Tensors{
+		Items:  llm.tensors,
+		Offset: llm.tensorOffset,
+	}
}

-func (llm *ggla) decode(rs io.ReadSeeker) error {
+func (llm *ggla) decode(rs io.ReadSeeker) (retErr error) {
	var r uint32
	if err := binary.Read(rs, binary.LittleEndian, &r); err != nil {
		return err
...@@ -66,12 +71,28 @@ func (llm *ggla) decode(rs io.ReadSeeker) error { ...@@ -66,12 +71,28 @@ func (llm *ggla) decode(rs io.ReadSeeker) error {
} }
llm.kv["alpha"] = alpha llm.kv["alpha"] = alpha
offset, err := rs.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
llm.tensorOffset = uint64(offset)
for { for {
var dims uint32 var dims uint32
if err := binary.Read(rs, binary.LittleEndian, &dims); err != nil { if err := binary.Read(rs, binary.LittleEndian, &dims); err != nil {
if errors.Is(err, io.EOF) {
return nil
}
return err return err
} }
defer func() {
if errors.Is(retErr, io.EOF) {
retErr = io.ErrUnexpectedEOF
}
}()
var namesize uint32 var namesize uint32
if err := binary.Read(rs, binary.LittleEndian, &namesize); err != nil { if err := binary.Read(rs, binary.LittleEndian, &namesize); err != nil {
return err return err
...@@ -108,7 +129,7 @@ func (llm *ggla) decode(rs io.ReadSeeker) error { ...@@ -108,7 +129,7 @@ func (llm *ggla) decode(rs io.ReadSeeker) error {
return err return err
} }
-	if _, err := rs.Seek((offset+31)&-32, io.SeekStart); err != nil {
+	if _, err := rs.Seek((offset+31)&-32-offset, io.SeekCurrent); err != nil {
		return err
	}
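The expression (offset+31)&-32 rounds the current offset up to the next multiple of 32, and subtracting offset turns that absolute target into a relative skip suitable for io.SeekCurrent. A small sketch of the same arithmetic, assuming non-negative offsets (names are illustrative):

package align

// alignUp32 rounds n up to the next multiple of 32; for two's-complement
// integers (n+31)&^31 is the same as the (n+31)&-32 used in the diff.
func alignUp32(n int64) int64 {
	return (n + 31) &^ 31
}

// skipTo32 is the number of bytes to skip from offset so the next read
// starts on a 32-byte boundary; it is 0 when offset is already aligned.
func skipTo32(offset int64) int64 {
	return alignUp32(offset) - offset
}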
......
...@@ -6,6 +6,8 @@ import ( ...@@ -6,6 +6,8 @@ import (
"fmt" "fmt"
"io" "io"
"strings" "strings"
"github.com/ollama/ollama/util/bufioutil"
) )
type GGML struct { type GGML struct {
...@@ -69,6 +71,30 @@ func (kv KV) HeadCountKV() uint64 { ...@@ -69,6 +71,30 @@ func (kv KV) HeadCountKV() uint64 {
return 1 return 1
} }
func (kv KV) EmbeddingHeadCount() uint64 {
if heads := kv.HeadCount(); heads > 0 {
return kv.EmbeddingLength() / kv.HeadCount()
}
return 0
}
func (kv KV) EmbeddingHeadCountK() uint64 {
if k := kv.u64(fmt.Sprintf("%s.attention.key_length", kv.Architecture())); k > 0 {
return k
}
return kv.EmbeddingHeadCount()
}
func (kv KV) EmbeddingHeadCountV() uint64 {
if v := kv.u64(fmt.Sprintf("%s.attention.value_length", kv.Architecture())); v > 0 {
return v
}
return kv.EmbeddingHeadCount()
}
func (kv KV) GQA() uint64 { func (kv KV) GQA() uint64 {
return kv.HeadCount() / kv.HeadCountKV() return kv.HeadCount() / kv.HeadCountKV()
} }
...@@ -81,11 +107,19 @@ func (kv KV) ContextLength() uint64 { ...@@ -81,11 +107,19 @@ func (kv KV) ContextLength() uint64 {
return kv.u64(fmt.Sprintf("%s.context_length", kv.Architecture())) return kv.u64(fmt.Sprintf("%s.context_length", kv.Architecture()))
} }
type Tensors []*Tensor func (kv KV) ChatTemplate() string {
s, _ := kv["tokenizer.chat_template"].(string)
return s
}
type Tensors struct {
Items []*Tensor
Offset uint64
}
func (ts Tensors) Layers() map[string]Layer { func (ts Tensors) Layers() map[string]Layer {
layers := make(map[string]Layer) layers := make(map[string]Layer)
for _, t := range ts { for _, t := range ts.Items {
parts := strings.Split(t.Name, ".") parts := strings.Split(t.Name, ".")
if parts[0] == "blk" { if parts[0] == "blk" {
// join first and second part, e.g. blk.%d // join first and second part, e.g. blk.%d
...@@ -249,7 +283,18 @@ func DetectGGMLType(b []byte) string { ...@@ -249,7 +283,18 @@ func DetectGGMLType(b []byte) string {
} }
} }
func DecodeGGML(rs io.ReadSeeker) (*GGML, int64, error) { // DecodeGGML decodes a GGML model from the given reader.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// maxArraySize is negative, all arrays are collected.
func DecodeGGML(rs io.ReadSeeker, maxArraySize int) (*GGML, int64, error) {
if maxArraySize == 0 {
maxArraySize = 1024
}
rs = bufioutil.NewBufferedSeeker(rs, 32<<10)
var magic uint32 var magic uint32
if err := binary.Read(rs, binary.LittleEndian, &magic); err != nil { if err := binary.Read(rs, binary.LittleEndian, &magic); err != nil {
return nil, 0, err return nil, 0, err
...@@ -262,17 +307,15 @@ func DecodeGGML(rs io.ReadSeeker) (*GGML, int64, error) { ...@@ -262,17 +307,15 @@ func DecodeGGML(rs io.ReadSeeker) (*GGML, int64, error) {
case FILE_MAGIC_GGLA: case FILE_MAGIC_GGLA:
c = &containerGGLA{} c = &containerGGLA{}
case FILE_MAGIC_GGUF_LE: case FILE_MAGIC_GGUF_LE:
c = &containerGGUF{ByteOrder: binary.LittleEndian} c = &containerGGUF{ByteOrder: binary.LittleEndian, maxArraySize: maxArraySize}
case FILE_MAGIC_GGUF_BE: case FILE_MAGIC_GGUF_BE:
c = &containerGGUF{ByteOrder: binary.BigEndian} c = &containerGGUF{ByteOrder: binary.BigEndian, maxArraySize: maxArraySize}
default: default:
return nil, 0, errors.New("invalid file magic") return nil, 0, errors.New("invalid file magic")
} }
model, err := c.Decode(rs) model, err := c.Decode(rs)
if errors.Is(err, io.EOF) { if err != nil {
// noop
} else if err != nil {
return nil, 0, err return nil, 0, err
} }
...@@ -292,7 +335,10 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui ...@@ -292,7 +335,10 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui
embedding := llm.KV().EmbeddingLength() embedding := llm.KV().EmbeddingLength()
heads := llm.KV().HeadCount() heads := llm.KV().HeadCount()
headsKV := llm.KV().HeadCountKV() headsKV := llm.KV().HeadCountKV()
vocab := uint64(len(llm.KV()["tokenizer.ggml.tokens"].([]any))) vocab := uint64(llm.KV()["tokenizer.ggml.tokens"].(*array).size)
embeddingHeads := llm.KV().EmbeddingHeadCount()
embeddingHeadsK := llm.KV().EmbeddingHeadCountK()
layers := llm.Tensors().Layers() layers := llm.Tensors().Layers()
...@@ -302,7 +348,8 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui ...@@ -302,7 +348,8 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui
partialOffload = 4 * batch * embedding partialOffload = 4 * batch * embedding
partialOffload += max( partialOffload += max(
4*batch*(1+embedding+max(context, embedding))+embedding*embedding*9/16+4*context*(batch*heads+embedding/heads*headsKV), // 4*batch*(4+6*embedding+context*(2*heads)+llm.KV().GQA()),
4*batch*(1+embedding+max(context, embedding))+embedding*embedding*9/16+4*context*(batch*heads+embeddingHeads*headsKV),
4*batch*(embedding+vocab)+embedding*vocab*105/128, 4*batch*(embedding+vocab)+embedding*vocab*105/128,
) )
...@@ -310,21 +357,30 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui ...@@ -310,21 +357,30 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui
// mixtral 8x22b // mixtral 8x22b
ff := uint64(llm.KV()["llama.feed_forward_length"].(uint32)) ff := uint64(llm.KV()["llama.feed_forward_length"].(uint32))
partialOffload = max( partialOffload = max(
3*ffnGateExpsWeight.Size()+4*batch*(2*ff+headsKV+embedding+context+embedding/heads*headsKV), 3*ffnGateExpsWeight.Size()+4*batch*(2*ff+headsKV+embedding+context+embeddingHeads*headsKV),
4*(context*batch*heads+context*embedding/heads*headsKV+batch*1024+embedding/heads*headsKV*batch), 4*(context*batch*heads+context*embeddingHeads*headsKV+batch*1024+embeddingHeads*headsKV*batch),
) )
} else if ffnGateWeight, ok := layers["blk.0"]["ffn_gate.0.weight"]; ok { } else if ffnGateWeight, ok := layers["blk.0"]["ffn_gate.0.weight"]; ok {
// mixtral 8x7b // mixtral 8x7b
ffnGateWeight1 := ffnGateWeight.Shape[1] ffnGateWeight1 := ffnGateWeight.Shape[1]
fullOffload = 4 * batch * (2 + 3*embedding + context*(1+heads) + 2*headsKV + ffnGateWeight1) fullOffload = 4 * batch * (2 + 3*embedding + context*(1+heads) + 2*headsKV + ffnGateWeight1)
partialOffload = max( partialOffload = max(
4*batch*(3+embedding/heads*headsKV+embedding+context*(1+heads)+ffnGateWeight1)+(embedding*embedding+3*embedding*headsKV*ffnGateWeight1)*9/16, 4*batch*(3+embeddingHeads*headsKV+embedding+context*(1+heads)+ffnGateWeight1)+(embedding*embedding+3*embedding*headsKV*ffnGateWeight1)*9/16,
4*batch*(1+2*embedding+context*(1+heads))+embedding*(6*context*headsKV/heads+embedding*9/16), 4*batch*(1+2*embedding+context*(1+heads))+embedding*(6*context*headsKV/heads+embedding*9/16),
) )
} }
case "gemma": case "gemma", "gemma2":
fullOffload = 4 * batch * (embedding + vocab) fullOffload = max(
partialOffload = 4*batch*(2*embedding+vocab+1) + embedding*vocab*105/128 4*batch*(embedding+vocab),
4*batch*(2+context+context*heads+2*embedding+2*embeddingHeadsK*heads),
)
partialOffload = max(
4*embedding*batch+embedding*vocab*105/128+4*vocab*batch,
4*batch*(2*embedding+1+2*embeddingHeadsK*heads+context+context*heads)+
4*embeddingHeadsK*context*8+
embedding*embeddingHeadsK*heads*9/16,
)
case "command-r": case "command-r":
fullOffload = max( fullOffload = max(
4*batch*(embedding+vocab), 4*batch*(embedding+vocab),
...@@ -361,6 +417,42 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui ...@@ -361,6 +417,42 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui
4*batch*(vocab+2*embedding), 4*batch*(vocab+2*embedding),
fullOffload, fullOffload,
) )
case "deepseek2":
fullOffload = max(
4*batch*(3*embedding+vocab),
4*batch*(3*embedding+2+context*(1+headsKV)+2*embeddingHeadsK*headsKV),
)
partialOffload = max(
4*batch*(3*embedding+vocab)+embedding*vocab*105/128,
4*batch*(2*embedding+1+2*embeddingHeadsK*headsKV+context+context*headsKV)+4*embeddingHeadsK*context*headsKV+embedding*embeddingHeadsK*headsKV*9/16,
)
case "chatglm":
fullOffload = 4 * batch * (embedding + vocab)
partialOffload = 4*batch*(embedding+vocab) + embedding*vocab*105/128
if qkvBias, ok := layers["blk.0"]["attn_qkv.bias"]; ok {
fullOffload = max(
fullOffload,
4*batch*(2+
2*embedding+
context+
context*heads+
embeddingHeadsK*heads+
qkvBias.Shape[0]),
)
partialOffload = max(
partialOffload,
4*batch*(1+
2*embedding+
embeddingHeadsK*heads+
context+
context*heads)+
4*embeddingHeadsK*context+
4*context*embeddingHeadsK+
4*qkvBias.Shape[0],
)
}
} }
return return
......
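The new EmbeddingHeadCountK and EmbeddingHeadCountV accessors above prefer the architecture-specific attention.key_length / attention.value_length keys and fall back to embedding_length divided by head_count when those keys are absent or zero. A rough sketch of that fallback, assuming a plain map in place of the real KV type and llama-style key names:

package main

import "fmt"

// headDim mirrors the fallback in the new EmbeddingHeadCountK/V accessors:
// use the explicit key/value length when present, otherwise fall back to
// embedding_length / head_count. A plain map stands in for the real KV type.
func headDim(kv map[string]uint64, key string) uint64 {
    if v := kv[key]; v > 0 {
        return v
    }
    if heads := kv["llama.attention.head_count"]; heads > 0 {
        return kv["llama.embedding_length"] / heads
    }
    return 0
}

func main() {
    kv := map[string]uint64{
        "llama.embedding_length":     4096,
        "llama.attention.head_count": 32,
        // key_length / value_length unset, so both fall back to 4096/32 = 128
    }
    fmt.Println(headDim(kv, "llama.attention.key_length"))   // 128
    fmt.Println(headDim(kv, "llama.attention.value_length")) // 128
}

The head dimensions feed directly into the revised GraphSize estimates for gemma2, deepseek2, and chatglm above.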
...@@ -2,12 +2,16 @@ package llm ...@@ -2,12 +2,16 @@ package llm
import ( import (
"bytes" "bytes"
"cmp"
"encoding/binary" "encoding/binary"
"encoding/json"
"fmt" "fmt"
"io" "io"
"log/slog"
"slices"
"strings" "strings"
"log/slog" "golang.org/x/exp/maps"
) )
type containerGGUF struct { type containerGGUF struct {
...@@ -29,6 +33,12 @@ type containerGGUF struct { ...@@ -29,6 +33,12 @@ type containerGGUF struct {
NumTensor uint64 NumTensor uint64
NumKV uint64 NumKV uint64
} }
maxArraySize int
}
func (c *containerGGUF) canCollectArray(size int) bool {
return c.maxArraySize < 0 || size <= c.maxArraySize
} }
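canCollectArray above gates whether array values are materialized while decoding: a negative maxArraySize collects everything, otherwise only arrays no larger than the limit are kept (the 0-means-1024 default is applied earlier, in DecodeGGML). A tiny standalone sketch of that predicate, not the real containerGGUF type:

package main

import "fmt"

// canCollect mirrors containerGGUF.canCollectArray: a negative limit means
// collect all arrays, otherwise only arrays up to the limit are kept.
func canCollect(maxArraySize, size int) bool {
    return maxArraySize < 0 || size <= maxArraySize
}

func main() {
    fmt.Println(canCollect(-1, 1_000_000)) // true: negative limit collects everything
    fmt.Println(canCollect(1024, 512))     // true: under the limit
    fmt.Println(canCollect(1024, 200_000)) // false: large token arrays are skipped
}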
func (c *containerGGUF) Name() string { func (c *containerGGUF) Name() string {
...@@ -54,7 +64,6 @@ func (c *containerGGUF) Decode(rs io.ReadSeeker) (model, error) { ...@@ -54,7 +64,6 @@ func (c *containerGGUF) Decode(rs io.ReadSeeker) (model, error) {
} }
model := newGGUF(c) model := newGGUF(c)
slog.Debug(fmt.Sprintf("model = %#v", model))
if err := model.Decode(rs); err != nil { if err := model.Decode(rs); err != nil {
return nil, err return nil, err
} }
...@@ -84,7 +93,10 @@ type gguf struct { ...@@ -84,7 +93,10 @@ type gguf struct {
kv KV kv KV
tensors []*Tensor tensors []*Tensor
parameters uint64 parameters uint64
tensorOffset uint64
scratch [16 << 10]byte
} }
func newGGUF(container *containerGGUF) *gguf { func newGGUF(container *containerGGUF) *gguf {
...@@ -94,16 +106,15 @@ func newGGUF(container *containerGGUF) *gguf { ...@@ -94,16 +106,15 @@ func newGGUF(container *containerGGUF) *gguf {
} }
} }
func NewGGUFV3(bo binary.ByteOrder) *gguf {
return newGGUF(&containerGGUF{ByteOrder: bo, Version: 3})
}
func (llm *gguf) KV() KV { func (llm *gguf) KV() KV {
return llm.kv return llm.kv
} }
func (llm *gguf) Tensors() Tensors { func (llm *gguf) Tensors() Tensors {
return llm.tensors return Tensors{
Items: llm.tensors,
Offset: llm.tensorOffset,
}
} }
func (llm *gguf) numTensor() uint64 { func (llm *gguf) numTensor() uint64 {
...@@ -181,34 +192,34 @@ func (llm *gguf) Decode(rs io.ReadSeeker) error { ...@@ -181,34 +192,34 @@ func (llm *gguf) Decode(rs io.ReadSeeker) error {
} }
// decode tensors // decode tensors
for i := 0; uint64(i) < llm.numTensor(); i++ { for range llm.numTensor() {
name, err := readGGUFString(llm, rs) name, err := readGGUFString(llm, rs)
if err != nil { if err != nil {
return err return fmt.Errorf("failed to read tensor name: %w", err)
} }
// dims is the number of dimensions in the tensor // dims is the number of dimensions in the tensor
dims, err := readGGUF[uint32](llm, rs) dims, err := readGGUF[uint32](llm, rs)
if err != nil { if err != nil {
return err return fmt.Errorf("failed to read tensor dimensions: %w", err)
} }
shape := [4]uint64{1, 1, 1, 1} shape := make([]uint64, dims)
for i := 0; uint32(i) < dims; i++ { for i := 0; uint32(i) < dims; i++ {
shape[i], err = readGGUF[uint64](llm, rs) shape[i], err = readGGUF[uint64](llm, rs)
if err != nil { if err != nil {
return err return fmt.Errorf("failed to read tensor shape: %w", err)
} }
} }
kind, err := readGGUF[uint32](llm, rs) kind, err := readGGUF[uint32](llm, rs)
if err != nil { if err != nil {
return err return fmt.Errorf("failed to read tensor kind: %w", err)
} }
offset, err := readGGUF[uint64](llm, rs) offset, err := readGGUF[uint64](llm, rs)
if err != nil { if err != nil {
return err return fmt.Errorf("failed to read tensor offset: %w", err)
} }
tensor := Tensor{ tensor := Tensor{
...@@ -235,19 +246,22 @@ func (llm *gguf) Decode(rs io.ReadSeeker) error { ...@@ -235,19 +246,22 @@ func (llm *gguf) Decode(rs io.ReadSeeker) error {
return err return err
} }
padding := llm.padding(offset, int64(alignment)) padding := ggufPadding(offset, int64(alignment))
if _, err := rs.Seek(padding, io.SeekCurrent); err != nil { llm.tensorOffset = uint64(offset + padding)
return err
}
for _, tensor := range llm.tensors { for _, tensor := range llm.tensors {
if _, err := rs.Seek(int64(tensor.Size()), io.SeekCurrent); err != nil { offset, err := rs.Seek(0, io.SeekCurrent)
return err if err != nil {
return fmt.Errorf("failed to get current offset: %w", err)
} }
padding := llm.padding(int64(tensor.Size()), int64(alignment)) padding := ggufPadding(offset, int64(alignment))
if _, err := rs.Seek(padding, io.SeekCurrent); err != nil { if _, err := rs.Seek(padding, io.SeekCurrent); err != nil {
return err return fmt.Errorf("failed to seek to init padding: %w", err)
}
if _, err := rs.Seek(int64(tensor.Size()), io.SeekCurrent); err != nil {
return fmt.Errorf("failed to seek to tensor: %w", err)
} }
} }
...@@ -260,12 +274,12 @@ func readGGUF[T any](llm *gguf, r io.Reader) (T, error) { ...@@ -260,12 +274,12 @@ func readGGUF[T any](llm *gguf, r io.Reader) (T, error) {
return t, err return t, err
} }
func writeGGUF[V any](llm *gguf, w io.Writer, t uint32, v V) error { func writeGGUF[V any](w io.Writer, t uint32, v V) error {
if err := binary.Write(w, llm.ByteOrder, t); err != nil { if err := binary.Write(w, binary.LittleEndian, t); err != nil {
return err return err
} }
return binary.Write(w, llm.ByteOrder, v) return binary.Write(w, binary.LittleEndian, v)
} }
func readGGUFV1String(llm *gguf, r io.Reader) (string, error) { func readGGUFV1String(llm *gguf, r io.Reader) (string, error) {
...@@ -285,30 +299,56 @@ func readGGUFV1String(llm *gguf, r io.Reader) (string, error) { ...@@ -285,30 +299,56 @@ func readGGUFV1String(llm *gguf, r io.Reader) (string, error) {
return b.String(), nil return b.String(), nil
} }
func discardGGUFString(llm *gguf, r io.Reader) error {
buf := llm.scratch[:8]
_, err := io.ReadFull(r, buf)
if err != nil {
return err
}
size := int(llm.ByteOrder.Uint64(buf))
for size > 0 {
n, err := r.Read(llm.scratch[:min(size, cap(llm.scratch))])
if err != nil {
return err
}
size -= n
}
return nil
}
func readGGUFString(llm *gguf, r io.Reader) (string, error) { func readGGUFString(llm *gguf, r io.Reader) (string, error) {
if llm.Version == 1 { if llm.Version == 1 {
return readGGUFV1String(llm, r) return readGGUFV1String(llm, r)
} }
var length uint64 buf := llm.scratch[:8]
if err := binary.Read(r, llm.ByteOrder, &length); err != nil { _, err := io.ReadFull(r, buf)
if err != nil {
return "", err return "", err
} }
var b bytes.Buffer length := int(llm.ByteOrder.Uint64(buf))
if _, err := io.CopyN(&b, r, int64(length)); err != nil { if length > len(llm.scratch) {
return "", err buf = make([]byte, length)
} else {
buf = llm.scratch[:length]
} }
clear(buf)
return b.String(), nil _, err = io.ReadFull(r, buf)
if err != nil {
return "", err
}
return string(buf), nil
} }
func writeGGUFString(llm *gguf, w io.Writer, s string) error { func writeGGUFString(w io.Writer, s string) error {
if err := binary.Write(w, llm.ByteOrder, ggufTypeString); err != nil { if err := binary.Write(w, binary.LittleEndian, ggufTypeString); err != nil {
return err return err
} }
if err := binary.Write(w, llm.ByteOrder, uint64(len(s))); err != nil { if err := binary.Write(w, binary.LittleEndian, uint64(len(s))); err != nil {
return err return err
} }
...@@ -316,7 +356,16 @@ func writeGGUFString(llm *gguf, w io.Writer, s string) error { ...@@ -316,7 +356,16 @@ func writeGGUFString(llm *gguf, w io.Writer, s string) error {
return err return err
} }
func readGGUFV1Array(llm *gguf, r io.Reader) (a []any, err error) { type array struct {
size int
values []any
}
func (a *array) MarshalJSON() ([]byte, error) {
return json.Marshal(a.values)
}
func readGGUFV1Array(llm *gguf, r io.Reader) (*array, error) {
t, err := readGGUF[uint32](llm, r) t, err := readGGUF[uint32](llm, r)
if err != nil { if err != nil {
return nil, err return nil, err
...@@ -327,7 +376,12 @@ func readGGUFV1Array(llm *gguf, r io.Reader) (a []any, err error) { ...@@ -327,7 +376,12 @@ func readGGUFV1Array(llm *gguf, r io.Reader) (a []any, err error) {
return nil, err return nil, err
} }
for i := 0; uint32(i) < n; i++ { a := &array{size: int(n)}
if llm.canCollectArray(int(n)) {
a.values = make([]any, 0, int(n))
}
for i := range n {
var e any var e any
switch t { switch t {
case ggufTypeUint8: case ggufTypeUint8:
...@@ -361,13 +415,15 @@ func readGGUFV1Array(llm *gguf, r io.Reader) (a []any, err error) { ...@@ -361,13 +415,15 @@ func readGGUFV1Array(llm *gguf, r io.Reader) (a []any, err error) {
return nil, err return nil, err
} }
a = append(a, e) if a.values != nil {
a.values[i] = e
}
} }
return return a, nil
} }
func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) { func readGGUFArray(llm *gguf, r io.Reader) (*array, error) {
if llm.Version == 1 { if llm.Version == 1 {
return readGGUFV1Array(llm, r) return readGGUFV1Array(llm, r)
} }
...@@ -382,7 +438,12 @@ func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) { ...@@ -382,7 +438,12 @@ func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) {
return nil, err return nil, err
} }
for i := 0; uint64(i) < n; i++ { a := &array{size: int(n)}
if llm.canCollectArray(int(n)) {
a.values = make([]any, int(n))
}
for i := range n {
var e any var e any
switch t { switch t {
case ggufTypeUint8: case ggufTypeUint8:
...@@ -408,7 +469,11 @@ func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) { ...@@ -408,7 +469,11 @@ func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) {
case ggufTypeBool: case ggufTypeBool:
e, err = readGGUF[bool](llm, r) e, err = readGGUF[bool](llm, r)
case ggufTypeString: case ggufTypeString:
e, err = readGGUFString(llm, r) if a.values != nil {
e, err = readGGUFString(llm, r)
} else {
err = discardGGUFString(llm, r)
}
default: default:
return nil, fmt.Errorf("invalid array type: %d", t) return nil, fmt.Errorf("invalid array type: %d", t)
} }
...@@ -416,238 +481,183 @@ func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) { ...@@ -416,238 +481,183 @@ func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) {
return nil, err return nil, err
} }
a = append(a, e) if a.values != nil {
a.values[i] = e
}
} }
return return a, nil
} }
func writeGGUFArray[S ~[]E, E any](llm *gguf, w io.Writer, t uint32, s S) error { // writeGGUFArray writes a slice s of type E to the writer w with a gguf type of t
if err := binary.Write(w, llm.ByteOrder, ggufTypeArray); err != nil { func writeGGUFArray[S ~[]E, E any](w io.Writer, t uint32, s S) error {
if err := binary.Write(w, binary.LittleEndian, ggufTypeArray); err != nil {
return err return err
} }
if err := binary.Write(w, llm.ByteOrder, t); err != nil { if err := binary.Write(w, binary.LittleEndian, t); err != nil {
return err return err
} }
if err := binary.Write(w, llm.ByteOrder, uint64(len(s))); err != nil { if err := binary.Write(w, binary.LittleEndian, uint64(len(s))); err != nil {
return err return err
} }
for _, e := range s { return binary.Write(w, binary.LittleEndian, s)
if err := binary.Write(w, llm.ByteOrder, e); err != nil {
return err
}
}
return nil
}
var ggufKVOrder = map[string][]string{
"llama": {
"general.architecture",
"general.name",
"llama.vocab_size",
"llama.context_length",
"llama.embedding_length",
"llama.block_count",
"llama.feed_forward_length",
"llama.attention.head_count",
"llama.attention.head_count_kv",
"llama.attention.layer_norm_rms_epsilon",
"llama.rope.freq_base",
"llama.rope.dimension_count",
"llama.expert_count",
"llama.expert_used_count",
"gemma.context_length",
"gemma.embedding_length",
"gemma.block_count",
"gemma.feed_forward_length",
"gemma.attention.head_count",
"gemma.attention.head_count_kv",
"gemma.attention.layer_norm_rms_epsilon",
"gemma.attention.key_length",
"gemma.attention.value_length",
"general.file_type",
"tokenizer.ggml.pre",
"tokenizer.ggml.model",
"tokenizer.ggml.tokens",
"tokenizer.ggml.scores",
"tokenizer.ggml.merges",
"tokenizer.ggml.token_type",
"tokenizer.ggml.bos_token_id",
"tokenizer.ggml.eos_token_id",
"tokenizer.ggml.unknown_token_id",
"tokenizer.ggml.padding_token_id",
"tokenizer.ggml.add_bos_token",
"tokenizer.ggml.add_eos_token",
"tokenizer.chat_template",
},
} }
func (llm *gguf) Encode(ws io.WriteSeeker, kv KV, tensors []Tensor) error { func WriteGGUF(ws io.WriteSeeker, kv KV, ts []Tensor) error {
switch llm.Version { if err := binary.Write(ws, binary.LittleEndian, []byte("GGUF")); err != nil {
case 3:
llm.V3.NumTensor = uint64(len(tensors))
llm.V3.NumKV = uint64(len(kv))
default:
return fmt.Errorf("not implemented: ggufv%d", llm.Version)
}
if err := binary.Write(ws, llm.ByteOrder, []byte("GGUF")); err != nil {
return err return err
} }
if err := binary.Write(ws, llm.ByteOrder, llm.Version); err != nil { if err := binary.Write(ws, binary.LittleEndian, uint32(3)); err != nil {
return err return err
} }
if err := binary.Write(ws, llm.ByteOrder, llm.numTensor()); err != nil { if err := binary.Write(ws, binary.LittleEndian, uint64(len(ts))); err != nil {
return err return err
} }
if err := binary.Write(ws, llm.ByteOrder, llm.numKV()); err != nil { if err := binary.Write(ws, binary.LittleEndian, uint64(len(kv))); err != nil {
return err return err
} }
kvCheck := make(map[string]bool) keys := maps.Keys(kv)
for k := range kv { slices.Sort(keys)
kvCheck[k] = false
for _, key := range keys {
if err := ggufWriteKV(ws, key, kv[key]); err != nil {
return err
}
} }
for _, k := range ggufKVOrder["llama"] { slices.SortFunc(ts, func(a, b Tensor) int {
v, ok := kv[k] var i, j int
if !ok { if n, err := fmt.Sscanf(a.Name, "blk.%d", &i); err != nil || n != 1 {
continue return cmp.Compare(a.Name, b.Name)
} else if n, err := fmt.Sscanf(b.Name, "blk.%d", &j); err != nil || n != 1 {
return cmp.Compare(a.Name, b.Name)
} }
kvCheck[k] = true
if err := binary.Write(ws, llm.ByteOrder, uint64(len(k))); err != nil { return cmp.Compare(i, j)
})
var s uint64
for _, t := range ts {
t.Offset = s
if err := ggufWriteTensorInfo(ws, t); err != nil {
return err return err
} }
s += t.Size()
}
if err := binary.Write(ws, llm.ByteOrder, []byte(k)); err != nil { var alignment int64 = 32
for _, t := range ts {
if err := ggufWriteTensor(ws, t, alignment); err != nil {
return err return err
} }
}
var err error return nil
switch v := v.(type) { }
case uint32:
err = writeGGUF(llm, ws, ggufTypeUint32, v)
case float32:
err = writeGGUF(llm, ws, ggufTypeFloat32, v)
case bool:
err = writeGGUF(llm, ws, ggufTypeBool, v)
case string:
err = writeGGUFString(llm, ws, v)
case []int32:
err = writeGGUFArray(llm, ws, ggufTypeInt32, v)
case []uint32:
err = writeGGUFArray(llm, ws, ggufTypeUint32, v)
case []float32:
err = writeGGUFArray(llm, ws, ggufTypeFloat32, v)
case []string:
if err := binary.Write(ws, llm.ByteOrder, ggufTypeArray); err != nil {
return err
}
if err := binary.Write(ws, llm.ByteOrder, ggufTypeString); err != nil {
return err
}
if err := binary.Write(ws, llm.ByteOrder, uint64(len(v))); err != nil {
return err
}
for _, e := range v {
if err := binary.Write(ws, llm.ByteOrder, uint64(len(e))); err != nil {
return err
}
if err := binary.Write(ws, llm.ByteOrder, []byte(e)); err != nil { func ggufWriteKV(ws io.WriteSeeker, k string, v any) error {
return err slog.Debug(k, "type", fmt.Sprintf("%T", v))
} if err := binary.Write(ws, binary.LittleEndian, uint64(len(k))); err != nil {
} return err
default:
return fmt.Errorf("improper type for '%s'", k)
}
if err != nil {
return err
}
} }
for k, v := range kvCheck { if err := binary.Write(ws, binary.LittleEndian, []byte(k)); err != nil {
if !v { return err
return fmt.Errorf("Didn't know how to write kv %s", k)
}
} }
for _, tensor := range tensors { var err error
if err := binary.Write(ws, llm.ByteOrder, uint64(len(tensor.Name))); err != nil { switch v := v.(type) {
case uint32:
err = writeGGUF(ws, ggufTypeUint32, v)
case float32:
err = writeGGUF(ws, ggufTypeFloat32, v)
case bool:
err = writeGGUF(ws, ggufTypeBool, v)
case string:
err = writeGGUFString(ws, v)
case []int32:
err = writeGGUFArray(ws, ggufTypeInt32, v)
case []uint32:
err = writeGGUFArray(ws, ggufTypeUint32, v)
case []float32:
err = writeGGUFArray(ws, ggufTypeFloat32, v)
case []string:
if err := binary.Write(ws, binary.LittleEndian, ggufTypeArray); err != nil {
return err return err
} }
if err := binary.Write(ws, llm.ByteOrder, []byte(tensor.Name)); err != nil { if err := binary.Write(ws, binary.LittleEndian, ggufTypeString); err != nil {
return err return err
} }
dims := 0 if err := binary.Write(ws, binary.LittleEndian, uint64(len(v))); err != nil {
for cnt := 0; cnt < len(tensor.Shape); cnt++ {
if tensor.Shape[cnt] > 0 {
dims++
}
}
if err := binary.Write(ws, llm.ByteOrder, uint32(dims)); err != nil {
return err return err
} }
for i := 0; i < dims; i++ { for _, e := range v {
if err := binary.Write(ws, llm.ByteOrder, uint64(tensor.Shape[dims-1-i])); err != nil { if err := binary.Write(ws, binary.LittleEndian, uint64(len(e))); err != nil {
return err return err
} }
}
if err := binary.Write(ws, llm.ByteOrder, tensor.Kind); err != nil { if err := binary.Write(ws, binary.LittleEndian, []byte(e)); err != nil {
return err return err
}
} }
default:
return fmt.Errorf("improper type for '%s'", k)
}
if err := binary.Write(ws, llm.ByteOrder, tensor.Offset); err != nil { return err
return err }
}
func ggufWriteTensorInfo(ws io.WriteSeeker, t Tensor) error {
slog.Debug(t.Name, "kind", t.Kind, "shape", t.Shape, "offset", t.Offset)
if err := binary.Write(ws, binary.LittleEndian, uint64(len(t.Name))); err != nil {
return err
} }
offset, err := ws.Seek(0, io.SeekCurrent) if err := binary.Write(ws, binary.LittleEndian, []byte(t.Name)); err != nil {
if err != nil {
return err return err
} }
var alignment int64 = 32 if err := binary.Write(ws, binary.LittleEndian, uint32(len(t.Shape))); err != nil {
padding := llm.padding(offset, alignment)
if err := binary.Write(ws, llm.ByteOrder, bytes.Repeat([]byte{0}, int(padding))); err != nil {
return err return err
} }
for _, tensor := range tensors { for i := range len(t.Shape) {
if _, err := tensor.WriteTo(ws); err != nil { if err := binary.Write(ws, binary.LittleEndian, t.Shape[len(t.Shape)-i-1]); err != nil {
return err return err
} }
}
offset, err := ws.Seek(0, io.SeekCurrent) if err := binary.Write(ws, binary.LittleEndian, t.Kind); err != nil {
if err != nil { return err
return err }
}
padding := llm.padding(offset, alignment) return binary.Write(ws, binary.LittleEndian, t.Offset)
if err := binary.Write(ws, llm.ByteOrder, bytes.Repeat([]byte{0}, int(padding))); err != nil { }
return err
} func ggufWriteTensor(ws io.WriteSeeker, t Tensor, alignment int64) error {
offset, err := ws.Seek(0, io.SeekCurrent)
if err != nil {
return err
} }
return nil if err := binary.Write(ws, binary.LittleEndian, bytes.Repeat([]byte{0}, int(ggufPadding(offset, alignment)))); err != nil {
return err
}
_, err = t.WriteTo(ws)
return err
} }
func (gguf) padding(offset, align int64) int64 { func ggufPadding(offset, align int64) int64 {
return (align - offset%align) % align return (align - offset%align) % align
} }
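WriteGGUF above sorts key-value pairs lexically and orders tensors by their blk.%d index when both names parse, falling back to a plain string comparison otherwise. A small, self-contained sketch of that comparator over tensor names only (the sortTensorNames helper is illustrative, not part of the patch):

package main

import (
    "cmp"
    "fmt"
    "slices"
)

// sortTensorNames orders names the same way WriteGGUF orders tensors:
// by block index when both names match "blk.%d", lexically otherwise.
func sortTensorNames(names []string) {
    slices.SortFunc(names, func(a, b string) int {
        var i, j int
        if n, err := fmt.Sscanf(a, "blk.%d", &i); err != nil || n != 1 {
            return cmp.Compare(a, b)
        } else if n, err := fmt.Sscanf(b, "blk.%d", &j); err != nil || n != 1 {
            return cmp.Compare(a, b)
        }
        return cmp.Compare(i, j)
    })
}

func main() {
    names := []string{"blk.10.attn_q.weight", "blk.2.attn_q.weight", "output.weight"}
    sortTensorNames(names)
    fmt.Println(names) // [blk.2.attn_q.weight blk.10.attn_q.weight output.weight]
}

Sorting by numeric block index keeps blk.2 ahead of blk.10, which a plain lexical sort would invert.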
---
Checks: >
bugprone-*,
-bugprone-easily-swappable-parameters,
-bugprone-implicit-widening-of-multiplication-result,
-bugprone-misplaced-widening-cast,
-bugprone-narrowing-conversions,
readability-*,
-readability-avoid-unconditional-preprocessor-if,
-readability-function-cognitive-complexity,
-readability-identifier-length,
-readability-implicit-bool-conversion,
-readability-magic-numbers,
-readability-uppercase-literal-suffix,
-readability-simplify-boolean-expr,
clang-analyzer-*,
-clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling,
performance-*,
portability-*,
misc-*,
-misc-const-correctness,
-misc-non-private-member-variables-in-classes,
-misc-no-recursion,
FormatStyle: none
...@@ -15,7 +15,7 @@ node('x86_runner1'){ // Running on x86 runner containing latest vecto ...@@ -15,7 +15,7 @@ node('x86_runner1'){ // Running on x86 runner containing latest vecto
stage('Running llama.cpp'){ stage('Running llama.cpp'){
sh'''#!/bin/bash sh'''#!/bin/bash
module load gnu-bin2/0.1 # loading latest versions of vector qemu and vector gcc module load gnu-bin2/0.1 # loading latest versions of vector qemu and vector gcc
qemu-riscv64 -L /softwares/gnu-bin2/sysroot -cpu rv64,v=true,vlen=256,elen=64,vext_spec=v1.0 ./main -m /home/alitariq/codellama-7b.Q4_K_M.gguf -p "Anything" -n 9 > llama_log.txt # Running llama.cpp on vector qemu-riscv64 qemu-riscv64 -L /softwares/gnu-bin2/sysroot -cpu rv64,v=true,vlen=256,elen=64,vext_spec=v1.0 ./llama-cli -m /home/alitariq/codellama-7b.Q4_K_M.gguf -p "Anything" -n 9 > llama_log.txt # Running llama.cpp on vector qemu-riscv64
cat llama_log.txt # Printing results cat llama_log.txt # Printing results
''' '''
} }
......
...@@ -6,13 +6,13 @@ ARG CUDA_VERSION=11.7.1 ...@@ -6,13 +6,13 @@ ARG CUDA_VERSION=11.7.1
# Target the CUDA build image # Target the CUDA build image
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION} ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
FROM ${BASE_CUDA_DEV_CONTAINER} as build FROM ${BASE_CUDA_DEV_CONTAINER} AS build
# Unless otherwise specified, we make a fat build. # Unless otherwise specified, we make a fat build.
ARG CUDA_DOCKER_ARCH=all ARG CUDA_DOCKER_ARCH=all
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev libgomp1
COPY requirements.txt requirements.txt COPY requirements.txt requirements.txt
COPY requirements requirements COPY requirements requirements
...@@ -27,10 +27,10 @@ COPY . . ...@@ -27,10 +27,10 @@ COPY . .
# Set nvcc architecture # Set nvcc architecture
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH} ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
# Enable CUDA # Enable CUDA
ENV LLAMA_CUDA=1 ENV GGML_CUDA=1
# Enable cURL # Enable cURL
ENV LLAMA_CURL=1 ENV LLAMA_CURL=1
RUN make RUN make -j$(nproc)
ENTRYPOINT ["/app/.devops/tools.sh"] ENTRYPOINT ["/app/.devops/tools.sh"]
...@@ -6,7 +6,7 @@ ARG ROCM_VERSION=5.6 ...@@ -6,7 +6,7 @@ ARG ROCM_VERSION=5.6
# Target the CUDA build image # Target the CUDA build image
ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete
FROM ${BASE_ROCM_DEV_CONTAINER} as build FROM ${BASE_ROCM_DEV_CONTAINER} AS build
# Unless otherwise specified, we make a fat build. # Unless otherwise specified, we make a fat build.
# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878 # List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878
...@@ -36,7 +36,7 @@ COPY . . ...@@ -36,7 +36,7 @@ COPY . .
# Set nvcc architecture # Set nvcc architecture
ENV GPU_TARGETS=${ROCM_DOCKER_ARCH} ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
# Enable ROCm # Enable ROCm
ENV LLAMA_HIPBLAS=1 ENV GGML_HIPBLAS=1
ENV CC=/opt/rocm/llvm/bin/clang ENV CC=/opt/rocm/llvm/bin/clang
ENV CXX=/opt/rocm/llvm/bin/clang++ ENV CXX=/opt/rocm/llvm/bin/clang++
...@@ -45,6 +45,6 @@ ENV LLAMA_CURL=1 ...@@ -45,6 +45,6 @@ ENV LLAMA_CURL=1
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y libcurl4-openssl-dev apt-get install -y libcurl4-openssl-dev
RUN make RUN make -j$(nproc)
ENTRYPOINT ["/app/.devops/tools.sh"] ENTRYPOINT ["/app/.devops/tools.sh"]
ARG UBUNTU_VERSION=22.04 ARG UBUNTU_VERSION=22.04
FROM ubuntu:$UBUNTU_VERSION as build FROM ubuntu:$UBUNTU_VERSION AS build
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev libgomp1
COPY requirements.txt requirements.txt COPY requirements.txt requirements.txt
COPY requirements requirements COPY requirements requirements
...@@ -18,7 +18,7 @@ COPY . . ...@@ -18,7 +18,7 @@ COPY . .
ENV LLAMA_CURL=1 ENV LLAMA_CURL=1
RUN make RUN make -j$(nproc)
ENV LC_ALL=C.utf8 ENV LC_ALL=C.utf8
......
ARG UBUNTU_VERSION=22.04
# This needs to generally match the container host's environment.
ARG CUDA_VERSION=11.7.1
# Target the CUDA build image
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
# Target the CUDA runtime image
ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
FROM ${BASE_CUDA_DEV_CONTAINER} AS build
# Unless otherwise specified, we make a fat build.
ARG CUDA_DOCKER_ARCH=all
RUN apt-get update && \
apt-get install -y build-essential git
WORKDIR /app
COPY . .
# Set nvcc architecture
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
# Enable CUDA
ENV GGML_CUDA=1
RUN make -j$(nproc) llama-cli
FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime
RUN apt-get update && \
apt-get install -y libgomp1
COPY --from=build /app/llama-cli /llama-cli
ENTRYPOINT [ "/llama-cli" ]
ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04
FROM intel/oneapi-basekit:$ONEAPI_VERSION AS build
ARG GGML_SYCL_F16=OFF
RUN apt-get update && \
apt-get install -y git
WORKDIR /app
COPY . .
RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
echo "GGML_SYCL_F16 is set" && \
export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
fi && \
echo "Building with static libs" && \
cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx \
${OPT_SYCL_F16} -DBUILD_SHARED_LIBS=OFF && \
cmake --build build --config Release --target llama-cli
FROM intel/oneapi-basekit:$ONEAPI_VERSION AS runtime
COPY --from=build /app/build/bin/llama-cli /llama-cli
ENV LC_ALL=C.utf8
ENTRYPOINT [ "/llama-cli" ]
ARG UBUNTU_VERSION=22.04
# This needs to generally match the container host's environment.
ARG ROCM_VERSION=5.6
# Target the CUDA build image
ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete
FROM ${BASE_ROCM_DEV_CONTAINER} AS build
# Unless otherwise specified, we make a fat build.
# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878
# This is mostly tied to rocBLAS supported archs.
ARG ROCM_DOCKER_ARCH=\
gfx803 \
gfx900 \
gfx906 \
gfx908 \
gfx90a \
gfx1010 \
gfx1030 \
gfx1100 \
gfx1101 \
gfx1102
COPY requirements.txt requirements.txt
COPY requirements requirements
RUN pip install --upgrade pip setuptools wheel \
&& pip install -r requirements.txt
WORKDIR /app
COPY . .
# Set nvcc architecture
ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
# Enable ROCm
ENV GGML_HIPBLAS=1
ENV CC=/opt/rocm/llvm/bin/clang
ENV CXX=/opt/rocm/llvm/bin/clang++
RUN make -j$(nproc) llama-cli
ENTRYPOINT [ "/app/llama-cli" ]
ARG UBUNTU_VERSION=jammy
FROM ubuntu:$UBUNTU_VERSION AS build
# Install build tools
RUN apt update && apt install -y git build-essential cmake wget libgomp1
# Install Vulkan SDK
RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
apt update -y && \
apt-get install -y vulkan-sdk
# Build it
WORKDIR /app
COPY . .
RUN cmake -B build -DGGML_VULKAN=1 && \
cmake --build build --config Release --target llama-cli
# Clean up
WORKDIR /
RUN cp /app/build/bin/llama-cli /llama-cli && \
rm -rf /app
ENV LC_ALL=C.utf8
ENTRYPOINT [ "/llama-cli" ]