Commit fd962a36 authored by Jeffrey Morgan

client updates

parent 6292f4b6
package api
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"github.com/jmorganca/ollama/signature"
)
type Client struct {
@@ -36,7 +35,7 @@ func checkError(resp *http.Response, body []byte) error {
return apiError
}
func (c *Client) do(ctx context.Context, method string, path string, stream bool, reqData any, respData any) error {
func (c *Client) stream(ctx context.Context, method string, path string, reqData any, callback func(data []byte)) error {
var reqBody io.Reader
var data []byte
var err error
@@ -55,17 +54,50 @@ func (c *Client) do(ctx context.Context, method string, path string, stream bool
return err
}
if c.PrivateKey != nil {
s := signature.SignatureData{
Method: method,
Path: url,
Data: data,
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
for k, v := range c.Headers {
req.Header[k] = v
}
authHeader, err := signature.SignAuthData(s, c.PrivateKey)
res, err := c.HTTP.Do(req)
if err != nil {
return err
}
req.Header.Set("Authorization", authHeader)
defer res.Body.Close()
reader := bufio.NewReader(res.Body)
for {
line, err := reader.ReadBytes('\n')
if err != nil {
break
}
callback(bytes.TrimSuffix(line, []byte("\n")))
}
return nil
}
func (c *Client) do(ctx context.Context, method string, path string, reqData any, respData any) error {
var reqBody io.Reader
var data []byte
var err error
if reqData != nil {
data, err = json.Marshal(reqData)
if err != nil {
return err
}
reqBody = bytes.NewReader(data)
}
url := fmt.Sprintf("%s%s", c.URL, path)
req, err := http.NewRequestWithContext(ctx, method, url, reqBody)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
@@ -97,3 +129,14 @@ func (c *Client) do(ctx context.Context, method string, path string, stream bool
}
return nil
}
func (c *Client) Generate(ctx context.Context, req *GenerateRequest, callback func(token string)) (*GenerateResponse, error) {
var res GenerateResponse
if err := c.stream(ctx, http.MethodPost, "/api/generate", req, func(token []byte) {
callback(string(token))
}); err != nil {
return nil, err
}
return &res, nil
}
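For reference, a minimal, hypothetical sketch of driving the new streaming `Client.Generate` from another package. The `cmd` import path and the `Prompt` field on `GenerateRequest` are assumptions; neither appears in this diff.
```
package main

import (
	"context"
	"fmt"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/cmd" // import path assumed from the module name
)

func main() {
	// NewAPIClient (added below) returns a client that dials ~/.ollama/ollama.sock.
	client, err := cmd.NewAPIClient()
	if err != nil {
		panic(err)
	}

	req := &api.GenerateRequest{
		Model:  "orca",                 // Model is the only request field this diff shows being read
		Prompt: "Why is the sky blue?", // hypothetical field, not visible in this diff
	}

	// Each newline-delimited chunk streamed back from POST /api/generate is handed to the callback.
	if _, err := client.Generate(context.Background(), req, func(token string) {
		fmt.Print(token)
	}); err != nil {
		fmt.Println("generate failed:", err)
	}
}
```
Note that the returned `*GenerateResponse` is never populated in this version; all output arrives through the callback.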
@@ -2,7 +2,6 @@ package cmd
import (
"context"
"fmt"
"log"
"net"
"net/http"
@@ -10,35 +9,70 @@ import (
"path"
"time"
"github.com/spf13/cobra"
"github.com/jmorganca/ollama/api"
"github.com/jmorganca/ollama/server"
"github.com/spf13/cobra"
)
func NewAPIClient(cmd *cobra.Command) (*api.Client, error) {
var rawKey []byte
var err error
func sockpath() string {
home, err := os.UserHomeDir()
if err != nil {
return nil, err
panic(err)
}
socket := path.Join(home, ".ollama", "ollama.sock")
return path.Join(home, ".ollama", "ollama.sock")
}
dialer := &net.Dialer{
Timeout: 10 * time.Second,
func running() bool {
// Set a timeout duration
timeout := time.Second
// Dial the unix socket
conn, err := net.DialTimeout("unix", sockpath(), timeout)
if err != nil {
return false
}
if conn != nil {
defer conn.Close()
}
return true
}
func serve() error {
sp := sockpath()
if err := os.MkdirAll(path.Dir(sp), 0o700); err != nil {
return err
}
k, _ := cmd.Flags().GetString("key")
if err := os.RemoveAll(sp); err != nil {
return err
}
if k != "" {
fn := path.Join(home, ".ollama/keys/", k)
rawKey, err = os.ReadFile(fn)
ln, err := net.Listen("unix", sp)
if err != nil {
return err
}
if err := os.Chmod(sp, 0o700); err != nil {
return err
}
return server.Serve(ln)
}
func NewAPIClient() (*api.Client, error) {
var err error
home, err := os.UserHomeDir()
if err != nil {
return nil, err
}
socket := path.Join(home, ".ollama", "ollama.sock")
dialer := &net.Dialer{
Timeout: 10 * time.Second,
}
return &api.Client{
@@ -50,7 +84,6 @@ func NewAPIClient(cmd *cobra.Command) (*api.Client, error) {
},
},
},
PrivateKey: rawKey,
}, nil
}
@@ -69,28 +102,12 @@ func NewCLI() *cobra.Command {
},
}
rootCmd.PersistentFlags().StringP("key", "k", "", "Private key to use for authenticating")
cobra.EnableCommandSorting = false
modelsCmd := &cobra.Command{
Use: "models",
Args: cobra.MaximumNArgs(1),
Short: "List models",
Long: "List the models",
RunE: func(cmd *cobra.Command, args []string) error {
client, err := NewAPIClient(cmd)
if err != nil {
return err
}
fmt.Printf("client = %q\n", client)
return nil
},
}
runCmd := &cobra.Command{
Use: "run",
Short: "Run a model and submit prompts.",
Use: "run MODEL",
Short: "Run a model",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return nil
},
@@ -101,35 +118,11 @@ func NewCLI() *cobra.Command {
Aliases: []string{"start"},
Short: "Start ollama",
RunE: func(cmd *cobra.Command, args []string) error {
home, err := os.UserHomeDir()
if err != nil {
return err
}
socket := path.Join(home, ".ollama", "ollama.sock")
if err := os.MkdirAll(path.Dir(socket), 0o700); err != nil {
return err
}
if err := os.RemoveAll(socket); err != nil {
return err
}
ln, err := net.Listen("unix", socket)
if err != nil {
return err
}
if err := os.Chmod(socket, 0o700); err != nil {
return err
}
return server.Serve(ln)
return serve()
},
}
rootCmd.AddCommand(
modelsCmd,
serveCmd,
runCmd,
)
......
@@ -5,7 +5,6 @@ go 1.20
require (
github.com/gin-gonic/gin v1.9.1
github.com/spf13/cobra v1.7.0
golang.org/x/crypto v0.10.0
)
require (
@@ -30,6 +29,7 @@ require (
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
golang.org/x/arch v0.3.0 // indirect
golang.org/x/crypto v0.10.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/sys v0.9.0 // indirect
golang.org/x/text v0.10.0 // indirect
......
@@ -55,78 +55,14 @@ void sigint_handler(int signo) {
}
#endif
int get_embeddings(void *params_ptr, void *state_pr, float *res_embeddings) {
gpt_params *params_p = (gpt_params *)params_ptr;
llama_context *ctx = (llama_context *)state_pr;
gpt_params params = *params_p;
if (params.seed <= 0) {
params.seed = time(NULL);
}
std::mt19937 rng(params.seed);
llama_init_backend(params.numa);
int n_past = 0;
// Add a space in front of the first character to match OG llama tokenizer
// behavior
params.prompt.insert(0, 1, ' ');
// tokenize the prompt
auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);
// determine newline token
auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);
if (embd_inp.size() > 0) {
if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past,
params.n_threads)) {
fprintf(stderr, "%s : failed to eval\n", __func__);
return 1;
}
}
const int n_embd = llama_n_embd(ctx);
const auto embeddings = llama_get_embeddings(ctx);
for (int i = 0; i < n_embd; i++) {
res_embeddings[i] = embeddings[i];
}
return 0;
}
int get_token_embeddings(void *params_ptr, void *state_pr, int *tokens,
int tokenSize, float *res_embeddings) {
gpt_params *params_p = (gpt_params *)params_ptr;
llama_context *ctx = (llama_context *)state_pr;
gpt_params params = *params_p;
for (int i = 0; i < tokenSize; i++) {
auto token_str = llama_token_to_str(ctx, tokens[i]);
if (token_str == nullptr) {
continue;
}
std::vector<std::string> my_vector;
std::string str_token(token_str); // create a new std::string from the char*
params_p->prompt += str_token;
}
return get_embeddings(params_ptr, state_pr, res_embeddings);
}
int eval(void *params_ptr, void *state_pr, char *text) {
gpt_params *params_p = (gpt_params *)params_ptr;
llama_context *ctx = (llama_context *)state_pr;
int eval(void *p, void *c, char *text) {
gpt_params *params = (gpt_params *)p;
llama_context *ctx = (llama_context *)c;
auto n_past = 0;
auto last_n_tokens_data =
std::vector<llama_token>(params_p->repeat_last_n, 0);
auto last_n_tokens_data = std::vector<llama_token>(params->repeat_last_n, 0);
auto tokens = std::vector<llama_token>(params_p->n_ctx);
auto tokens = std::vector<llama_token>(params->n_ctx);
auto n_prompt_tokens =
llama_tokenize(ctx, text, tokens.data(), tokens.size(), true);
@@ -135,26 +71,22 @@ int eval(void *params_ptr, void *state_pr, char *text) {
return 1;
}
// evaluate prompt
return llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past,
params_p->n_threads);
params->n_threads);
}
int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
gpt_params *params_p = (gpt_params *)params_ptr;
llama_context *ctx = (llama_context *)state_pr;
gpt_params params = *params_p;
int llama_predict(void *p, void *c, char *result, bool debug) {
gpt_params *params = (gpt_params *)p;
llama_context *ctx = (llama_context *)c;
const int n_ctx = llama_n_ctx(ctx);
if (params.seed <= 0) {
params.seed = time(NULL);
if (params->seed <= 0) {
params->seed = time(NULL);
}
std::mt19937 rng(params.seed);
std::string path_session = params.path_prompt_cache;
std::mt19937 rng(params->seed);
std::string path_session = params->path_prompt_cache;
std::vector<llama_token> session_tokens;
if (!path_session.empty()) {
@@ -177,7 +109,7 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
return 1;
}
session_tokens.resize(n_token_count_out);
llama_set_rng_seed(ctx, params.seed);
llama_set_rng_seed(ctx, params->seed);
if (debug) {
fprintf(stderr, "%s: loaded a session with prompt size of %d tokens\n",
__func__, (int)session_tokens.size());
@@ -191,12 +123,12 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
}
std::vector<llama_token> embd_inp;
if (!params.prompt.empty() || session_tokens.empty()) {
if (!params->prompt.empty() || session_tokens.empty()) {
// Add a space in front of the first character to match OG llama tokenizer
// behavior
params.prompt.insert(0, 1, ' ');
params->prompt.insert(0, 1, ' ');
embd_inp = ::llama_tokenize(ctx, params.prompt, true);
embd_inp = ::llama_tokenize(ctx, params->prompt, true);
} else {
embd_inp = session_tokens;
}
@@ -212,7 +144,7 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
n_matching_session_tokens++;
}
if (debug) {
if (params.prompt.empty() &&
if (params->prompt.empty() &&
n_matching_session_tokens == embd_inp.size()) {
fprintf(stderr, "%s: using full prompt from session file\n", __func__);
} else if (n_matching_session_tokens >= embd_inp.size()) {
@@ -237,8 +169,8 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
session_tokens.resize(embd_inp.size() - 1);
}
// number of tokens to keep when resetting context
if (params.n_keep < 0 || params.n_keep > (int)embd_inp.size()) {
params.n_keep = (int)embd_inp.size();
if (params->n_keep < 0 || params->n_keep > (int)embd_inp.size()) {
params->n_keep = (int)embd_inp.size();
}
// determine newline token
@@ -251,7 +183,7 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
bool need_to_save_session =
!path_session.empty() && n_matching_session_tokens < embd_inp.size();
int n_past = 0;
int n_remain = params.n_predict;
int n_remain = params->n_predict;
int n_consumed = 0;
int n_session_consumed = 0;
@@ -263,7 +195,7 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
const std::vector<llama_token> tmp = {
llama_token_bos(),
};
llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
llama_eval(ctx, tmp.data(), tmp.size(), 0, params->n_threads);
llama_reset_timings(ctx);
}
@@ -276,10 +208,10 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
// - take half of the last (n_ctx - n_keep) tokens and recompute the
// logits in batches
if (n_past + (int)embd.size() > n_ctx) {
const int n_left = n_past - params.n_keep;
const int n_left = n_past - params->n_keep;
// always keep the first token - BOS
n_past = std::max(1, params.n_keep);
n_past = std::max(1, params->n_keep);
// insert n_left/2 tokens at the start of embd from last_n_tokens
embd.insert(embd.begin(),
@@ -288,14 +220,6 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
// stop saving session if we run out of context
path_session.clear();
// printf("\n---\n");
// printf("resetting: '");
// for (int i = 0; i < (int) embd.size(); i++) {
// printf("%s", llama_token_to_str(ctx, embd[i]));
// }
// printf("'\n");
// printf("\n---\n");
}
// try to reuse a matching prefix from the loaded session instead of
@@ -324,15 +248,17 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
// evaluate tokens in batches
// embd is typically prepared beforehand to fit within a batch, but not
// always
for (int i = 0; i < (int)embd.size(); i += params.n_batch) {
for (int i = 0; i < (int)embd.size(); i += params->n_batch) {
int n_eval = (int)embd.size() - i;
if (n_eval > params.n_batch) {
n_eval = params.n_batch;
if (n_eval > params->n_batch) {
n_eval = params->n_batch;
}
if (llama_eval(ctx, &embd[i], n_eval, n_past, params.n_threads)) {
if (llama_eval(ctx, &embd[i], n_eval, n_past, params->n_threads)) {
fprintf(stderr, "%s : failed to eval\n", __func__);
return 1;
}
n_past += n_eval;
}
@@ -346,26 +272,26 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
if ((int)embd_inp.size() <= n_consumed) {
// out of user input, sample next token
const float temp = params.temp;
const float temp = params->temp;
const int32_t top_k =
params.top_k <= 0 ? llama_n_vocab(ctx) : params.top_k;
const float top_p = params.top_p;
const float tfs_z = params.tfs_z;
const float typical_p = params.typical_p;
params->top_k <= 0 ? llama_n_vocab(ctx) : params->top_k;
const float top_p = params->top_p;
const float tfs_z = params->tfs_z;
const float typical_p = params->typical_p;
const int32_t repeat_last_n =
params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
const float repeat_penalty = params.repeat_penalty;
const float alpha_presence = params.presence_penalty;
const float alpha_frequency = params.frequency_penalty;
const int mirostat = params.mirostat;
const float mirostat_tau = params.mirostat_tau;
const float mirostat_eta = params.mirostat_eta;
const bool penalize_nl = params.penalize_nl;
params->repeat_last_n < 0 ? n_ctx : params->repeat_last_n;
const float repeat_penalty = params->repeat_penalty;
const float alpha_presence = params->presence_penalty;
const float alpha_frequency = params->frequency_penalty;
const int mirostat = params->mirostat;
const float mirostat_tau = params->mirostat_tau;
const float mirostat_eta = params->mirostat_eta;
const bool penalize_nl = params->penalize_nl;
// optionally save the session on first sample (for faster prompt loading
// next time)
if (!path_session.empty() && need_to_save_session &&
!params.prompt_cache_ro) {
!params->prompt_cache_ro) {
need_to_save_session = false;
llama_save_session_file(ctx, path_session.c_str(),
session_tokens.data(), session_tokens.size());
@@ -378,8 +304,8 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
auto n_vocab = llama_n_vocab(ctx);
// Apply params.logit_bias map
for (auto it = params.logit_bias.begin(); it != params.logit_bias.end();
it++) {
for (auto it = params->logit_bias.begin();
it != params->logit_bias.end(); it++) {
logits[it->first] += it->second;
}
@@ -435,7 +361,6 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
id = llama_sample_token(ctx, &candidates_p);
}
}
// printf("`%d`", candidates_p.size);
last_n_tokens.erase(last_n_tokens.begin());
last_n_tokens.push_back(id);
@@ -450,7 +375,7 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
// call the token callback, no need to check if one is actually
// registered, that will be handled on the Go side.
auto token_str = llama_token_to_str(ctx, id);
if (!tokenCallback(state_pr, (char *)token_str)) {
if (!tokenCallback(ctx, (char *)token_str)) {
break;
}
} else {
@@ -461,7 +386,7 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
last_n_tokens.erase(last_n_tokens.begin());
last_n_tokens.push_back(embd_inp[n_consumed]);
++n_consumed;
if ((int)embd.size() >= params.n_batch) {
if ((int)embd.size() >= params->n_batch) {
break;
}
}
@@ -472,13 +397,13 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
}
// check for stop prompt
if (params.antiprompt.size()) {
if (params->antiprompt.size()) {
std::string last_output;
for (auto id : last_n_tokens) {
last_output += llama_token_to_str(ctx, id);
}
// Check if each of the reverse prompts appears at the end of the output.
for (std::string &antiprompt : params.antiprompt) {
for (std::string &antiprompt : params->antiprompt) {
// size_t extra_padding = params.interactive ? 0 : 2;
size_t extra_padding = 2;
size_t search_start_pos =
@@ -501,8 +426,8 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
}
}
if (!path_session.empty() && params.prompt_cache_all &&
!params.prompt_cache_ro) {
if (!path_session.empty() && params->prompt_cache_all &&
!params->prompt_cache_ro) {
if (debug) {
fprintf(stderr, "\n%s: saving final output to session file '%s'\n",
__func__, path_session.c_str());
@@ -525,68 +450,8 @@ end:
return 0;
}
void llama_binding_free_model(void *state_ptr) {
llama_context *ctx = (llama_context *)state_ptr;
llama_free(ctx);
}
void llama_free_params(void *params_ptr) {
gpt_params *params = (gpt_params *)params_ptr;
delete params;
}
std::vector<std::string> create_vector(const char **strings, int count) {
std::vector<std::string> *vec = new std::vector<std::string>;
for (int i = 0; i < count; i++) {
vec->push_back(std::string(strings[i]));
}
return *vec;
}
void delete_vector(std::vector<std::string> *vec) { delete vec; }
int load_state(void *ctx, char *statefile, char *modes) {
llama_context *state = (llama_context *)ctx;
const llama_context *constState = static_cast<const llama_context *>(state);
const size_t state_size = llama_get_state_size(state);
uint8_t *state_mem = new uint8_t[state_size];
{
FILE *fp_read = fopen(statefile, modes);
if (state_size != llama_get_state_size(constState)) {
fprintf(stderr, "\n%s : failed to validate state size\n", __func__);
return 1;
}
const size_t ret = fread(state_mem, 1, state_size, fp_read);
if (ret != state_size) {
fprintf(stderr, "\n%s : failed to read state\n", __func__);
return 1;
}
llama_set_state_data(
state, state_mem); // could also read directly from memory mapped file
fclose(fp_read);
}
return 0;
}
void save_state(void *ctx, char *dst, char *modes) {
llama_context *state = (llama_context *)ctx;
const size_t state_size = llama_get_state_size(state);
uint8_t *state_mem = new uint8_t[state_size];
// Save state (rng, logits, embedding and kv_cache) to file
{
FILE *fp_write = fopen(dst, modes);
llama_copy_state_data(
state, state_mem); // could also copy directly to memory mapped file
fwrite(state_mem, 1, state_size, fp_write);
fclose(fp_write);
}
}
void llama_binding_free_model(void *ctx) { llama_free((llama_context *)ctx); }
void llama_free_params(void *params) { delete (gpt_params *)params; }
void *llama_allocate_params(
const char *prompt, int seed, int threads, int tokens, int top_k,
@@ -640,9 +505,13 @@ void *llama_allocate_params(
if (ignore_eos) {
params->logit_bias[llama_token_eos()] = -INFINITY;
}
if (antiprompt_count > 0) {
params->antiprompt = create_vector(antiprompt, antiprompt_count);
for (int i = 0; i < antiprompt_count; i++) {
params->antiprompt.push_back(std::string(antiprompt[i]));
}
}
params->tfs_z = tfs_z;
params->typical_p = typical_p;
params->presence_penalty = presence_penalty;
@@ -650,6 +519,7 @@
params->mirostat_eta = mirostat_eta;
params->mirostat_tau = mirostat_tau;
params->penalize_nl = penalize_nl;
std::stringstream ss(logit_bias);
llama_token key;
char sign;
@@ -669,7 +539,6 @@ void *load_model(const char *fname, int n_ctx, int n_seed, bool memory_f16,
bool mlock, bool embeddings, bool mmap, bool low_vram,
bool vocab_only, int n_gpu_layers, int n_batch,
const char *maingpu, const char *tensorsplit, bool numa) {
// load the model
auto lparams = llama_context_default_params();
lparams.n_ctx = n_ctx;
@@ -706,25 +575,11 @@ void *load_model(const char *fname, int n_ctx, int n_seed, bool memory_f16,
lparams.n_batch = n_batch;
llama_init_backend(numa);
void *res = nullptr;
try {
llama_model *model = llama_load_model_from_file(fname, lparams);
if (model == NULL) {
fprintf(stderr, "error: failed to load model \n");
return res;
}
llama_context *lctx = llama_new_context_with_model(model, lparams);
if (lctx == NULL) {
fprintf(stderr, "error: failed to create context with model \n");
llama_free_model(model);
return res;
}
} catch (std::runtime_error &e) {
fprintf(stderr, "failed %s", e.what());
return res;
struct llama_model *model = llama_load_model_from_file(fname, lparams);
if (!model) {
return nullptr;
}
return res;
return llama_new_context_with_model(model, lparams);
}
@@ -30,22 +30,13 @@ extern "C" {
extern unsigned char tokenCallback(void *, char *);
int load_state(void *ctx, char *statefile, char *modes);
int eval(void *params_ptr, void *ctx, char *text);
void save_state(void *ctx, char *dst, char *modes);
int eval(void *p, void *c, char *text);
void *load_model(const char *fname, int n_ctx, int n_seed, bool memory_f16,
bool mlock, bool embeddings, bool mmap, bool low_vram,
bool vocab_only, int n_gpu, int n_batch, const char *maingpu,
const char *tensorsplit, bool numa);
int get_embeddings(void *params_ptr, void *state_pr, float *res_embeddings);
int get_token_embeddings(void *params_ptr, void *state_pr, int *tokens,
int tokenSize, float *res_embeddings);
void *llama_allocate_params(
const char *prompt, int seed, int threads, int tokens, int top_k,
float top_p, float temp, float repeat_penalty, int repeat_last_n,
@@ -59,13 +50,11 @@ void *llama_allocate_params(
void llama_free_params(void *params_ptr);
void llama_binding_free_model(void *state);
void llama_binding_free_model(void *ctx);
int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug);
#ifdef __cplusplus
}
std::vector<std::string> create_vector(const char **strings, int count);
void delete_vector(std::vector<std::string> *vec);
#endif
@@ -31,135 +31,35 @@ package llama
import "C"
import (
"fmt"
"os"
"strings"
"sync"
"unsafe"
)
type LLama struct {
state unsafe.Pointer
ctx unsafe.Pointer
embeddings bool
contextSize int
}
func New(model string, opts ...ModelOption) (*LLama, error) {
mo := NewModelOptions(opts...)
// TODO: free this pointer
modelPath := C.CString(model)
result := C.load_model(modelPath, C.int(mo.ContextSize), C.int(mo.Seed), C.bool(mo.F16Memory), C.bool(mo.MLock), C.bool(mo.Embeddings), C.bool(mo.MMap), C.bool(mo.LowVRAM), C.bool(mo.VocabOnly), C.int(mo.NGPULayers), C.int(mo.NBatch), C.CString(mo.MainGPU), C.CString(mo.TensorSplit), C.bool(mo.NUMA))
if result == nil {
ctx := C.load_model(modelPath, C.int(mo.ContextSize), C.int(mo.Seed), C.bool(mo.F16Memory), C.bool(mo.MLock), C.bool(mo.Embeddings), C.bool(mo.MMap), C.bool(mo.LowVRAM), C.bool(mo.VocabOnly), C.int(mo.NGPULayers), C.int(mo.NBatch), C.CString(mo.MainGPU), C.CString(mo.TensorSplit), C.bool(mo.NUMA))
if ctx == nil {
return nil, fmt.Errorf("failed loading model")
}
ll := &LLama{state: result, contextSize: mo.ContextSize, embeddings: mo.Embeddings}
ll := &LLama{ctx: ctx, contextSize: mo.ContextSize, embeddings: mo.Embeddings}
return ll, nil
}
func (l *LLama) Free() {
C.llama_binding_free_model(l.state)
}
func (l *LLama) LoadState(state string) error {
d := C.CString(state)
w := C.CString("rb")
result := C.load_state(l.state, d, w)
if result != 0 {
return fmt.Errorf("error while loading state")
}
return nil
}
func (l *LLama) SaveState(dst string) error {
d := C.CString(dst)
w := C.CString("wb")
C.save_state(l.state, d, w)
_, err := os.Stat(dst)
return err
}
// Token Embeddings
func (l *LLama) TokenEmbeddings(tokens []int, opts ...PredictOption) ([]float32, error) {
if !l.embeddings {
return []float32{}, fmt.Errorf("model loaded without embeddings")
}
po := NewPredictOptions(opts...)
outSize := po.Tokens
if po.Tokens == 0 {
outSize = 9999999
}
floats := make([]float32, outSize)
myArray := (*C.int)(C.malloc(C.size_t(len(tokens)) * C.sizeof_int))
// Copy the values from the Go slice to the C array
for i, v := range tokens {
(*[1<<31 - 1]int32)(unsafe.Pointer(myArray))[i] = int32(v)
}
params := C.llama_allocate_params(C.CString(""), C.int(po.Seed), C.int(po.Threads), C.int(po.Tokens), C.int(po.TopK),
C.float(po.TopP), C.float(po.Temperature), C.float(po.Penalty), C.int(po.Repeat),
C.bool(po.IgnoreEOS), C.bool(po.F16KV),
C.int(po.Batch), C.int(po.NKeep), nil, C.int(0),
C.float(po.TailFreeSamplingZ), C.float(po.TypicalP), C.float(po.FrequencyPenalty), C.float(po.PresencePenalty),
C.int(po.Mirostat), C.float(po.MirostatETA), C.float(po.MirostatTAU), C.bool(po.PenalizeNL), C.CString(po.LogitBias),
C.CString(po.PathPromptCache), C.bool(po.PromptCacheAll), C.bool(po.MLock), C.bool(po.MMap),
C.CString(po.MainGPU), C.CString(po.TensorSplit),
C.bool(po.PromptCacheRO),
)
ret := C.get_token_embeddings(params, l.state, myArray, C.int(len(tokens)), (*C.float)(&floats[0]))
if ret != 0 {
return floats, fmt.Errorf("embedding inference failed")
}
return floats, nil
}
// Embeddings
func (l *LLama) Embeddings(text string, opts ...PredictOption) ([]float32, error) {
if !l.embeddings {
return []float32{}, fmt.Errorf("model loaded without embeddings")
}
po := NewPredictOptions(opts...)
input := C.CString(text)
if po.Tokens == 0 {
po.Tokens = 99999999
}
floats := make([]float32, po.Tokens)
reverseCount := len(po.StopPrompts)
reversePrompt := make([]*C.char, reverseCount)
var pass **C.char
for i, s := range po.StopPrompts {
cs := C.CString(s)
reversePrompt[i] = cs
pass = &reversePrompt[0]
}
params := C.llama_allocate_params(input, C.int(po.Seed), C.int(po.Threads), C.int(po.Tokens), C.int(po.TopK),
C.float(po.TopP), C.float(po.Temperature), C.float(po.Penalty), C.int(po.Repeat),
C.bool(po.IgnoreEOS), C.bool(po.F16KV),
C.int(po.Batch), C.int(po.NKeep), pass, C.int(reverseCount),
C.float(po.TailFreeSamplingZ), C.float(po.TypicalP), C.float(po.FrequencyPenalty), C.float(po.PresencePenalty),
C.int(po.Mirostat), C.float(po.MirostatETA), C.float(po.MirostatTAU), C.bool(po.PenalizeNL), C.CString(po.LogitBias),
C.CString(po.PathPromptCache), C.bool(po.PromptCacheAll), C.bool(po.MLock), C.bool(po.MMap),
C.CString(po.MainGPU), C.CString(po.TensorSplit),
C.bool(po.PromptCacheRO),
)
ret := C.get_embeddings(params, l.state, (*C.float)(&floats[0]))
if ret != 0 {
return floats, fmt.Errorf("embedding inference failed")
}
return floats, nil
C.llama_binding_free_model(l.ctx)
}
func (l *LLama) Eval(text string, opts ...PredictOption) error {
@@ -189,7 +89,7 @@ func (l *LLama) Eval(text string, opts ...PredictOption) error {
C.CString(po.MainGPU), C.CString(po.TensorSplit),
C.bool(po.PromptCacheRO),
)
ret := C.eval(params, l.state, input)
ret := C.eval(params, l.ctx, input)
if ret != 0 {
return fmt.Errorf("inference failed")
}
@@ -203,7 +103,7 @@ func (l *LLama) Predict(text string, opts ...PredictOption) (string, error) {
po := NewPredictOptions(opts...)
if po.TokenCallback != nil {
setCallback(l.state, po.TokenCallback)
setCallback(l.ctx, po.TokenCallback)
}
input := C.CString(text)
@@ -231,7 +131,7 @@ func (l *LLama) Predict(text string, opts ...PredictOption) (string, error) {
C.CString(po.MainGPU), C.CString(po.TensorSplit),
C.bool(po.PromptCacheRO),
)
ret := C.llama_predict(params, l.state, (*C.char)(unsafe.Pointer(&out[0])), C.bool(po.DebugMode))
ret := C.llama_predict(params, l.ctx, (*C.char)(unsafe.Pointer(&out[0])), C.bool(po.DebugMode))
if ret != 0 {
return "", fmt.Errorf("inference failed")
}
@@ -248,7 +148,7 @@ func (l *LLama) Predict(text string, opts ...PredictOption) (string, error) {
C.llama_free_params(params)
if po.TokenCallback != nil {
setCallback(l.state, nil)
setCallback(l.ctx, nil)
}
return res, nil
@@ -268,7 +168,7 @@ func (l *LLama) Predict(text string, opts ...PredictOption) (string, error) {
//
// It is safe to call this method while a prediction is running.
func (l *LLama) SetTokenCallback(callback func(token string) bool) {
setCallback(l.state, callback)
setCallback(l.ctx, callback)
}
var (
......
# Ollama Python bindings
```
pip install ollama
```
## Developing
Ollama is built using Python 3 and uses [Poetry](https://python-poetry.org/) to manage dependencies and build packages.
```
pip install poetry
```
Install ollama and its dependencies:
```
poetry install --extras server --with dev
```
Run ollama server:
```
poetry run ollama serve
```
Update dependencies:
```
poetry update --extras server --with dev
poetry lock
poetry export >requirements.txt
```
Build binary package:
```
poetry build
```
from ollama.model import models
from ollama.engine import generate, load, unload
__all__ = [
'models',
'generate',
'load',
'unload',
]
from ollama.cmd import cli
if __name__ == '__main__':
cli.main()
import os
import sys
from argparse import ArgumentParser, HelpFormatter, PARSER
from yaspin import yaspin
from ollama import model, engine
from ollama.cmd import server
class CustomHelpFormatter(HelpFormatter):
"""
This class is used to customize the way the argparse help text is displayed.
We specifically override the _format_action method to exclude the line that
shows all the subparser command options in the help text. This line is typically
in the form "{serve,models,pull,run}".
"""
def _format_action(self, action):
# get the original help text
parts = super()._format_action(action)
if action.nargs == PARSER:
# remove the unwanted first line
parts = "\n".join(parts.split("\n")[1:])
return parts
def main():
parser = ArgumentParser(
description='Ollama: Run any large language model on any machine.',
formatter_class=CustomHelpFormatter,
)
# create models home if it doesn't exist
os.makedirs(model.MODELS_CACHE_PATH, exist_ok=True)
subparsers = parser.add_subparsers(
title='commands',
)
list_parser = subparsers.add_parser(
"models",
description="List all available models stored locally.",
help="List all available models stored locally.",
)
list_parser.set_defaults(fn=list_models)
search_parser = subparsers.add_parser(
"search",
description="Search for compatible models that Ollama can run.",
help="Search for compatible models that Ollama can run. Usage: search [model]",
)
search_parser.add_argument(
"query",
nargs="?",
help="Optional name of the model to search for.",
)
search_parser.set_defaults(fn=search)
pull_parser = subparsers.add_parser(
"pull",
description="Download a specified model from a remote source.",
help="Download a specified model from a remote source. Usage: pull [model]",
)
pull_parser.add_argument("model", help="Name of the model to download.")
pull_parser.set_defaults(fn=pull)
run_parser = subparsers.add_parser(
"run",
description="Run a model and submit prompts.",
help="Run a model and submit prompts. Usage: run [model] [prompt]",
)
run_parser.add_argument("model", help="Name of the model to run.")
run_parser.add_argument(
"prompt",
nargs="?",
help="Optional prompt for the model, interactive mode enabled when not specified.",
)
run_parser.set_defaults(fn=run)
server.set_parser(
subparsers.add_parser(
"serve",
description="Start a persistent server to interact with models via the API.",
help="Start a persistent server to interact with models via the API.",
)
)
args = parser.parse_args()
args = vars(args)
try:
fn = args.pop("fn")
fn(**args)
except KeyboardInterrupt:
pass
except KeyError:
parser.print_help()
except Exception as e:
print(e)
def list_models(*args, **kwargs):
for m in model.models(*args, **kwargs):
print(m)
def generate(*args, **kwargs):
if prompt := kwargs.get("prompt"):
print(">>>", prompt, flush=True)
generate_oneshot(*args, **kwargs)
return
if sys.stdin.isatty():
return generate_interactive(*args, **kwargs)
return generate_batch(*args, **kwargs)
def generate_oneshot(*args, **kwargs):
print(flush=True)
spinner = yaspin()
spinner.start()
spinner_running = True
try:
for output in engine.generate(model_name=kwargs.pop('model'), *args, **kwargs):
choices = output.get("choices", [])
if len(choices) > 0:
if spinner_running:
spinner.stop()
spinner_running = False
print("\r", end="") # move cursor back to beginning of line again
print(choices[0].get("text", ""), end="", flush=True)
except Exception:
spinner.stop()
raise
# end with a new line
print(flush=True)
print(flush=True)
def generate_interactive(*args, **kwargs):
while True:
print(">>> ", end="", flush=True)
line = next(sys.stdin)
if not line:
return
kwargs.update({"prompt": line})
generate_oneshot(*args, **kwargs)
def generate_batch(*args, **kwargs):
for line in sys.stdin:
print(">>> ", line, end="", flush=True)
kwargs.update({"prompt": line})
generate_oneshot(*args, **kwargs)
def search(*args, **kwargs):
try:
model_names = model.search_directory(*args, **kwargs)
if len(model_names) == 0:
print("No models found.")
return
elif len(model_names) == 1:
print(f"Found {len(model_names)} available model:")
else:
print(f"Found {len(model_names)} available models:")
for model_name in model_names:
print(model_name.lower())
except Exception as e:
print("Failed to fetch available models, check your network connection")
def pull(*args, **kwargs):
try:
model.pull(model_name=kwargs.pop('model'), *args, **kwargs)
print("Up to date.")
except Exception as e:
print(f"An error occurred: {e}")
def run(*args, **kwargs):
try:
name = model.pull(model_name=kwargs.pop('model'), *args, **kwargs)
kwargs.update({"model": name})
print(f"Running {name}...")
generate(*args, **kwargs)
except Exception as e:
print(f"An error occurred: {e}")
import json
import aiohttp_cors
from aiohttp import web
from ollama import engine
def set_parser(parser):
parser.add_argument("--host", default="127.0.0.1")
parser.add_argument("--port", default=7734)
parser.set_defaults(fn=serve)
def serve(*args, **kwargs):
app = web.Application()
cors = aiohttp_cors.setup(
app,
defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers="*",
allow_headers="*",
)
},
)
app.add_routes(
[
web.post("/load", load),
web.post("/unload", unload),
web.post("/generate", generate),
]
)
for route in app.router.routes():
cors.add(route)
app.update(
{
"models": {},
}
)
web.run_app(app, **kwargs)
async def load(request):
body = await request.json()
name = body.get("model")
if not name:
raise web.HTTPBadRequest()
kwargs = {
"models": request.app.get("models"),
}
engine.load(name, **kwargs)
return web.Response()
async def unload(request):
body = await request.json()
name = body.get("model")
if not name:
raise web.HTTPBadRequest()
engine.unload(name, models=request.app.get("models"))
return web.Response()
async def generate(request):
body = await request.json()
name = body.get("model")
if not name:
raise web.HTTPBadRequest()
prompt = body.get("prompt")
if not prompt:
raise web.HTTPBadRequest()
response = web.StreamResponse()
await response.prepare(request)
kwargs = {
"models": request.app.get("models"),
}
for output in engine.generate(name, prompt, **kwargs):
output = json.dumps(output).encode('utf-8')
await response.write(output)
await response.write(b"\n")
return response
import os
import sys
from os import path
from contextlib import contextmanager
from thefuzz import process
from llama_cpp import Llama
from ctransformers import AutoModelForCausalLM
import ollama.prompt
from ollama.model import MODELS_CACHE_PATH
@contextmanager
def suppress(file):
original = os.dup(file.fileno())
with open(os.devnull, "w") as devnull:
os.dup2(devnull.fileno(), file.fileno())
yield
os.dup2(original, file.fileno())
def generate(model_name, prompt, models={}, *args, **kwargs):
model = load(model_name, models=models)
inputs = ollama.prompt.template(model_name, prompt)
return model.generate(inputs, *args, **kwargs)
def load(model_name, models={}):
if not models.get(model_name, None):
model_path = path.expanduser(model_name)
if not path.exists(model_path):
model_path = str(MODELS_CACHE_PATH / (model_name + ".bin"))
runners = {
model_type: cls
for cls in [LlamaCppRunner, CtransformerRunner]
for model_type in cls.model_types()
}
for match, _ in process.extract(model_path, runners.keys(), limit=len(runners)):
try:
model = runners.get(match)
runner = model(model_path, match)
models.update({model_name: runner})
return runner
except Exception:
pass
raise Exception("failed to load model", model_path, model_name)
def unload(model_name, models={}):
if model_name in models:
models.pop(model_name)
class LlamaCppRunner:
def __init__(self, model_path, model_type):
try:
with suppress(sys.stderr), suppress(sys.stdout):
self.model = Llama(model_path, verbose=False, n_gpu_layers=1, seed=-1)
except Exception:
raise Exception("Failed to load model", model_path, model_type)
@staticmethod
def model_types():
return [
'llama',
'orca',
'vicuna',
'ultralm',
]
def generate(self, prompt, *args, **kwargs):
if "max_tokens" not in kwargs:
kwargs.update({"max_tokens": 512})
if "stop" not in kwargs:
kwargs.update({"stop": ["Q:"]})
if "stream" not in kwargs:
kwargs.update({"stream": True})
with suppress(sys.stderr):
for output in self.model(prompt, *args, **kwargs):
yield output
class CtransformerRunner:
def __init__(self, model_path, model_type):
self.model = AutoModelForCausalLM.from_pretrained(
model_path, model_type=model_type, local_files_only=True
)
@staticmethod
def model_types():
return [
'falcon',
'mpt',
'starcoder',
]
def generate(self, prompt, *args, **kwargs):
if "max_new_tokens" not in kwargs:
kwargs.update({"max_new_tokens": 512})
if "stop" not in kwargs:
kwargs.update({"stop": ["User"]})
if "stream" not in kwargs:
kwargs.update({"stream": True})
for output in self.model(prompt, *args, **kwargs):
yield {
'choices': [
{
'text': output,
},
],
}
import requests
import validators
from pathlib import Path
from os import path, walk
from urllib.parse import urlsplit, urlunsplit
from tqdm import tqdm
MODELS_MANIFEST = 'https://ollama.ai/api/models'
MODELS_CACHE_PATH = Path.home() / '.ollama' / 'models'
def models(*args, **kwargs):
for _, _, files in walk(MODELS_CACHE_PATH):
for file in files:
base, ext = path.splitext(file)
if ext == '.bin':
yield base
# search the directory and return all models which contain the search term as a substring,
# or all models if no search term is provided
def search_directory(query):
response = requests.get(MODELS_MANIFEST)
response.raise_for_status()
directory = response.json()
model_names = []
for model_info in directory:
if not query or query.lower() in model_info.get('name', '').lower():
model_names.append(model_info.get('name'))
return model_names
# get the url of the model from our curated directory
def get_url_from_directory(model):
response = requests.get(MODELS_MANIFEST)
response.raise_for_status()
directory = response.json()
for model_info in directory:
if model_info.get('name').lower() == model.lower():
return model_info.get('url')
return model
def download_from_repo(url, file_name):
parts = urlsplit(url)
path_parts = parts.path.split('/tree/')
if len(path_parts) == 1:
location = path_parts[0]
branch = 'main'
else:
location, branch = path_parts
location = location.strip('/')
if file_name == '':
file_name = path.basename(location).lower()
download_url = urlunsplit(
(
'https',
parts.netloc,
f'/api/models/{location}/tree/{branch}',
parts.query,
parts.fragment,
)
)
response = requests.get(download_url)
response.raise_for_status()
json_response = response.json()
download_url, file_size = find_bin_file(json_response, location, branch)
return download_file(download_url, file_name, file_size)
def find_bin_file(json_response, location, branch):
download_url = None
file_size = 0
for file_info in json_response:
if file_info.get('type') == 'file' and file_info.get('path').endswith('.bin'):
f_path = file_info.get('path')
download_url = (
f'https://huggingface.co/{location}/resolve/{branch}/{f_path}'
)
file_size = file_info.get('size')
if download_url is None:
raise Exception('No model found')
return download_url, file_size
def download_file(download_url, file_name, file_size):
local_filename = MODELS_CACHE_PATH / str(file_name + '.bin')
first_byte = path.getsize(local_filename) if path.exists(local_filename) else 0
if first_byte >= file_size:
return local_filename
print(f'Pulling {file_name}...')
header = {'Range': f'bytes={first_byte}-'} if first_byte != 0 else {}
response = requests.get(download_url, headers=header, stream=True)
response.raise_for_status()
total_size = int(response.headers.get('content-length', 0)) + first_byte
with open(local_filename, 'ab' if first_byte else 'wb') as file, tqdm(
total=total_size,
unit='iB',
unit_scale=True,
unit_divisor=1024,
initial=first_byte,
ascii=' ==',
bar_format='Downloading [{bar}] {percentage:3.2f}% {rate_fmt}{postfix}',
) as bar:
for data in response.iter_content(chunk_size=1024):
size = file.write(data)
bar.update(size)
return local_filename
def pull(model_name, *args, **kwargs):
# check the remote model location and see if it needs to be downloaded
url = model_name
file_name = ""
if not validators.url(url) and not url.startswith('huggingface.co'):
try:
url = get_url_from_directory(model_name)
except Exception as e:
# may not have been able to check remote directory, return now
return model_name
if url is model_name:
# this is not a model from our directory, so can't check remote
maybe_existing_model_location = MODELS_CACHE_PATH / str(model_name + '.bin')
if path.exists(model_name) or path.exists(maybe_existing_model_location):
# a file on the filesystem is being specified
return model_name
raise Exception("unknown model")
else:
# this is a model from our directory, check remote
file_name = model_name
if not (url.startswith('http://') or url.startswith('https://')):
url = f'https://{url}'
if not validators.url(url):
if model_name in models(MODELS_CACHE_PATH):
# the model is already downloaded, and specified by name
return model_name
raise Exception(f'Unknown model {model_name}')
local_filename = download_from_repo(url, file_name)
return local_filename
from os import path
from difflib import get_close_matches
from jinja2 import Environment, PackageLoader
def template(name, prompt):
environment = Environment(loader=PackageLoader(__name__, 'templates'))
best_templates = get_close_matches(
path.basename(name), environment.list_templates(), n=1, cutoff=0
)
template = environment.get_template(best_templates.pop())
return template.render(prompt=prompt)
This diff is collapsed.
[tool.poetry]
name = "ollama"
version = "0.0.9"
description = "Run ai models locally"
authors = ["ollama team"]
readme = "README.md"
packages = [{include = "ollama"}]
scripts = {ollama = "ollama.cmd.cli:main"}
[tool.poetry.dependencies]
python = "^3.8"
aiohttp = "^3.8.4"
aiohttp-cors = "^0.7.0"
jinja2 = "^3.1.2"
requests = "^2.31.0"
tqdm = "^4.65.0"
validators = "^0.20.0"
yaspin = "^2.3.0"
llama-cpp-python = "^0.1.67"
ctransformers = "^0.2.10"
thefuzz = {version = "^0.19.0", extras = ["speedup"]}
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
This diff is collapsed.
@@ -17,26 +17,14 @@ import (
func Serve(ln net.Listener) error {
r := gin.Default()
var l *llama.LLama
gpulayers := 1
// TODO: these should be request parameters
gpulayers := 0
tokens := 512
threads := runtime.NumCPU()
model := "/Users/pdevine/.cache/gpt4all/GPT4All-13B-snoozy.ggmlv3.q4_0.bin"
r.POST("/api/load", func(c *gin.Context) {
var err error
l, err = llama.New(model, llama.EnableF16Memory, llama.SetContext(128), llama.EnableEmbeddings, llama.SetGPULayers(gpulayers))
if err != nil {
fmt.Println("Loading the model failed:", err.Error())
}
})
r.POST("/api/unload", func(c *gin.Context) {
})
r.POST("/api/generate", func(c *gin.Context) {
// TODO: set prompt from template
fmt.Println("Generating text...")
var req api.GenerateRequest
if err := c.ShouldBindJSON(&req); err != nil {
@@ -44,6 +32,14 @@ func Serve(ln net.Listener) error {
return
}
fmt.Println(req)
l, err := llama.New(req.Model, llama.EnableF16Memory, llama.SetContext(128), llama.EnableEmbeddings, llama.SetGPULayers(gpulayers))
if err != nil {
fmt.Println("Loading the model failed:", err.Error())
return
}
ch := make(chan string)
go func() {
@@ -65,11 +61,6 @@ func Serve(ln net.Listener) error {
c.SSEvent("token", tok)
return true
})
// embeds, err := l.Embeddings(text)
// if err != nil {
// fmt.Printf("Embeddings: error %s \n", err.Error())
// }
})
log.Printf("Listening on %s", ln.Addr())
......